diff options
| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-08-17 10:57:56 +0400 | 
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-08-17 11:01:08 +0400 | 
| commit | a22ddff8bedfe33eeb1330bbb7ef1fbe007a42c4 (patch) | |
| tree | 61a2eb7fa62f5af10c2b913ca429e6b068b0eb2d /mm/vmalloc.c | |
| parent | 20d5a540e55a29daeef12706f9ee73baf5641c16 (diff) | |
| parent | d9875690d9b89a866022ff49e3fcea892345ad92 (diff) | |
| download | linux-a22ddff8bedfe33eeb1330bbb7ef1fbe007a42c4.tar.xz | |
Merge tag 'v3.6-rc2' into drm-intel-next
Backmerge Linux 3.6-rc2 to resolve a few funny conflicts before we put
even more madness on top:
- drivers/gpu/drm/i915/i915_irq.c: Just a spurious WARN removed in
  -fixes, that has been changed in a variable-rename in -next, too.
- drivers/gpu/drm/i915/intel_ringbuffer.c: -next remove scratch_addr
  (since all their users have been extracted in another function),
  -fixes added another user for a hw workaround.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'mm/vmalloc.c')
| -rw-r--r-- | mm/vmalloc.c | 52 | 
1 files changed, 32 insertions, 20 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 2aad49981b57..2bb90b1d241c 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -413,11 +413,11 @@ nocache:  		if (addr + size - 1 < addr)  			goto overflow; -		n = rb_next(&first->rb_node); -		if (n) -			first = rb_entry(n, struct vmap_area, rb_node); -		else +		if (list_is_last(&first->list, &vmap_area_list))  			goto found; + +		first = list_entry(first->list.next, +				struct vmap_area, list);  	}  found: @@ -904,6 +904,14 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)  	BUG_ON(size & ~PAGE_MASK);  	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); +	if (WARN_ON(size == 0)) { +		/* +		 * Allocating 0 bytes isn't what caller wants since +		 * get_order(0) returns funny result. Just warn and terminate +		 * early. +		 */ +		return NULL; +	}  	order = get_order(size);  again: @@ -1280,7 +1288,7 @@ DEFINE_RWLOCK(vmlist_lock);  struct vm_struct *vmlist;  static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, -			      unsigned long flags, void *caller) +			      unsigned long flags, const void *caller)  {  	vm->flags = flags;  	vm->addr = (void *)va->va_start; @@ -1306,7 +1314,7 @@ static void insert_vmalloc_vmlist(struct vm_struct *vm)  }  static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, -			      unsigned long flags, void *caller) +			      unsigned long flags, const void *caller)  {  	setup_vmalloc_vm(vm, va, flags, caller);  	insert_vmalloc_vmlist(vm); @@ -1314,7 +1322,7 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,  static struct vm_struct *__get_vm_area_node(unsigned long size,  		unsigned long align, unsigned long flags, unsigned long start, -		unsigned long end, int node, gfp_t gfp_mask, void *caller) +		unsigned long end, int node, gfp_t gfp_mask, const void *caller)  {  	struct vmap_area *va;  	struct vm_struct *area; @@ -1375,7 +1383,7 @@ EXPORT_SYMBOL_GPL(__get_vm_area);  struct vm_struct *__get_vm_area_caller(unsigned long size, 
unsigned long flags,  				       unsigned long start, unsigned long end, -				       void *caller) +				       const void *caller)  {  	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,  				  caller); @@ -1397,13 +1405,21 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)  }  struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, -				void *caller) +				const void *caller)  {  	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,  						-1, GFP_KERNEL, caller);  } -static struct vm_struct *find_vm_area(const void *addr) +/** + *	find_vm_area  -  find a continuous kernel virtual area + *	@addr:		base address + * + *	Search for the kernel VM area starting at @addr, and return it. + *	It is up to the caller to do all required locking to keep the returned + *	pointer valid. + */ +struct vm_struct *find_vm_area(const void *addr)  {  	struct vmap_area *va; @@ -1568,9 +1584,9 @@ EXPORT_SYMBOL(vmap);  static void *__vmalloc_node(unsigned long size, unsigned long align,  			    gfp_t gfp_mask, pgprot_t prot, -			    int node, void *caller); +			    int node, const void *caller);  static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, -				 pgprot_t prot, int node, void *caller) +				 pgprot_t prot, int node, const void *caller)  {  	const int order = 0;  	struct page **pages; @@ -1643,7 +1659,7 @@ fail:   */  void *__vmalloc_node_range(unsigned long size, unsigned long align,  			unsigned long start, unsigned long end, gfp_t gfp_mask, -			pgprot_t prot, int node, void *caller) +			pgprot_t prot, int node, const void *caller)  {  	struct vm_struct *area;  	void *addr; @@ -1699,7 +1715,7 @@ fail:   */  static void *__vmalloc_node(unsigned long size, unsigned long align,  			    gfp_t gfp_mask, pgprot_t prot, -			    int node, void *caller) +			    int node, const void *caller)  {  	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,  				gfp_mask, prot, node, 
caller); @@ -1975,9 +1991,7 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)   *	IOREMAP area is treated as memory hole and no copy is done.   *   *	If [addr...addr+count) doesn't includes any intersects with alive - *	vm_struct area, returns 0. - *	@buf should be kernel's buffer. Because	this function uses KM_USER0, - *	the caller should guarantee KM_USER0 is not used. + *	vm_struct area, returns 0. @buf should be kernel's buffer.   *   *	Note: In usual ops, vread() is never necessary because the caller   *	should know vmalloc() area is valid and can use memcpy(). @@ -2051,9 +2065,7 @@ finished:   *	IOREMAP area is treated as memory hole and no copy is done.   *   *	If [addr...addr+count) doesn't includes any intersects with alive - *	vm_struct area, returns 0. - *	@buf should be kernel's buffer. Because	this function uses KM_USER0, - *	the caller should guarantee KM_USER0 is not used. + *	vm_struct area, returns 0. @buf should be kernel's buffer.   *   *	Note: In usual ops, vwrite() is never necessary because the caller   *	should know vmalloc() area is valid and can use memcpy().  | 
