| author | Rajman Mekaco <rajman.mekaco@gmail.com> | 2012-05-30 02:06:21 +0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-30 03:22:19 +0400 |
| commit | 841e31e5cc6219d62054788faa289b6ed682d068 (patch) | |
| tree | 6c6bbcf2be5082c5bdc71bfccad4d57be0e99126 /mm/mmap.c | |
| parent | 4d67d860531ad5378dedfad7661c540f3365013d (diff) | |
| download | linux-841e31e5cc6219d62054788faa289b6ed682d068.tar.xz | |
mm/mmap.c: find_vma(): remove unnecessary if(mm) check
The "if (mm)" check is not required in find_vma, as the kernel code
calls find_vma only when it is absolutely sure that the mm_struct arg to
it is non-NULL.
Remove the if (mm) check and add a WARN_ON_ONCE(!mm) for now. This
serves the purpose of mandating that the execution context
(user-mode/kernel-mode) be known before find_vma() is called. It also
fixes two checkpatch.pl errors in the declarations of the rb_node and
vma_tmp local variables.
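For readers unfamiliar with the idiom: WARN_ON_ONCE(cond) prints a one-time warning when cond is true and evaluates to cond, so it can report the bad caller and guard the early return in a single statement. A minimal sketch of the resulting guard (the body is elided; see the full diff below):

```c
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma = NULL;

        /* WARN_ON_ONCE() returns its condition: warn (once) on a NULL mm, then bail out. */
        if (WARN_ON_ONCE(!mm))          /* Remove this in linux-3.6 */
                return NULL;

        /* ... cache check and rb-tree walk, unchanged ... */
        return vma;
}
```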
While browsing the internet I came across a discussion at
https://lkml.org/lkml/2012/3/27/342 about removing this validation check
from find_vma(). Since no one responded there, I decided to send this
patch with Andrew's suggestions.
[akpm@linux-foundation.org: add remove-me comment]
Signed-off-by: Rajman Mekaco <rajman.mekaco@gmail.com>
Cc: Kautuk Consul <consul.kautuk@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mmap.c')
-rw-r--r-- | mm/mmap.c | 53
1 file changed, 27 insertions(+), 26 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index e8dcfc7de866..4a9c2a391e28 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1639,33 +1639,34 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
         struct vm_area_struct *vma = NULL;
 
-        if (mm) {
-                /* Check the cache first. */
-                /* (Cache hit rate is typically around 35%.) */
-                vma = mm->mmap_cache;
-                if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
-                        struct rb_node * rb_node;
-
-                        rb_node = mm->mm_rb.rb_node;
-                        vma = NULL;
-
-                        while (rb_node) {
-                                struct vm_area_struct * vma_tmp;
-
-                                vma_tmp = rb_entry(rb_node,
-                                                struct vm_area_struct, vm_rb);
-
-                                if (vma_tmp->vm_end > addr) {
-                                        vma = vma_tmp;
-                                        if (vma_tmp->vm_start <= addr)
-                                                break;
-                                        rb_node = rb_node->rb_left;
-                                } else
-                                        rb_node = rb_node->rb_right;
-                        }
-                        if (vma)
-                                mm->mmap_cache = vma;
+        if (WARN_ON_ONCE(!mm))          /* Remove this in linux-3.6 */
+                return NULL;
+
+        /* Check the cache first. */
+        /* (Cache hit rate is typically around 35%.) */
+        vma = mm->mmap_cache;
+        if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+                struct rb_node *rb_node;
+
+                rb_node = mm->mm_rb.rb_node;
+                vma = NULL;
+
+                while (rb_node) {
+                        struct vm_area_struct *vma_tmp;
+
+                        vma_tmp = rb_entry(rb_node,
+                                           struct vm_area_struct, vm_rb);
+
+                        if (vma_tmp->vm_end > addr) {
+                                vma = vma_tmp;
+                                if (vma_tmp->vm_start <= addr)
+                                        break;
+                                rb_node = rb_node->rb_left;
+                        } else
+                                rb_node = rb_node->rb_right;
                 }
+                if (vma)
+                        mm->mmap_cache = vma;
         }
         return vma;
 }
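For context (not part of the patch): in-tree callers look up VMAs on an mm they already hold, typically current->mm, while holding mm->mmap_sem for reading. The sketch below is a hypothetical helper (the function name and its purpose are illustrative, not from mm/mmap.c) showing that calling pattern and why the mm passed to find_vma() is always non-NULL in practice:

```c
#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Hypothetical helper, for illustration only: checks whether addr falls
 * inside a mapping of the current task. current->mm is known non-NULL in
 * user/task context, which is why find_vma() itself no longer needs the
 * NULL check.
 */
static int addr_is_mapped(unsigned long addr)
{
        struct mm_struct *mm = current->mm;     /* non-NULL for a user task */
        struct vm_area_struct *vma;
        int mapped;

        down_read(&mm->mmap_sem);               /* find_vma() requires mmap_sem held */
        vma = find_vma(mm, addr);
        /*
         * find_vma() returns the first VMA with vm_end > addr; addr is
         * inside it only if vm_start <= addr as well.
         */
        mapped = vma && vma->vm_start <= addr;
        up_read(&mm->mmap_sem);

        return mapped;
}
```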