author     Joonsoo Kim <js1304@gmail.com>                  2013-04-30 02:07:34 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 02:54:34 +0400
commit     f98782ddd31ac6f938386b79d8bd7aa7c8a78c50 (patch)
tree       7e4b2133cdc46edb611a1879c86e0bd4f05df753 /mm/vmalloc.c
parent     e81ce85f960c2e26efb5d0802d56c34533edb1bd (diff)
download   linux-f98782ddd31ac6f938386b79d8bd7aa7c8a78c50.tar.xz
mm, vmalloc: iterate vmap_area_list in get_vmalloc_info()
This patch is a preparatory step toward removing vmlist entirely. To that end, the code that iterates vmlist is changed to iterate vmap_area_list instead. The change is mostly trivial, but one thing should be noted: vmlist lacks information about some areas in the vmalloc address space. For example, vm_map_ram() allocates an area in the vmalloc address space but does not link it into vmlist. Since it is better to provide full information about the vmalloc address space, we do not go through va->vm but use the vmap_area directly. This makes get_vmalloc_info() more precise.

Signed-off-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Dave Anderson <anderson@redhat.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
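For orientation, the new loop relies on only a handful of struct vmap_area fields plus the two counters in struct vmalloc_info. The sketch below is abbreviated from the definitions in mm/vmalloc.c and include/linux/vmalloc.h of this kernel generation (fields not used here, such as the purge list and RCU head, are left out), so treat it as a reference sketch rather than a verbatim excerpt.

#include <linux/rbtree.h>
#include <linux/list.h>

/* Abbreviated sketch of the structures used by get_vmalloc_info(). */
struct vmap_area {
	unsigned long va_start;		/* start address of the mapped range */
	unsigned long va_end;		/* end address; size = va_end - va_start */
	unsigned long flags;		/* e.g. VM_LAZY_FREE, VM_LAZY_FREEING */
	struct rb_node rb_node;		/* node in the address-sorted rbtree */
	struct list_head list;		/* node in the address-sorted vmap_area_list */
	struct vm_struct *vm;		/* associated vm_struct, if any */
};

struct vmalloc_info {
	unsigned long used;		/* bytes covered by vmap areas */
	unsigned long largest_chunk;	/* largest free gap in [VMALLOC_START, VMALLOC_END) */
};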
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c  56
1 file changed, 30 insertions(+), 26 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 59aa328007b2..aee1f61727a3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2671,46 +2671,50 @@ module_init(proc_vmalloc_init);
 
 void get_vmalloc_info(struct vmalloc_info *vmi)
 {
-	struct vm_struct *vma;
+	struct vmap_area *va;
 	unsigned long free_area_size;
 	unsigned long prev_end;
 
 	vmi->used = 0;
+	vmi->largest_chunk = 0;
 
-	if (!vmlist) {
-		vmi->largest_chunk = VMALLOC_TOTAL;
-	} else {
-		vmi->largest_chunk = 0;
+	prev_end = VMALLOC_START;
 
-		prev_end = VMALLOC_START;
-
-		read_lock(&vmlist_lock);
+	spin_lock(&vmap_area_lock);
 
-		for (vma = vmlist; vma; vma = vma->next) {
-			unsigned long addr = (unsigned long) vma->addr;
+	if (list_empty(&vmap_area_list)) {
+		vmi->largest_chunk = VMALLOC_TOTAL;
+		goto out;
+	}
 
-			/*
-			 * Some archs keep another range for modules in vmlist
-			 */
-			if (addr < VMALLOC_START)
-				continue;
-			if (addr >= VMALLOC_END)
-				break;
+	list_for_each_entry(va, &vmap_area_list, list) {
+		unsigned long addr = va->va_start;
 
-			vmi->used += vma->size;
+		/*
+		 * Some archs keep another range for modules in vmalloc space
+		 */
+		if (addr < VMALLOC_START)
+			continue;
+		if (addr >= VMALLOC_END)
+			break;
 
-			free_area_size = addr - prev_end;
-			if (vmi->largest_chunk < free_area_size)
-				vmi->largest_chunk = free_area_size;
+		if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
+			continue;
 
-			prev_end = vma->size + addr;
-		}
+		vmi->used += (va->va_end - va->va_start);
 
-		if (VMALLOC_END - prev_end > vmi->largest_chunk)
-			vmi->largest_chunk = VMALLOC_END - prev_end;
+		free_area_size = addr - prev_end;
+		if (vmi->largest_chunk < free_area_size)
+			vmi->largest_chunk = free_area_size;
 
-		read_unlock(&vmlist_lock);
+		prev_end = va->va_end;
 	}
+
+	if (VMALLOC_END - prev_end > vmi->largest_chunk)
+		vmi->largest_chunk = VMALLOC_END - prev_end;
+
+out:
+	spin_unlock(&vmap_area_lock);
 }
 #endif
 
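For context, the main in-tree consumer of get_vmalloc_info() at this point is /proc/meminfo, which reports the two counters as VmallocUsed and VmallocChunk. The fragment below is a condensed paraphrase of that call site in fs/proc/meminfo.c (surrounding fields trimmed), not a verbatim excerpt:

	struct vmalloc_info vmi;

	get_vmalloc_info(&vmi);
	/* ... other meminfo fields are gathered and printed around this ... */
	seq_printf(m,
		   "VmallocTotal:   %8lu kB\n"
		   "VmallocUsed:    %8lu kB\n"
		   "VmallocChunk:   %8lu kB\n",
		   (unsigned long)VMALLOC_TOTAL >> 10,	/* size of vmalloc space */
		   vmi.used >> 10,			/* filled by the loop above */
		   vmi.largest_chunk >> 10);		/* largest free gap */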