Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r-- | mm/vmalloc.c | 46
1 file changed, 17 insertions(+), 29 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index effd1ff6a4b4..5977b178694d 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -790,6 +790,7 @@ unsigned long vmalloc_nr_pages(void)
 	return atomic_long_read(&nr_vmalloc_pages);
 }
 
+/* Look up the first VA which satisfies addr < va_end, NULL if none. */
 static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
 {
 	struct vmap_area *va = NULL;
@@ -874,11 +875,9 @@ find_va_links(struct vmap_area *va,
 		 * Trigger the BUG() if there are sides(left/right)
 		 * or full overlaps.
 		 */
-		if (va->va_start < tmp_va->va_end &&
-				va->va_end <= tmp_va->va_start)
+		if (va->va_end <= tmp_va->va_start)
 			link = &(*link)->rb_left;
-		else if (va->va_end > tmp_va->va_start &&
-				va->va_start >= tmp_va->va_end)
+		else if (va->va_start >= tmp_va->va_end)
 			link = &(*link)->rb_right;
 		else {
 			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
@@ -931,7 +930,7 @@ link_va(struct vmap_area *va, struct rb_root *root,
 		 * Some explanation here. Just perform simple insertion
 		 * to the tree. We do not set va->subtree_max_size to
 		 * its current size before calling rb_insert_augmented().
-		 * It is because of we populate the tree from the bottom
+		 * It is because we populate the tree from the bottom
 		 * to parent levels when the node _is_ in the tree.
 		 *
 		 * Therefore we set subtree_max_size to zero after insertion,
@@ -1335,10 +1334,10 @@ classify_va_fit_type(struct vmap_area *va,
 
 static __always_inline int
 adjust_va_to_fit_type(struct vmap_area *va,
-	unsigned long nva_start_addr, unsigned long size,
-	enum fit_type type)
+	unsigned long nva_start_addr, unsigned long size)
 {
 	struct vmap_area *lva = NULL;
+	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
 
 	if (type == FL_FIT_TYPE) {
 		/*
@@ -1444,7 +1443,6 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
 	bool adjust_search_size = true;
 	unsigned long nva_start_addr;
 	struct vmap_area *va;
-	enum fit_type type;
 	int ret;
 
 	/*
@@ -1472,14 +1470,9 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
 	if (nva_start_addr + size > vend)
 		return vend;
 
-	/* Classify what we have found. */
-	type = classify_va_fit_type(va, nva_start_addr, size);
-	if (WARN_ON_ONCE(type == NOTHING_FIT))
-		return vend;
-
 	/* Update the free vmap_area. */
-	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
-	if (ret)
+	ret = adjust_va_to_fit_type(va, nva_start_addr, size);
+	if (WARN_ON_ONCE(ret))
 		return vend;
 
 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
@@ -1663,7 +1656,7 @@ static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
 
 /*
  * Serialize vmap purging. There is no actual critical section protected
- * by this look, but we want to avoid concurrent calls for performance
+ * by this lock, but we want to avoid concurrent calls for performance
  * reasons and to make the pcpu_get_vm_areas more deterministic.
  */
 static DEFINE_MUTEX(vmap_purge_lock);
@@ -1677,32 +1670,32 @@ static void purge_fragmented_blocks_allcpus(void);
 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 {
 	unsigned long resched_threshold;
-	struct list_head local_pure_list;
+	struct list_head local_purge_list;
 	struct vmap_area *va, *n_va;
 
 	lockdep_assert_held(&vmap_purge_lock);
 
 	spin_lock(&purge_vmap_area_lock);
 	purge_vmap_area_root = RB_ROOT;
-	list_replace_init(&purge_vmap_area_list, &local_pure_list);
+	list_replace_init(&purge_vmap_area_list, &local_purge_list);
 	spin_unlock(&purge_vmap_area_lock);
 
-	if (unlikely(list_empty(&local_pure_list)))
+	if (unlikely(list_empty(&local_purge_list)))
 		return false;
 
 	start = min(start,
-		list_first_entry(&local_pure_list,
+		list_first_entry(&local_purge_list,
 			struct vmap_area, list)->va_start);
 
 	end = max(end,
-		list_last_entry(&local_pure_list,
+		list_last_entry(&local_purge_list,
 			struct vmap_area, list)->va_end);
 
 	flush_tlb_kernel_range(start, end);
 	resched_threshold = lazy_max_pages() << 1;
 
 	spin_lock(&free_vmap_area_lock);
-	list_for_each_entry_safe(va, n_va, &local_pure_list, list) {
+	list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
 		unsigned long orig_start = va->va_start;
 		unsigned long orig_end = va->va_end;
@@ -3735,7 +3728,6 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	int area, area2, last_area, term_area;
 	unsigned long base, start, size, end, last_end, orig_start, orig_end;
 	bool purged = false;
-	enum fit_type type;
 
 	/* verify parameters and allocate data structures */
 	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
@@ -3846,15 +3838,11 @@ retry:
 			/* It is a BUG(), but trigger recovery instead. */
 			goto recovery;
 
-		type = classify_va_fit_type(va, start, size);
-		if (WARN_ON_ONCE(type == NOTHING_FIT))
+		ret = adjust_va_to_fit_type(va, start, size);
+		if (WARN_ON_ONCE(unlikely(ret)))
 			/* It is a BUG(), but trigger recovery instead. */
 			goto recovery;
 
-		ret = adjust_va_to_fit_type(va, start, size, type);
-		if (unlikely(ret))
-			goto recovery;
-
 		/* Allocated area. */
 		va = vas[area];
 		va->va_start = start;
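
The recurring change above folds classify_va_fit_type() into adjust_va_to_fit_type(), so callers can no longer pass a fit type that is stale or inconsistent with (va, nva_start_addr, size). The find_va_links() hunk additionally drops one comparison per side, which is valid because the tree only ever holds non-overlapping half-open [va_start, va_end) ranges. A minimal userspace sketch of that simplified classification (standalone C with hypothetical names, not the kernel code):

#include <stdio.h>

/* Half-open interval [start, end), mirroring va_start/va_end. */
struct range {
	unsigned long start;
	unsigned long end;
};

/*
 * One comparison per side is enough: for a valid range (start < end),
 * new->end <= cur->start already implies new->start < cur->end, so the
 * extra check removed by the patch above carried no information.
 */
static const char *classify(const struct range *new, const struct range *cur)
{
	if (new->end <= cur->start)
		return "left";		/* descend rb_left */
	if (new->start >= cur->end)
		return "right";		/* descend rb_right */
	return "overlap";		/* the WARN() case */
}

int main(void)
{
	const struct range cur = { 100, 200 };
	const struct range tests[] = { { 10, 100 }, { 200, 300 }, { 150, 250 } };

	for (int i = 0; i < 3; i++)
		printf("[%lu, %lu) vs [100, 200): %s\n",
		       tests[i].start, tests[i].end,
		       classify(&tests[i], &cur));
	return 0;
}

Touching adjacent ranges classify as "left"/"right" because the intervals are half-open; only a true intersection falls through to "overlap", matching the WARN() branch in find_va_links().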