author     Christoph Lameter <cl@linux.com>    2012-06-13 19:24:54 +0400
committer  Pekka Enberg <penberg@kernel.org>   2012-06-14 10:19:52 +0400
commit     b5568280c9b9162b384be9d447013b74d682d4b3 (patch)
tree       d3b32a2279d30a7a373349a821a1949abf7bb59c /mm/slob.c
parent     690d5777392180fdc05a82c0c7979e50e8d93de8 (diff)
download   linux-b5568280c9b9162b384be9d447013b74d682d4b3.tar.xz
slob: Remove various small accessors
Those have become so simple that they are no longer needed.
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slob.c')
-rw-r--r--  mm/slob.c | 49
1 file changed, 9 insertions(+), 40 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 74c3bb25f640..c85265d22e08 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -92,14 +92,6 @@ struct slob_block {
 typedef struct slob_block slob_t;
 
 /*
- * free_slob_page: call before a slob_page is returned to the page allocator.
- */
-static inline void free_slob_page(struct page *sp)
-{
-	reset_page_mapcount(sp);
-}
-
-/*
  * All partially free slob pages go on these lists.
  */
 #define SLOB_BREAK1 256
@@ -109,29 +101,6 @@ static LIST_HEAD(free_slob_medium);
 static LIST_HEAD(free_slob_large);
 
 /*
- * is_slob_page: True for all slob pages (false for bigblock pages)
- */
-static inline int is_slob_page(struct page *sp)
-{
-	return PageSlab(sp);
-}
-
-static inline void set_slob_page(struct page *sp)
-{
-	__SetPageSlab(sp);
-}
-
-static inline void clear_slob_page(struct page *sp)
-{
-	__ClearPageSlab(sp);
-}
-
-static inline struct page *slob_page(const void *addr)
-{
-	return virt_to_page(addr);
-}
-
-/*
  * slob_page_free: true for pages on free_slob_pages list.
  */
 static inline int slob_page_free(struct page *sp)
@@ -347,8 +316,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
 			return NULL;
-		sp = slob_page(b);
-		set_slob_page(sp);
+		sp = virt_to_page(b);
+		__SetPageSlab(sp);
 
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
@@ -380,7 +349,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = slob_page(block);
+	sp = virt_to_page(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -390,8 +359,8 @@ static void slob_free(void *block, int size)
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
 		spin_unlock_irqrestore(&slob_lock, flags);
-		clear_slob_page(sp);
-		free_slob_page(sp);
+		__ClearPageSlab(sp);
+		reset_page_mapcount(sp);
 		slob_free_pages(b, 0);
 		return;
 	}
@@ -508,8 +477,8 @@ void kfree(const void *block)
 		return;
 	kmemleak_free(block);
 
-	sp = slob_page(block);
-	if (is_slob_page(sp)) {
+	sp = virt_to_page(block);
+	if (PageSlab(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
@@ -527,8 +496,8 @@ size_t ksize(const void *block)
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
-	sp = slob_page(block);
-	if (is_slob_page(sp)) {
+	sp = virt_to_page(block);
+	if (PageSlab(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		return SLOB_UNITS(*m) * SLOB_UNIT;
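
The removed helpers were single-statement wrappers around the generic struct page operations, so inlining them at the call sites loses nothing. A minimal before/after sketch of the pattern, lifted from the kfree() hunk above (an illustration of the shape of the change, not a standalone program):

/* Before: SLOB wrapped the generic page helpers in trivial accessors ... */
static inline struct page *slob_page(const void *addr)
{
	return virt_to_page(addr);
}

static inline int is_slob_page(struct page *sp)
{
	return PageSlab(sp);	/* false for bigblock pages */
}

/* ... so kfree() reached the page through two extra names: */
	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	}

/* After: the wrapper layer is gone and the helpers are called directly: */
	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		/* body unchanged */
	}

The remaining substitutions in the diff are equally mechanical: set_slob_page() becomes __SetPageSlab(), clear_slob_page() becomes __ClearPageSlab(), and free_slob_page() becomes reset_page_mapcount().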