| field | value | date |
|---|---|---|
| author | Christoph Lameter <cl@linux.com> | 2012-06-13 19:24:56 +0400 |
| committer | Pekka Enberg <penberg@kernel.org> | 2012-06-14 10:20:05 +0400 |
| commit | 350260889b251821e770573dfd65cd851b4ef781 (patch) | |
| tree | 298a87d2d54f2f3e99138f1798d242d4e7149657 /mm/slab.c | |
| parent | e571b0ad3495be5793e54e21cd244c4545c49d88 (diff) | |
| download | linux-350260889b251821e770573dfd65cd851b4ef781.tar.xz | |
slab: Remove some accessors
These accessors are rather trivial now, and it's better to see inline what is
really going on.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 35 |
1 file changed, 8 insertions(+), 27 deletions(-)
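To see why these accessors add little, here is a minimal stand-alone C sketch of the pattern the patch removes: a one-line setter hiding a plain field assignment versus writing the field directly. The names mirror `page_set_cache()` and the `slab_cache` field from the diff below, but the struct definitions are simplified stand-ins, not kernel code.

```c
#include <assert.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel's struct kmem_cache / struct page. */
struct kmem_cache { const char *name; };
struct page { struct kmem_cache *slab_cache; };

/* The kind of trivial accessor the patch deletes: it only hides an
 * assignment, so readers must chase its definition to learn that. */
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->slab_cache = cache;
}

int main(void)
{
	struct kmem_cache cache = { "demo" };
	struct page page = { NULL };

	page_set_cache(&page, &cache); /* before: indirection hides the store */
	page.slab_cache = &cache;      /* after: the assignment is visible inline */

	assert(page.slab_cache == &cache);
	return 0;
}
```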
```diff
diff --git a/mm/slab.c b/mm/slab.c
index af05147d7abd..28a8f7d29d4a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -489,16 +489,6 @@ EXPORT_SYMBOL(slab_buffer_size);
 static int slab_max_order = SLAB_MAX_ORDER_LO;
 static bool slab_max_order_set __initdata;
 
-/*
- * Functions for storing/retrieving the cachep and or slab from the page
- * allocator. These are used to find the slab an obj belongs to. With kfree(),
- * these are used to find the cache which an obj belongs to.
- */
-static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
-{
-	page->slab_cache = cache;
-}
-
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
 	page = compound_head(page);
@@ -506,27 +496,18 @@ static inline struct kmem_cache *page_get_cache(struct page *page)
 	return page->slab_cache;
 }
 
-static inline void page_set_slab(struct page *page, struct slab *slab)
-{
-	page->slab_page = slab;
-}
-
-static inline struct slab *page_get_slab(struct page *page)
-{
-	BUG_ON(!PageSlab(page));
-	return page->slab_page;
-}
-
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct page *page = virt_to_head_page(obj);
-	return page_get_cache(page);
+	return page->slab_cache;
 }
 
 static inline struct slab *virt_to_slab(const void *obj)
 {
 	struct page *page = virt_to_head_page(obj);
-	return page_get_slab(page);
+
+	VM_BUG_ON(!PageSlab(page));
+	return page->slab_page;
 }
 
 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
@@ -2918,8 +2899,8 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
 		nr_pages <<= cache->gfporder;
 
 	do {
-		page_set_cache(page, cache);
-		page_set_slab(page, slab);
+		page->slab_cache = cache;
+		page->slab_page = slab;
 		page++;
 	} while (--nr_pages);
 }
@@ -3057,7 +3038,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	kfree_debugcheck(objp);
 	page = virt_to_head_page(objp);
 
-	slabp = page_get_slab(page);
+	slabp = page->slab_page;
 
 	if (cachep->flags & SLAB_RED_ZONE) {
 		verify_redzone_free(cachep, objp);
@@ -3261,7 +3242,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		struct slab *slabp;
 		unsigned objnr;
 
-		slabp = page_get_slab(virt_to_head_page(objp));
+		slabp = virt_to_head_page(objp)->slab_page;
 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
 		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
 	}
```
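One behavioral detail worth noting: the deleted `page_get_slab()` used `BUG_ON()`, while the check inlined into `virt_to_slab()` uses `VM_BUG_ON()`, which is compiled out unless `CONFIG_DEBUG_VM` is set. The sketch below is a simplified, runnable illustration of that split; the real macros live in include/asm-generic/bug.h and include/linux/mmdebug.h, and the no-op expansion shown mirrors the mmdebug.h definition around the time of this patch.

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's BUG_ON(): always evaluated, aborts on failure. */
#define BUG_ON(cond) \
	do { if (cond) { fprintf(stderr, "BUG: %s\n", #cond); abort(); } } while (0)

/* VM_BUG_ON() only checks on debug builds; otherwise it compiles to nothing. */
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
#else
#define VM_BUG_ON(cond) do { } while (0)
#endif

int main(void)
{
	VM_BUG_ON(1); /* no-op here; aborts only when built with -DCONFIG_DEBUG_VM */
	BUG_ON(0);    /* always evaluated; passes */
	printf("reached: VM_BUG_ON was compiled out\n");
	return 0;
}
```

The practical effect is that after this patch the `!PageSlab(page)` sanity check in `virt_to_slab()` disappears on production kernels, whereas the old `page_get_slab()` fired unconditionally; the check is now treated purely as a debugging aid.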