Diffstat (limited to 'mm/slob.c')
-rw-r--r--  mm/slob.c | 97
1 file changed, 56 insertions(+), 41 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 45d4ca79933a..1e921c5e9576 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -194,7 +194,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
void *page;
#ifdef CONFIG_NUMA
- if (node != -1)
+ if (node != NUMA_NO_NODE)
page = alloc_pages_exact_node(node, gfp, order);
else
#endif
@@ -290,7 +290,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
* If there's a node specification, search for a partial
* page with a matching node id in the freelist.
*/
- if (node != -1 && page_to_nid(sp) != node)
+ if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
continue;
#endif
/* Enough room on this page? */
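Both hunks above replace the bare -1 sentinel with NUMA_NO_NODE, which the kernel defines as -1: behavior is unchanged, but the intent ("no node preference") is now spelled out at every comparison. A minimal userspace sketch of the same named-sentinel pattern; NO_NODE and alloc_on are invented for illustration, not kernel APIs:

#include <stdio.h>

/* Named sentinel mirroring the kernel's NUMA_NO_NODE, which is -1. */
#define NO_NODE (-1)

static void alloc_on(int node)
{
        if (node != NO_NODE)            /* explicit node request */
                printf("allocate on node %d\n", node);
        else                            /* no preference: any node */
                printf("allocate on any node\n");
}

int main(void)
{
        alloc_on(0);
        alloc_on(NO_NODE);      /* reads as intent, not a magic number */
        return 0;
}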
@@ -425,10 +425,11 @@ out:
* End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
*/
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
unsigned int *m;
- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
void *ret;
gfp &= gfp_allowed_mask;
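The max() to max_t(size_t, ...) change here (and repeated below in kfree() and ksize()) sidesteps the kernel's type-checked max(), which warns when its two arguments have different types; ARCH_KMALLOC_MINALIGN and ARCH_SLAB_MINALIGN can be defined with different integer types depending on the architecture, and max_t casts both operands to one explicit type first. A standalone sketch of the idiom, with deliberately mismatched stand-in macros that are not the kernel's definitions:

#include <stdio.h>

/*
 * Stand-ins for ARCH_KMALLOC_MINALIGN / ARCH_SLAB_MINALIGN with
 * mismatched types (int vs. size_t), as can happen across arches.
 */
#define KMALLOC_MINALIGN        8                       /* int */
#define SLAB_MINALIGN           sizeof(unsigned long)   /* size_t */

/* max_t-style helper: cast both operands to one explicit type first. */
#define MAX_T(type, a, b)       ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
        size_t align = MAX_T(size_t, KMALLOC_MINALIGN, SLAB_MINALIGN);

        printf("align = %zu\n", align);
        return 0;
}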
@@ -446,7 +447,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
*m = size;
ret = (void *)m + align;
- trace_kmalloc_node(_RET_IP_, ret,
+ trace_kmalloc_node(caller, ret,
size, size + align, gfp, node);
} else {
unsigned int order = get_order(size);
@@ -460,15 +461,35 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
page->private = size;
}
- trace_kmalloc_node(_RET_IP_, ret,
+ trace_kmalloc_node(caller, ret,
size, PAGE_SIZE << order, gfp, node);
}
kmemleak_alloc(ret, size, 1, gfp);
return ret;
}
+
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+ return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
EXPORT_SYMBOL(__kmalloc_node);
+#ifdef CONFIG_TRACING
+void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
+{
+ return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
+}
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
+ int node, unsigned long caller)
+{
+ return __do_kmalloc_node(size, gfp, node, caller);
+}
+#endif
+#endif
+
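The refactor above turns the old exported function into an __always_inline worker that receives the call site explicitly: __kmalloc_node() passes its own return address via _RET_IP_ (which the kernel defines as (unsigned long)__builtin_return_address(0)), while the new *_track_caller variants forward whatever caller their user supplies. trace_kmalloc_node() then attributes the allocation to the real call site rather than to a wrapper. A userspace sketch of the same pattern; my_alloc and do_alloc are invented names:

#include <stdio.h>
#include <stdlib.h>

/* Worker takes the call site explicitly, like __do_kmalloc_node(). */
static inline void *do_alloc(size_t size, unsigned long caller)
{
        void *p = malloc(size);

        /* stand-in for trace_kmalloc_node(caller, ...) */
        printf("allocated %zu bytes at %p for call site %#lx\n",
               size, p, caller);
        return p;
}

/* Public entry point: records its own caller, as _RET_IP_ does. */
void *my_alloc(size_t size)
{
        return do_alloc(size, (unsigned long)__builtin_return_address(0));
}

/* Track-caller variant: trusts the call site its user hands in. */
void *my_alloc_track_caller(size_t size, unsigned long caller)
{
        return do_alloc(size, caller);
}

int main(void)
{
        void *p = my_alloc(32);

        free(p);
        return 0;
}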
void kfree(const void *block)
{
struct page *sp;
@@ -481,7 +502,7 @@ void kfree(const void *block)
sp = virt_to_page(block);
if (PageSlab(sp)) {
- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
} else
@@ -500,7 +521,7 @@ size_t ksize(const void *block)
sp = virt_to_page(block);
if (PageSlab(sp)) {
- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
return SLOB_UNITS(*m) * SLOB_UNIT;
} else
@@ -508,44 +529,24 @@ size_t ksize(const void *block)
}
EXPORT_SYMBOL(ksize);
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
- size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
{
- struct kmem_cache *c;
-
- c = slob_alloc(sizeof(struct kmem_cache),
- GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
+ size_t align = c->size;
- if (c) {
- c->name = name;
- c->size = size;
- if (flags & SLAB_DESTROY_BY_RCU) {
- /* leave room for rcu footer at the end of object */
- c->size += sizeof(struct slob_rcu);
- }
- c->flags = flags;
- c->ctor = ctor;
- /* ignore alignment unless it's forced */
- c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
- if (c->align < ARCH_SLAB_MINALIGN)
- c->align = ARCH_SLAB_MINALIGN;
- if (c->align < align)
- c->align = align;
-
- kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
- c->refcount = 1;
+ if (flags & SLAB_DESTROY_BY_RCU) {
+ /* leave room for rcu footer at the end of object */
+ c->size += sizeof(struct slob_rcu);
}
- return c;
-}
+ c->flags = flags;
+ /* ignore alignment unless it's forced */
+ c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+ if (c->align < ARCH_SLAB_MINALIGN)
+ c->align = ARCH_SLAB_MINALIGN;
+ if (c->align < align)
+ c->align = align;
-void kmem_cache_destroy(struct kmem_cache *c)
-{
- kmemleak_free(c);
- if (c->flags & SLAB_DESTROY_BY_RCU)
- rcu_barrier();
- slob_free(c, sizeof(struct kmem_cache));
+ return 0;
}
-EXPORT_SYMBOL(kmem_cache_destroy);
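This hunk is part of the series moving kmem_cache creation into common slab code: __kmem_cache_create() no longer allocates struct kmem_cache itself, but receives one the common layer has already allocated and prefilled (name, size, ctor), finishes the allocator-specific setup, and reports success or failure as an int. kmem_cache_destroy() disappears here for the same reason; teardown now lives in common code, which calls the allocator's __kmem_cache_shutdown() hook added further down. A standalone sketch of this initialize-a-caller-provided-struct pattern; struct cache and cache_create are illustrative names, not the kernel's:

#include <stddef.h>
#include <stdio.h>

struct cache {
        const char      *name;
        size_t          size;
        size_t          align;
        unsigned long   flags;
};

/*
 * Allocator-specific hook: finish setting up a struct the common
 * layer has already allocated and prefilled; return 0 or an error.
 */
static int cache_create(struct cache *c, unsigned long flags)
{
        c->flags = flags;
        if (c->align < sizeof(void *))  /* enforce a minimum alignment */
                c->align = sizeof(void *);
        return 0;
}

int main(void)
{
        /* "Common layer": allocates and prefills, then calls the hook. */
        struct cache c = { .name = "demo", .size = 64, .align = 8 };

        if (cache_create(&c, 0) == 0)
                printf("%s: size=%zu align=%zu\n", c.name, c.size, c.align);
        return 0;
}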
void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
@@ -613,14 +614,28 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
}
EXPORT_SYMBOL(kmem_cache_size);
+int __kmem_cache_shutdown(struct kmem_cache *c)
+{
+ /* No way to check for remaining objects */
+ return 0;
+}
+
int kmem_cache_shrink(struct kmem_cache *d)
{
return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);
+struct kmem_cache kmem_cache_boot = {
+ .name = "kmem_cache",
+ .size = sizeof(struct kmem_cache),
+ .flags = SLAB_PANIC,
+ .align = ARCH_KMALLOC_MINALIGN,
+};
+
void __init kmem_cache_init(void)
{
+ kmem_cache = &kmem_cache_boot;
slab_state = UP;
}