Diffstat (limited to 'mm/slob.c')
 mm/slob.c | 57 +++++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 41 insertions(+), 16 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 5adc29cb58dd..c6933bc19bcd 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -21,7 +21,7 @@
*
* SLAB is emulated on top of SLOB by simply calling constructors and
* destructors for every SLAB allocation. Objects are returned with
- * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
+ * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is
* set, in which case the low-level allocator will fragment blocks to
* create the proper alignment. Again, objects of page-size or greater
* are allocated by calling __get_free_pages. As SLAB objects know
@@ -150,15 +150,6 @@ static void slob_free(void *block, int size)
spin_unlock_irqrestore(&slob_lock, flags);
}
-static int FASTCALL(find_order(int size));
-static int fastcall find_order(int size)
-{
- int order = 0;
- for ( ; size > 4096 ; size >>=1)
- order++;
- return order;
-}
-
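
get_order() is the generic helper that returns the smallest page order covering a given size, and unlike the removed loop it rounds up correctly: find_order(8193) yields 1 even though two 4096-byte pages cannot hold 8193 bytes, while get_order(8193) yields 2. A minimal user-space model of the get_order() contract, assuming a 4096-byte page as hard-coded in the removed code (a sketch, not the kernel's implementation):

/* User-space model of get_order() semantics, assuming PAGE_SIZE == 4096. */
#include <assert.h>
#include <stddef.h>

static int model_get_order(size_t size)
{
	int order = 0;

	size = (size - 1) >> 12;	/* whole pages needed, minus one */
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	assert(model_get_order(1) == 0);	/* 1 page  -> order 0 */
	assert(model_get_order(4096) == 0);
	assert(model_get_order(4097) == 1);	/* 2 pages -> order 1 */
	assert(model_get_order(8192) == 1);
	assert(model_get_order(8193) == 2);	/* 3 pages -> order 2 */
	return 0;
}
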
void *__kmalloc(size_t size, gfp_t gfp)
{
slob_t *m;
@@ -174,7 +165,7 @@ void *__kmalloc(size_t size, gfp_t gfp)
if (!bb)
return 0;
- bb->order = find_order(size);
+ bb->order = get_order(size);
bb->pages = (void *)__get_free_pages(gfp, bb->order);
if (bb->pages) {
@@ -190,6 +181,39 @@ void *__kmalloc(size_t size, gfp_t gfp)
}
EXPORT_SYMBOL(__kmalloc);
+/**
+ * krealloc - reallocate memory. The contents will remain unchanged.
+ *
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * The contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes. If @p is %NULL, krealloc()
+ * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
+ * %NULL pointer, the object pointed to is freed.
+ */
+void *krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+ void *ret;
+
+ if (unlikely(!p))
+ return kmalloc_track_caller(new_size, flags);
+
+ if (unlikely(!new_size)) {
+ kfree(p);
+ return NULL;
+ }
+
+ ret = kmalloc_track_caller(new_size, flags);
+ if (ret) {
+ memcpy(ret, p, min(new_size, ksize(p)));
+ kfree(p);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(krealloc);
+
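
A sketch of the calling convention the new function gives SLOB users; the grow_buf() helper is hypothetical and not part of this patch. The key property is that on allocation failure the original buffer is left intact:

#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical caller: grow *bufp to 'need' bytes, preserving contents. */
static int grow_buf(void **bufp, size_t need)
{
	void *tmp = krealloc(*bufp, need, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *bufp is untouched, still owned by the caller */
	*bufp = tmp;
	return 0;
}
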
void kfree(const void *block)
{
bigblock_t *bb, **last = &bigblocks;
@@ -219,7 +243,7 @@ void kfree(const void *block)
EXPORT_SYMBOL(kfree);
-unsigned int ksize(const void *block)
+size_t ksize(const void *block)
{
bigblock_t *bb;
unsigned long flags;
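
The switch from unsigned int to size_t is not cosmetic: krealloc() above computes min(new_size, ksize(p)), and the kernel's min() macro rejects operands of different types at compile time. Paraphrasing the definition in <linux/kernel.h>:

/* The (void) pointer comparison warns whenever x and y differ in type,
 * so min(new_size, ksize(p)) only compiles once ksize() returns size_t. */
#define min(x, y) ({			\
	typeof(x) _min1 = (x);		\
	typeof(y) _min2 = (y);		\
	(void) (&_min1 == &_min2);	\
	_min1 < _min2 ? _min1 : _min2; })
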
@@ -262,10 +286,11 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
c->ctor = ctor;
c->dtor = dtor;
/* ignore alignment unless it's forced */
- c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+ c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
if (c->align < align)
c->align = align;
- }
+ } else if (flags & SLAB_PANIC)
+ panic("Cannot create slab cache %s\n", name);
return c;
}
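
A sketch of a caller exercising both changed paths in this hunk: hardware-cache alignment via the renamed flag, and the new SLAB_PANIC behavior. struct foo and foo_cache are hypothetical names; the six-argument signature with a dtor matches this kernel's API:

#include <linux/init.h>
#include <linux/slab.h>

struct foo {
	int id;
	char name[16];
};

static struct kmem_cache *foo_cache;

static void __init foo_cache_init(void)
{
	/* SLAB_PANIC means this never returns NULL, so no error check;
	 * SLAB_HWCACHE_ALIGN now maps to SLOB_ALIGN under SLOB too. */
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				      NULL, NULL);
}
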
@@ -284,7 +309,7 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
if (c->size < PAGE_SIZE)
b = slob_alloc(c->size, flags, c->align);
else
- b = (void *)__get_free_pages(flags, find_order(c->size));
+ b = (void *)__get_free_pages(flags, get_order(c->size));
if (c->ctor)
c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
@@ -311,7 +336,7 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
if (c->size < PAGE_SIZE)
slob_free(b, c->size);
else
- free_pages((unsigned long)b, find_order(c->size));
+ free_pages((unsigned long)b, get_order(c->size));
}
EXPORT_SYMBOL(kmem_cache_free);
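
Finally, a sketch of the page path these last two hunks share: a cache whose object size is at least PAGE_SIZE now allocates and frees through get_order() on both sides. The cache and function names are hypothetical:

#include <linux/slab.h>

static void big_object_demo(void)
{
	struct kmem_cache *c;
	void *obj;

	/* 2 * PAGE_SIZE >= PAGE_SIZE, so both calls below use order 1 */
	c = kmem_cache_create("big_demo", 2 * PAGE_SIZE, 0, 0, NULL, NULL);
	if (!c)
		return;

	obj = kmem_cache_alloc(c, GFP_KERNEL);	/* __get_free_pages(flags, 1) */
	if (obj)
		kmem_cache_free(c, obj);	/* free_pages(..., 1) */

	kmem_cache_destroy(c);
}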