Diffstat (limited to 'include')
-rw-r--r--	include/linux/mempolicy.h	2
-rw-r--r--	include/linux/mm_types.h	11
-rw-r--r--	include/linux/slab.h	24
-rw-r--r--	include/linux/slab_def.h	12
-rw-r--r--	include/linux/slub_def.h	3
5 files changed, 42 insertions, 10 deletions
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 4aa42732e47f..95b738c7abff 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -215,7 +215,7 @@ extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
 extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
 extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
 				const nodemask_t *mask);
-extern unsigned slab_node(struct mempolicy *policy);
+extern unsigned slab_node(void);
 
 extern enum zone_type policy_zone;
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 704a626d94a0..074eb98fe15d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -53,7 +53,7 @@ struct page {
 	struct {
 		union {
 			pgoff_t index;		/* Our offset within mapping. */
-			void *freelist;		/* slub first free object */
+			void *freelist;		/* slub/slob first free object */
 		};
 
 		union {
@@ -91,11 +91,12 @@ struct page {
 					 */
 					atomic_t _mapcount;
 
-					struct {
+					struct { /* SLUB */
 						unsigned inuse:16;
 						unsigned objects:15;
 						unsigned frozen:1;
 					};
+					int units;	/* SLOB */
 				};
 				atomic_t _count;	/* Usage count, see below. */
 			};
@@ -117,6 +118,12 @@ struct page {
 			short int pobjects;
 #endif
 		};
+
+		struct list_head list;	/* slobs list of pages */
+		struct {		/* slab fields */
+			struct kmem_cache *slab_cache;
+			struct slab *slab_page;
+		};
 	};
 
 	/* Remainder is not double word aligned */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 67d5d94b783a..0dd2dfa7beca 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -93,6 +93,30 @@
 				(unsigned long)ZERO_SIZE_PTR)
 
 /*
+ * Common fields provided in kmem_cache by all slab allocators
+ * This struct is either used directly by the allocator (SLOB)
+ * or the allocator must include definitions for all fields
+ * provided in kmem_cache_common in their definition of kmem_cache.
+ *
+ * Once we can do anonymous structs (C11 standard) we could put a
+ * anonymous struct definition in these allocators so that the
+ * separate allocations in the kmem_cache structure of SLAB and
+ * SLUB is no longer needed.
+ */
+#ifdef CONFIG_SLOB
+struct kmem_cache {
+	unsigned int object_size;/* The original size of the object */
+	unsigned int size;	/* The aligned/padded/added on size */
+	unsigned int align;	/* Alignment as calculated */
+	unsigned long flags;	/* Active flags on the slab */
+	const char *name;	/* Slab name for sysfs */
+	int refcount;		/* Use counter */
+	void (*ctor)(void *);	/* Called on object slot creation */
+	struct list_head list;	/* List of all slab caches on the system */
+};
+#endif
+
+/*
  * struct kmem_cache related prototypes
  */
 void __init kmem_cache_init(void);
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index fbd1117fdfde..0c634fa376c9 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -27,7 +27,7 @@ struct kmem_cache {
 	unsigned int limit;
 	unsigned int shared;
 
-	unsigned int buffer_size;
+	unsigned int size;
 	u32 reciprocal_buffer_size;
 /* 2) touched by every alloc & free from the backend */
 
@@ -39,7 +39,7 @@ struct kmem_cache {
 	unsigned int gfporder;
 
 	/* force GFP flags, e.g. GFP_DMA */
-	gfp_t gfpflags;
+	gfp_t allocflags;
 
 	size_t colour;			/* cache colouring range */
 	unsigned int colour_off;	/* colour offset */
@@ -52,7 +52,10 @@ struct kmem_cache {
 
 /* 4) cache creation/removal */
 	const char *name;
-	struct list_head next;
+	struct list_head list;
+	int refcount;
+	int object_size;
+	int align;
 
 /* 5) statistics */
 #ifdef CONFIG_DEBUG_SLAB
@@ -73,12 +76,11 @@ struct kmem_cache {
 
 	/*
 	 * If debugging is enabled, then the allocator can add additional
-	 * fields and/or padding to every object. buffer_size contains the total
+	 * fields and/or padding to every object. size contains the total
 	 * object size including these internal fields, the following two
 	 * variables contain the offset to the user object and its size.
 	 */
 	int obj_offset;
-	int obj_size;
 #endif /* CONFIG_DEBUG_SLAB */
 
 /* 6) per-cpu/per-node data, touched during every alloc/free */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index c2f8c8bc56ed..df448adb7283 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -48,7 +48,6 @@ struct kmem_cache_cpu {
 	unsigned long tid;	/* Globally unique transaction id */
 	struct page *page;	/* The slab from which we are allocating */
 	struct page *partial;	/* Partially allocated frozen slabs */
-	int node;		/* The node of the page (or -1 for debug) */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
@@ -83,7 +82,7 @@ struct kmem_cache {
 	unsigned long flags;
 	unsigned long min_partial;
 	int size;		/* The size of an object including meta data */
-	int objsize;		/* The size of an object without meta data */
+	int object_size;	/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
 	int cpu_partial;	/* Number of per cpu partial objects to keep around */
 	struct kmem_cache_order_objects oo;
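
A note on the mm_types.h hunks above: struct page keeps one word of counters whose interpretation depends on which allocator owns the page. SLUB reads the packed inuse/objects/frozen bitfields, while SLOB reads the same storage as a plain int (units). Below is a minimal userspace sketch of that overlay; the field names come from the diff, but the union wrapper and the demo around it are illustrative, not kernel code.

#include <stdio.h>

/* Same overlay as in the struct page union above: SLUB's packed
 * counters and SLOB's units counter share one 32-bit word. */
union slab_counters {
	struct {		/* SLUB's view */
		unsigned inuse:16;
		unsigned objects:15;
		unsigned frozen:1;
	};
	int units;		/* SLOB's view of the same bits */
};

int main(void)
{
	union slab_counters c = { .units = 0 };

	c.inuse = 3;		/* objects handed out */
	c.objects = 16;		/* objects in the slab */
	c.frozen = 1;		/* slab is on a per-cpu list */
	/* Exactly one slab allocator is compiled into a kernel, so the
	 * two views never coexist; printing units just shows aliasing. */
	printf("units aliases the bitfields: %d\n", c.units);
	return 0;
}

The union merely lets mm_types.h describe every allocator's page metadata with a single struct page layout, at no extra cost in size.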
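The new comment in slab.h anticipates a further cleanup: once C11 anonymous structs can be relied on, the common fields could be written once and embedded in each allocator's struct kmem_cache instead of being duplicated. A sketch of that idea, assuming C11; the field names are taken from the diff, while the macro and the accessor below are hypothetical scaffolding, not part of this commit.

/* The shared member list, written once (illustrative subset). */
#define KMEM_CACHE_COMMON					\
	struct {						\
		unsigned int object_size; /* original size */	\
		unsigned int size;   /* padded/aligned size */	\
		unsigned int align;				\
		unsigned long flags;				\
		const char *name;				\
		int refcount;					\
	}

/* Each allocator pastes the common part in as a C11 anonymous
 * struct member, then appends its own fields; s->size and friends
 * still work without naming an intermediate member. */
struct kmem_cache {
	KMEM_CACHE_COMMON;
	/* allocator-specific fields would follow here */
};

static inline unsigned int cache_object_size(const struct kmem_cache *s)
{
	return s->object_size;	/* member of the anonymous struct */
}

Sharing the member list this way keeps a single point of truth while each allocator still gets a flat struct layout; the commit itself settles for duplicating the fields until C11 can be assumed.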