From 49e2258586b423684f03c278149ab46d8f8b6700 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Tue, 9 Aug 2011 16:12:27 -0500
Subject: slub: per cpu cache for partial pages

Allow filling out the rest of the kmem_cache_cpu cacheline with pointers
to partial pages. The partial page list is used in slab_free() to avoid
per node lock taking.

In __slab_alloc() we can then take multiple partial pages off the per
node partial list in one go, reducing node lock pressure.

We can also use the per cpu partial list in slab_alloc() to avoid
scanning partial lists for pages with free objects.

The main effect of a per cpu partial list is that the per node list_lock
is taken for batches of partial pages instead of individual ones.

Potential future enhancements:

1. The pickup from the partial list could perhaps be done without
   disabling interrupts with some work. The free path already puts the
   page into the per cpu partial list without disabling interrupts.
2. __slab_free() may have some code paths that could use optimization.

Performance:

                                Before          After
./hackbench 100 process 200000
        Time: 1953.047          1564.614
./hackbench 100 process 20000
        Time: 207.176           156.940
./hackbench 100 process 20000
        Time: 204.468           156.940
./hackbench 100 process 20000
        Time: 204.879           158.772
./hackbench 10 process 20000
        Time: 20.153            15.853
./hackbench 10 process 20000
        Time: 20.153            15.986
./hackbench 10 process 20000
        Time: 19.363            16.111
./hackbench 1 process 20000
        Time: 2.518             2.307
./hackbench 1 process 20000
        Time: 2.258             2.339
./hackbench 1 process 20000
        Time: 2.864             2.163

Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 include/linux/mm_types.h | 14 +++++++++++++-
 include/linux/slub_def.h |  4 ++++
 2 files changed, 17 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 774b8952deb4..7870e473033c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -79,9 +79,21 @@ struct page {
         };
 
         /* Third double word block */
-        struct list_head lru;           /* Pageout list, eg. active_list
+        union {
+                struct list_head lru;   /* Pageout list, eg. active_list
                                          * protected by zone->lru_lock !
                                          */
+                struct {                /* slub per cpu partial pages */
+                        struct page *next;      /* Next partial slab */
+#ifdef CONFIG_64BIT
+                        int pages;      /* Nr of partial slabs left */
+                        int pobjects;   /* Approximate # of objects */
+#else
+                        short int pages;
+                        short int pobjects;
+#endif
+                };
+        };
 
         /* Remainder is not double word aligned */
         union {

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f58d6413d230..4890ef79d752 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -36,12 +36,15 @@ enum stat_item {
         ORDER_FALLBACK,         /* Number of times fallback was necessary */
         CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
         CMPXCHG_DOUBLE_FAIL,    /* Number of times that cmpxchg double did not match */
+        CPU_PARTIAL_ALLOC,      /* Used cpu partial on alloc */
+        CPU_PARTIAL_FREE,       /* Used cpu partial on free */
         NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
         void **freelist;        /* Pointer to next available object */
         unsigned long tid;      /* Globally unique transaction id */
         struct page *page;      /* The slab from which we are allocating */
+        struct page *partial;   /* Partially allocated frozen slabs */
         int node;               /* The node of the page (or -1 for debug) */
 #ifdef CONFIG_SLUB_STATS
         unsigned stat[NR_SLUB_STAT_ITEMS];
@@ -79,6 +82,7 @@ struct kmem_cache {
         int size;               /* The size of an object including meta data */
         int objsize;            /* The size of an object without meta data */
         int offset;             /* Free pointer offset. */
+        int cpu_partial;        /* Number of per cpu partial pages to keep around */
         struct kmem_cache_order_objects oo;
 
         /* Allocation and freeing of slabs */
--
cgit v1.2.3


From 9f26490412cf15b04ac8f44a512ba0b09e774576 Mon Sep 17 00:00:00 2001
From: Alex Shi
Date: Thu, 1 Sep 2011 11:32:18 +0800
Subject: slub: correct comments error for per cpu partial

Correct comment errors that mistake the cpu partial object count for a
page count; the mixup may mislead readers.

Signed-off-by: Alex Shi
Reviewed-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 include/linux/slub_def.h | 2 +-
 mm/slub.c                | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 4890ef79d752..a32bcfdc7834 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -82,7 +82,7 @@ struct kmem_cache {
         int size;               /* The size of an object including meta data */
         int objsize;            /* The size of an object without meta data */
         int offset;             /* Free pointer offset. */
-        int cpu_partial;        /* Number of per cpu partial pages to keep around */
+        int cpu_partial;        /* Number of per cpu partial objects to keep around */
         struct kmem_cache_order_objects oo;
 
         /* Allocation and freeing of slabs */
diff --git a/mm/slub.c b/mm/slub.c
index 4982fb5c91de..8f687575d310 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3084,7 +3084,7 @@ static int kmem_cache_open(struct kmem_cache *s,
  *
  * A) The number of objects from per cpu partial slabs dumped to the
  *    per node list when we reach the limit.
- * B) The number of objects in partial partial slabs to extract from the
+ * B) The number of objects in cpu partial slabs to extract from the
  *    per node list when we run out of per cpu objects. We only fetch 50%
  *    to keep some capacity around for frees.
  */
--
cgit v1.2.3
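
To make the batching these patches describe concrete, here is a minimal,
self-contained userspace sketch. All toy_* names, the drain policy, and
the numbers are hypothetical stand-ins for illustration only; the real
kernel logic is put_cpu_partial() and unfreeze_partials() in mm/slub.c,
which additionally handle frozen slabs, this_cpu_cmpxchg and interrupt
state.

#include <stdio.h>

/*
 * Toy model of the per cpu partial chain added by the first patch.
 * toy_page mirrors the new struct page fields (next, pages, pobjects);
 * free_objects stands in for the page's count of free objects.
 */
struct toy_page {
        struct toy_page *next;  /* Next partial slab */
        int pages;              /* Nr of partial slabs left in chain */
        int pobjects;           /* Approximate # of objects in chain */
        int free_objects;       /* Free objects on this page */
};

struct toy_cache_cpu {
        struct toy_page *partial;       /* cf. kmem_cache_cpu->partial */
};

static int toy_cpu_partial = 13;        /* cf. kmem_cache->cpu_partial */
static int drains;                      /* Stands in for list_lock batches */

/*
 * Push a partial page onto the per cpu chain. Once the approximate
 * object count would pass the limit, the whole chain is "drained" to
 * the per node list under a single lock acquisition -- the batched
 * list_lock taking the first commit message describes.
 */
static void toy_put_cpu_partial(struct toy_cache_cpu *c, struct toy_page *page)
{
        struct toy_page *old = c->partial;

        if (old && old->pobjects + page->free_objects > toy_cpu_partial) {
                /* One lock acquisition would move old->pages slabs here */
                drains++;
                old = NULL;
        }
        page->next = old;
        page->pages = (old ? old->pages : 0) + 1;
        page->pobjects = (old ? old->pobjects : 0) + page->free_objects;
        c->partial = page;
}

int main(void)
{
        struct toy_cache_cpu c = { 0 };
        struct toy_page pages[8] = { 0 };

        for (int i = 0; i < 8; i++) {
                pages[i].free_objects = 3;
                toy_put_cpu_partial(&c, &pages[i]);
        }

        printf("drains: %d, slabs still chained: %d\n",
               drains, c.partial ? c.partial->pages : 0);
        return 0;
}

Built with a plain "cc" and run, the sketch reports one drain across
eight pushes, with four slabs left on the chain: the per node lock
would be taken once per batch rather than once per partial page, which
is the effect the hackbench numbers above measure.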