author     Mark Brown <broonie@kernel.org>    2014-10-20 20:55:07 +0400
committer  Mark Brown <broonie@kernel.org>    2014-10-20 21:27:32 +0400
commit     b7a40242c82cd73cfcea305f23e67d068dd8401a
tree       251b49d19cd7c371847ae1f951e1b537ca0e1c15 /mm/slab.c
parent     d26833bfce5e56017bea9f1f50838f20e18e7b7e
parent     9c6de47d53a3ce8df1642ae67823688eb98a190a
download   linux-b7a40242c82cd73cfcea305f23e67d068dd8401a.tar.xz
Merge branch 'fix/dw' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi into spi-dw
Conflicts:
drivers/spi/spi-dw-mid.c
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  15
1 file changed, 4 insertions, 11 deletions
diff --git a/mm/slab.c b/mm/slab.c
index a467b308c682..7c52b3890d25 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2124,7 +2124,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 int
 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
-	size_t left_over, freelist_size, ralign;
+	size_t left_over, freelist_size;
+	size_t ralign = BYTES_PER_WORD;
 	gfp_t gfp;
 	int err;
 	size_t size = cachep->size;
@@ -2157,14 +2158,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size &= ~(BYTES_PER_WORD - 1);
 	}
 
-	/*
-	 * Redzoning and user store require word alignment or possibly larger.
-	 * Note this will be overridden by architecture or caller mandated
-	 * alignment if either is greater than BYTES_PER_WORD.
-	 */
-	if (flags & SLAB_STORE_USER)
-		ralign = BYTES_PER_WORD;
-
 	if (flags & SLAB_RED_ZONE) {
 		ralign = REDZONE_ALIGN;
 		/* If redzoning, ensure that the second redzone is suitably
@@ -2994,7 +2987,7 @@ out:
 
 #ifdef CONFIG_NUMA
 /*
- * Try allocating on another node if PF_SPREAD_SLAB is a mempolicy is set.
+ * Try allocating on another node if PFA_SPREAD_SLAB is a mempolicy is set.
  *
  * If we are in_interrupt, then process context, including cpusets and
  * mempolicy, may not apply and should not be used for allocation policy.
@@ -3226,7 +3219,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
 {
 	void *objp;
 
-	if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) {
+	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
 		objp = alternate_node_alloc(cache, flags);
 		if (objp)
 			goto out;
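The mm/slab.c half of this merge carries two small changes. The first two hunks hoist the default for ralign into its initializer, which makes the old SLAB_STORE_USER special case a no-op and lets it be deleted; only SLAB_RED_ZONE still needs to raise the alignment. The last two hunks replace the raw PF_SPREAD_SLAB test on current->flags with the cpuset_do_slab_mem_spread() helper, apparently because the spread-slab state was converted to an atomic per-task flag (PFA_SPREAD_SLAB). A minimal userspace sketch of the alignment simplification follows; BYTES_PER_WORD, REDZONE_ALIGN, and the flag values here are illustrative stand-ins, not the kernel's definitions:

/*
 * Minimal userspace sketch of the alignment logic after the change.
 * BYTES_PER_WORD, REDZONE_ALIGN, and the flag values below are
 * illustrative stand-ins for the kernel's definitions.
 */
#include <stddef.h>
#include <stdio.h>

#define BYTES_PER_WORD   sizeof(void *)
#define REDZONE_ALIGN    (BYTES_PER_WORD > 8 ? BYTES_PER_WORD : 8)

#define SLAB_RED_ZONE    0x00000400UL  /* illustrative value */
#define SLAB_STORE_USER  0x00010000UL  /* illustrative value */

static size_t base_alignment(unsigned long flags)
{
        /*
         * ralign now starts at BYTES_PER_WORD, so the removed
         * "if (flags & SLAB_STORE_USER) ralign = BYTES_PER_WORD;"
         * branch would be a no-op; only red-zoning still needs
         * to raise the alignment.
         */
        size_t ralign = BYTES_PER_WORD;

        if (flags & SLAB_RED_ZONE)
                ralign = REDZONE_ALIGN;

        return ralign;
}

int main(void)
{
        printf("default:    %zu\n", base_alignment(0));
        printf("store_user: %zu\n", base_alignment(SLAB_STORE_USER));
        printf("red_zone:   %zu\n", base_alignment(SLAB_RED_ZONE));
        return 0;
}

Initializing ralign at its declaration keeps the one remaining override (red-zoning) in a single place, which is the point of the first hunk: same behavior, one fewer branch to reason about.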