author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-03 03:29:19 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-03 03:29:19 +0400
commit     f929d3995d61ee0ff5d35e9f3fbef3b80bf4ccaa (patch)
tree       12378638e4eb6cf56036029e4a6bc4e38478005c
parent     b601ce0fe3aafadba21cd42eb3d63660010e2a2b (diff)
parent     abe5f972912d086c080be4bde67750630b6fb38b (diff)
download   linux-f929d3995d61ee0ff5d35e9f3fbef3b80bf4ccaa.tar.xz
Merge branch 'akpm' (fixes from Andrew Morton)
Merge fixes from Andrew Morton:
"5 fixes"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
mm: page_alloc: fix zone allocation fairness on UP
perf: fix perf bug in fork()
MAINTAINERS: change git URL for mpc5xxx tree
mm: memcontrol: do not iterate uninitialized memcgs
ocfs2/dlm: should put mle when goto kill in dlm_assert_master_handler
-rw-r--r--  MAINTAINERS              |  2
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c |  4
-rw-r--r--  kernel/events/core.c     |  4
-rw-r--r--  kernel/fork.c            |  5
-rw-r--r--  mm/memcontrol.c          | 36
-rw-r--r--  mm/page_alloc.c          |  7
6 files changed, 45 insertions, 13 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index c3cfa1be846e..f10ed3914ea8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5484,7 +5484,7 @@ F: drivers/macintosh/
 LINUX FOR POWERPC EMBEDDED MPC5XXX
 M: Anatolij Gustschin <agust@denx.de>
 L: linuxppc-dev@lists.ozlabs.org
-T: git git://git.denx.de/linux-2.6-agust.git
+T: git git://git.denx.de/linux-denx-agust.git
 S: Maintained
 F: arch/powerpc/platforms/512x/
 F: arch/powerpc/platforms/52xx/
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index e3cfa0227026..12ba682fc53c 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2039,6 +2039,10 @@ kill:
      "and killing the other node now! This node is OK and can continue.\n");
  __dlm_print_one_lock_resource(res);
  spin_unlock(&res->spinlock);
+ spin_lock(&dlm->master_lock);
+ if (mle)
+  __dlm_put_mle(mle);
+ spin_unlock(&dlm->master_lock);
  spin_unlock(&dlm->spinlock);
  *ret_data = (void *)res;
  dlm_put(dlm);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d640a8b4dcbc..963bf139e2b2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7948,8 +7948,10 @@ int perf_event_init_task(struct task_struct *child)
 
  for_each_task_context_nr(ctxn) {
   ret = perf_event_init_context(child, ctxn);
-  if (ret)
+  if (ret) {
+   perf_event_free_task(child);
    return ret;
+  }
  }
 
  return 0;
diff --git a/kernel/fork.c b/kernel/fork.c
index 0cf9cdb6e491..a91e47d86de2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1360,7 +1360,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
   goto bad_fork_cleanup_policy;
  retval = audit_alloc(p);
  if (retval)
-  goto bad_fork_cleanup_policy;
+  goto bad_fork_cleanup_perf;
  /* copy all the process information */
  shm_init_task(p);
  retval = copy_semundo(clone_flags, p);
@@ -1566,8 +1566,9 @@ bad_fork_cleanup_semundo:
  exit_sem(p);
 bad_fork_cleanup_audit:
  audit_free(p);
-bad_fork_cleanup_policy:
+bad_fork_cleanup_perf:
  perf_event_free_task(p);
+bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
  mpol_put(p->mempolicy);
 bad_fork_cleanup_threadgroup_lock:
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 085dc6d2f876..28928ce9b07f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -292,6 +292,9 @@ struct mem_cgroup {
  /* vmpressure notifications */
  struct vmpressure vmpressure;
 
+ /* css_online() has been completed */
+ int initialized;
+
  /*
   * the counter to account for mem+swap usage.
   */
@@ -1099,10 +1102,21 @@ skip_node:
   * skipping css reference should be safe.
   */
  if (next_css) {
-  if ((next_css == &root->css) ||
-      ((next_css->flags & CSS_ONLINE) &&
-       css_tryget_online(next_css)))
-   return mem_cgroup_from_css(next_css);
+  struct mem_cgroup *memcg = mem_cgroup_from_css(next_css);
+
+  if (next_css == &root->css)
+   return memcg;
+
+  if (css_tryget_online(next_css)) {
+   /*
+    * Make sure the memcg is initialized:
+    * mem_cgroup_css_online() orders the the
+    * initialization against setting the flag.
+    */
+   if (smp_load_acquire(&memcg->initialized))
+    return memcg;
+   css_put(next_css);
+  }
 
   prev_css = next_css;
   goto skip_node;
@@ -5549,6 +5563,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 {
  struct mem_cgroup *memcg = mem_cgroup_from_css(css);
  struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
+ int ret;
 
  if (css->id > MEM_CGROUP_ID_MAX)
   return -ENOSPC;
@@ -5585,7 +5600,18 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
  }
  mutex_unlock(&memcg_create_mutex);
 
- return memcg_init_kmem(memcg, &memory_cgrp_subsys);
+ ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
+ if (ret)
+  return ret;
+
+ /*
+  * Make sure the memcg is initialized: mem_cgroup_iter()
+  * orders reading memcg->initialized against its callers
+  * reading the memcg members.
+  */
+ smp_store_release(&memcg->initialized, 1);
+
+ return 0;
 }
 
 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 18cee0d4c8a2..eee961958021 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1612,7 +1612,7 @@ again:
  }
 
  __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
- if (zone_page_state(zone, NR_ALLOC_BATCH) == 0 &&
+ if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
     !zone_is_fair_depleted(zone))
   zone_set_flag(zone, ZONE_FAIR_DEPLETED);
 
@@ -5701,9 +5701,8 @@ static void __setup_per_zone_wmarks(void)
   zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 
  __mod_zone_page_state(zone, NR_ALLOC_BATCH,
-  high_wmark_pages(zone) -
-  low_wmark_pages(zone) -
-  zone_page_state(zone, NR_ALLOC_BATCH));
+  high_wmark_pages(zone) - low_wmark_pages(zone) -
+  atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
 
  setup_zone_migrate_reserve(zone);
  spin_unlock_irqrestore(&zone->lock, flags);
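The memcontrol hunks above form a publish/consume pair: mem_cgroup_css_online() finishes initialization and only then sets memcg->initialized with smp_store_release(), while mem_cgroup_iter() refuses to return a group until smp_load_acquire() observes that flag, so an iterator can never see a half-built memcg. Below is a minimal userspace sketch of the same pattern, assuming C11 atomics as stand-ins for the kernel's smp_store_release()/smp_load_acquire(); the struct and function names are illustrative, not taken from the kernel tree.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical object guarded by an "initialized" flag, modelled on the memcg fix. */
struct object {
        int data;                /* plain fields set up before publication */
        atomic_int initialized;  /* written last, with release semantics */
};

/* Writer: complete all setup, then publish the flag with a release store. */
static void object_online(struct object *obj)
{
        obj->data = 42;          /* the actual initialization work */
        atomic_store_explicit(&obj->initialized, 1, memory_order_release);
}

/* Reader: acquire-load the flag before trusting any other field. */
static bool object_ready(struct object *obj)
{
        return atomic_load_explicit(&obj->initialized, memory_order_acquire);
}

int main(void)
{
        struct object obj = { .data = 0, .initialized = 0 };

        object_online(&obj);
        if (object_ready(&obj))
                printf("data = %d\n", obj.data);   /* guaranteed to print 42 */
        return 0;
}

The point the patch relies on is the release/acquire pairing: a reader that observes initialized == 1 is also guaranteed to observe every store made before the release, which is why mem_cgroup_iter() may safely dereference the memcg once the flag check passes.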