author		Paul Menage <menage@google.com>		2007-07-16 10:40:11 +0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-16 20:05:43 +0400
commit		c2aef333c98b41eeb0f0d55b7faa7d4625a6160b
tree		39917beefd3cddf1c5c2c68894346efe0cc7dff1 /kernel
parent		b2ff457b09554813a7df9e0cd30d5a169a257419
download	linux-c2aef333c98b41eeb0f0d55b7faa7d4625a6160b.tar.xz
Reduce cpuset.c write_lock_irq() to read_lock()
cpuset.c:update_nodemask() uses a write_lock_irq() on tasklist_lock to block concurrent forks; a read_lock() suffices and is less intrusive.

Signed-off-by: Paul Menage <menage@google.com>
Acked-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
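As a purely illustrative sketch (not part of the patch; the function name is hypothetical and the usual kernel declarations from <linux/sched.h> are assumed), this is the locking pattern the change relies on: fork() links the new task into the task list under write_lock_irq(&tasklist_lock), so a walker that only reads the list can take the shared side of the rwlock and still keep fork out, without disabling interrupts or excluding other readers.

	static void walk_tasks_sketch(void)
	{
		struct task_struct *g, *p;

		read_lock(&tasklist_lock);	/* fork() needs the write side, so it must wait */
		do_each_thread(g, p) {
			/* inspect p here; the thread list cannot change under us */
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

update_nodemask() follows exactly this shape, which is why the write_lock_irq()/write_unlock_irq() pair in the old code was stronger than needed.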
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpuset.c	6
1 file changed, 3 insertions, 3 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4c49188cc49b..824b1c01f410 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -981,10 +981,10 @@ static int update_nodemask(struct cpuset *cs, char *buf)
mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
if (!mmarray)
goto done;
- write_lock_irq(&tasklist_lock); /* block fork */
+ read_lock(&tasklist_lock); /* block fork */
if (atomic_read(&cs->count) <= ntasks)
break; /* got enough */
- write_unlock_irq(&tasklist_lock); /* try again */
+ read_unlock(&tasklist_lock); /* try again */
kfree(mmarray);
}
@@ -1006,7 +1006,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
continue;
mmarray[n++] = mm;
} while_each_thread(g, p);
- write_unlock_irq(&tasklist_lock);
+ read_unlock(&tasklist_lock);
/*
* Now that we've dropped the tasklist spinlock, we can