author    David S. Miller <davem@davemloft.net>  2009-03-05 13:06:47 +0300
committer David S. Miller <davem@davemloft.net>  2009-03-05 13:06:47 +0300
commit    508827ff0ac3981d420edac64a70de7f4e304d38 (patch)
tree      b0cee8ddef9f0ceab68c388e4ae46b7295eb2cb5 /kernel
parent    2c3c3d02f28801d7ad2da4952b2c7ca6621ef221 (diff)
parent    72e2240f181871675d3a979766330c91d48a1673 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/tokenring/tmspci.c
	drivers/net/ucc_geth_mii.c
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcuclassic.c   4
-rw-r--r--  kernel/rcupdate.c    12
-rw-r--r--  kernel/rcupreempt.c   3
-rw-r--r--  kernel/rcutree.c      4
-rw-r--r--  kernel/sched.c       15
-rw-r--r--  kernel/seccomp.c      7
-rw-r--r--  kernel/sys.c         31
-rw-r--r--  kernel/user.c        18

8 files changed, 73 insertions, 21 deletions
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index bd5a9003497c..654c640a6b9c 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -679,8 +679,8 @@ int rcu_needs_cpu(int cpu)
void rcu_check_callbacks(int cpu, int user)
{
if (user ||
- (idle_cpu(cpu) && !in_softirq() &&
- hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+ (idle_cpu(cpu) && rcu_scheduler_active &&
+ !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
/*
* Get here if this CPU took its interrupt from user
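[Note: the rcu_scheduler_active flag tested above is introduced by this merge in kernel/rcupdate.c (below); its declaration falls outside the 'kernel' diffstat. A minimal sketch of the assumed companion declarations, presumably in include/linux/rcupdate.h:]

    /* Sketch (assumed, not part of this diffstat): declarations the
     * kernel/ changes rely on, presumably in include/linux/rcupdate.h. */
    extern int rcu_scheduler_active;
    extern void rcu_scheduler_starting(void);
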
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index d92a76a881aa..cae8a059cf47 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,6 +44,7 @@
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/kernel_stat.h>
enum rcu_barrier {
RCU_BARRIER_STD,
@@ -55,6 +56,7 @@ static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
+int rcu_scheduler_active __read_mostly;
/*
* Awaken the corresponding synchronize_rcu() instance now that a
@@ -80,6 +82,10 @@ void wakeme_after_rcu(struct rcu_head *head)
void synchronize_rcu(void)
{
struct rcu_synchronize rcu;
+
+ if (rcu_blocking_is_gp())
+ return;
+
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
call_rcu(&rcu.head, wakeme_after_rcu);
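[Note: rcu_blocking_is_gp() is likewise not visible in this 'kernel' diffstat. A minimal sketch of its assumed definition elsewhere in this series:]

    /* Sketch (assumed): during early boot only one CPU is online and
     * nothing has blocked yet, so blocking already implies a grace
     * period and synchronize_rcu() may return immediately. */
    static inline int rcu_blocking_is_gp(void)
    {
        return num_online_cpus() == 1;
    }
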
@@ -175,3 +181,9 @@ void __init rcu_init(void)
__rcu_init();
}
+void rcu_scheduler_starting(void)
+{
+ WARN_ON(num_online_cpus() != 1);
+ WARN_ON(nr_context_switches() > 0);
+ rcu_scheduler_active = 1;
+}
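[Note: rcu_scheduler_starting() must run once, after exactly one CPU is up but before the first context switch, which the two WARN_ONs assert. The call site is outside this diffstat; a hedged sketch of the assumed hook in init/main.c:]

    /* Sketch (assumed call site, init/main.c): flip the flag just
     * before the first kernel thread can be spawned and scheduled. */
    static noinline void __init_refok rest_init(void)
    {
        rcu_scheduler_starting();
        kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
        /* ... */
    }
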
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 33cfc50781f9..5d59e850fb71 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -1181,6 +1181,9 @@ void __synchronize_sched(void)
{
struct rcu_synchronize rcu;
+ if (num_online_cpus() == 1)
+ return; /* blocking is gp if only one CPU! */
+
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
call_rcu_sched(&rcu.head, wakeme_after_rcu);
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index b2fd602a6f6f..97ce31579ec0 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -948,8 +948,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
void rcu_check_callbacks(int cpu, int user)
{
if (user ||
- (idle_cpu(cpu) && !in_softirq() &&
- hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+ (idle_cpu(cpu) && rcu_scheduler_active &&
+ !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
/*
* Get here if this CPU took its interrupt from user
diff --git a/kernel/sched.c b/kernel/sched.c
index 410eec404133..8e2558c2ba67 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
ktime_t now;
- if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)
+ if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
return;
if (hrtimer_active(&rt_b->rt_period_timer))
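[Note: the old test had the guard inverted: it skipped arming the timer only when throttling was enabled with infinite runtime, and so needlessly armed it whenever RT bandwidth control was globally disabled. The fixed test is the De Morgan negation of "the timer is useful"; an equivalent, illustrative-only spelling:]

    /* Illustrative rewrite of the fixed guard, not part of the patch:
     * arm the period timer iff throttling is enabled AND the runtime
     * is finite; otherwise there is nothing to replenish. */
    if (!(rt_bandwidth_enabled() && rt_b->rt_runtime != RUNTIME_INF))
        return;
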
@@ -9224,6 +9224,16 @@ static int sched_rt_global_constraints(void)
return ret;
}
+
+int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
+{
+ /* Don't accept realtime tasks when there is no way for them to run */
+ if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
+ return 0;
+
+ return 1;
+}
+
#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
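[Note: sched_rt_can_attach() is consumed both by the cgroup attach path below and by kernel/user.c at the end of this patch; its prototype falls outside the 'kernel' diffstat. A sketch of the assumed companion declaration, presumably in include/linux/sched.h:]

    /* Sketch (assumed, presumably include/linux/sched.h), guarded
     * roughly like its callers: */
    #ifdef CONFIG_RT_GROUP_SCHED
    extern int sched_rt_can_attach(struct task_group *tg,
                                   struct task_struct *tsk);
    #endif
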
@@ -9317,8 +9327,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct task_struct *tsk)
{
#ifdef CONFIG_RT_GROUP_SCHED
- /* Don't accept realtime tasks when there is no way for them to run */
- if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
+ if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
return -EINVAL;
#else
/* We don't support RT-tasks being in separate groups */
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index ad64fcb731f2..57d4b13b631d 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -8,6 +8,7 @@
#include <linux/seccomp.h>
#include <linux/sched.h>
+#include <linux/compat.h>
/* #define SECCOMP_DEBUG 1 */
#define NR_SECCOMP_MODES 1
@@ -22,7 +23,7 @@ static int mode1_syscalls[] = {
0, /* null terminated */
};
-#ifdef TIF_32BIT
+#ifdef CONFIG_COMPAT
static int mode1_syscalls_32[] = {
__NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
0, /* null terminated */
@@ -37,8 +38,8 @@ void __secure_computing(int this_syscall)
switch (mode) {
case 1:
syscall = mode1_syscalls;
-#ifdef TIF_32BIT
- if (test_thread_flag(TIF_32BIT))
+#ifdef CONFIG_COMPAT
+ if (is_compat_task())
syscall = mode1_syscalls_32;
#endif
do {
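[Note: TIF_32BIT is defined only on some 64-bit architectures (e.g. sparc64, powerpc); x86-64 spells the same idea TIF_IA32, so the old #ifdef compiled the 32-bit syscall table away there, leaving the 32/64 seccomp syscall hole. is_compat_task() from <linux/compat.h> is the arch-neutral test; a sketch of the assumed x86-64 flavor from the same fix series, outside this diffstat:]

    /* Sketch (assumed, arch/x86/include/asm/compat.h): each
     * compat-capable architecture supplies its own test for a
     * 32-bit user process. */
    static inline int is_compat_task(void)
    {
        return test_thread_flag(TIF_IA32);
    }
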
diff --git a/kernel/sys.c b/kernel/sys.c
index f145c415bc16..37f458e6882a 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -559,7 +559,7 @@ error:
abort_creds(new);
return retval;
}
-
+
/*
* change the user struct in a credentials set to match the new UID
*/
@@ -571,6 +571,11 @@ static int set_user(struct cred *new)
if (!new_user)
return -EAGAIN;
+ if (!task_can_switch_user(new_user, current)) {
+ free_uid(new_user);
+ return -EINVAL;
+ }
+
if (atomic_read(&new_user->processes) >=
current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
new_user != INIT_USER) {
@@ -631,10 +636,11 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
goto error;
}
- retval = -EAGAIN;
- if (new->uid != old->uid && set_user(new) < 0)
- goto error;
-
+ if (new->uid != old->uid) {
+ retval = set_user(new);
+ if (retval < 0)
+ goto error;
+ }
if (ruid != (uid_t) -1 ||
(euid != (uid_t) -1 && euid != old->uid))
new->suid = new->euid;
@@ -680,9 +686,10 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
retval = -EPERM;
if (capable(CAP_SETUID)) {
new->suid = new->uid = uid;
- if (uid != old->uid && set_user(new) < 0) {
- retval = -EAGAIN;
- goto error;
+ if (uid != old->uid) {
+ retval = set_user(new);
+ if (retval < 0)
+ goto error;
}
} else if (uid != old->uid && uid != new->suid) {
goto error;
@@ -734,11 +741,13 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
goto error;
}
- retval = -EAGAIN;
if (ruid != (uid_t) -1) {
new->uid = ruid;
- if (ruid != old->uid && set_user(new) < 0)
- goto error;
+ if (ruid != old->uid) {
+ retval = set_user(new);
+ if (retval < 0)
+ goto error;
+ }
}
if (euid != (uid_t) -1)
new->euid = euid;
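[Note: the net effect of the sys.c changes is that set_user()'s return value now reaches user space instead of being flattened to -EAGAIN, so the new -EINVAL from task_can_switch_user() (added in kernel/user.c below) is distinguishable. A hedged user-space sketch of telling the two failures apart; uid 1000 and the RT-bandwidth setup are assumptions, not part of the patch:]

    #include <errno.h>
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct sched_param sp = { .sched_priority = 1 };

        /* The new check only rejects rt_task() callers whose target
         * user has zero RT runtime, so become a realtime task first
         * (needs root or CAP_SYS_NICE). */
        if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
            perror("sched_setscheduler");

        if (setuid(1000) == -1) {   /* 1000: hypothetical target uid */
            if (errno == EINVAL)
                puts("refused: target user has no RT bandwidth");
            else if (errno == EAGAIN)
                puts("refused: RLIMIT_NPROC would be exceeded");
            else
                perror("setuid");
        }
        return 0;
    }
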
diff --git a/kernel/user.c b/kernel/user.c
index 3551ac742395..6a9b696128c8 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -362,6 +362,24 @@ static void free_user(struct user_struct *up, unsigned long flags)
#endif
+#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
+/*
+ * We need to check if a setuid can take place. This function should be called
+ * before successfully completing the setuid.
+ */
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+
+ return sched_rt_can_attach(up->tg, tsk);
+
+}
+#else
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+ return 1;
+}
+#endif
+
/*
* Locate the user_struct for the passed UID. If found, take a ref on it. The
* caller must undo that ref with free_uid().