Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c                      8
-rw-r--r--  kernel/exit.c                        4
-rw-r--r--  kernel/irq/handle.c                  1
-rw-r--r--  kernel/mutex-debug.c                 1
-rw-r--r--  kernel/perf_event.c                  2
-rw-r--r--  kernel/power/suspend_test.c          5
-rw-r--r--  kernel/sched.c                      13
-rw-r--r--  kernel/sched_fair.c                 27
-rw-r--r--  kernel/sys.c                         2
-rw-r--r--  kernel/sysctl_check.c                2
-rw-r--r--  kernel/time/timekeeping.c            1
-rw-r--r--  kernel/trace/trace.c                 2
-rw-r--r--  kernel/trace/trace_events_filter.c   3
-rw-r--r--  kernel/workqueue.c                  18
14 files changed, 56 insertions, 33 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ca83b73fba19..0249f4be9b5c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1710,14 +1710,13 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
return -EFAULT;
buffer[nbytes] = 0; /* nul-terminate */
- strstrip(buffer);
if (cft->write_u64) {
- u64 val = simple_strtoull(buffer, &end, 0);
+ u64 val = simple_strtoull(strstrip(buffer), &end, 0);
if (*end)
return -EINVAL;
retval = cft->write_u64(cgrp, cft, val);
} else {
- s64 val = simple_strtoll(buffer, &end, 0);
+ s64 val = simple_strtoll(strstrip(buffer), &end, 0);
if (*end)
return -EINVAL;
retval = cft->write_s64(cgrp, cft, val);
@@ -1753,8 +1752,7 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
}
buffer[nbytes] = 0; /* nul-terminate */
- strstrip(buffer);
- retval = cft->write_string(cgrp, cft, buffer);
+ retval = cft->write_string(cgrp, cft, strstrip(buffer));
if (!retval)
retval = nbytes;
out:
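
Editor's note on the hunk above: strstrip() removes trailing whitespace in place but handles leading whitespace only through its return value (a pointer advanced past the leading blanks), so the parse has to start from that returned pointer, exactly as the change does. A minimal, hypothetical sketch of the resulting pattern (not the cgroup code itself):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>

/*
 * Hypothetical helper: parse a value out of a user-supplied buffer that
 * may carry leading and trailing whitespace.  strstrip() NUL-terminates
 * the trailing blanks in place and returns a pointer past the leading
 * ones, so the conversion must start at the returned pointer.
 */
static u64 parse_trimmed_u64(char *buf, int *err)
{
        char *end;
        u64 val = simple_strtoull(strstrip(buf), &end, 0);

        *err = *end ? -EINVAL : 0;      /* e.g. "  42  " parses to 42 */
        return val;
}
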
diff --git a/kernel/exit.c b/kernel/exit.c
index e61891f80123..f7864ac2ecc1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -359,10 +359,8 @@ void __set_special_pids(struct pid *pid)
{
struct task_struct *curr = current->group_leader;
- if (task_session(curr) != pid) {
+ if (task_session(curr) != pid)
change_pid(curr, PIDTYPE_SID, pid);
- proc_sid_connector(curr);
- }
if (task_pgrp(curr) != pid)
change_pid(curr, PIDTYPE_PGID, pid);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index a81cf80554db..17c71bb565c6 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,6 +11,7 @@
*/
#include <linux/irq.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 50d022e5a560..ec815a960b5d 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/poison.h>
+#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9d0b5c665883..afb7ef3dbc44 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1355,7 +1355,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
u64 interrupts, freq;
spin_lock(&ctx->lock);
- list_for_each_entry(event, &ctx->group_list, group_entry) {
+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
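
Editor's note: the hunk above switches from walking only the group leaders on ctx->group_list to walking every event on ctx->event_list with the RCU-aware iterator. A hedged, generic sketch of the list_for_each_entry_rcu() pattern, using a hypothetical struct item (readers need rcu_read_lock() or an equivalent exclusion guarantee, such as the ctx->lock held above):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

/* Hypothetical node type for the sketch. */
struct item {
        int value;
        struct list_head link;
};

/*
 * Walk an RCU-protected list.  list_for_each_entry_rcu() may be used
 * under rcu_read_lock(), or under a lock that excludes concurrent list
 * mutation.
 */
static int sum_items(struct list_head *head)
{
        struct item *it;
        int sum = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(it, head, link)
                sum += it->value;
        rcu_read_unlock();

        return sum;
}
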
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 17d8bb1acf9c..25596e450ac7 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -19,7 +19,7 @@
* The time it takes is system-specific though, so when we test this
* during system bootup we allow a LOT of time.
*/
-#define TEST_SUSPEND_SECONDS 5
+#define TEST_SUSPEND_SECONDS 10
static unsigned long suspend_test_start_time;
@@ -49,7 +49,8 @@ void suspend_test_finish(const char *label)
* has some performance issues. The stack dump of a WARN_ON
* is more likely to get the right attention than a printk...
*/
- WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label);
+ WARN(msec > (TEST_SUSPEND_SECONDS * 1000),
+ "Component: %s, time: %u\n", label, msec);
}
/*
diff --git a/kernel/sched.c b/kernel/sched.c
index 76c0e9691fc0..e88689522e66 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -676,6 +676,7 @@ inline void update_rq_clock(struct rq *rq)
/**
* runqueue_is_locked
+ * @cpu: the processor in question.
*
* Returns true if the current cpu runqueue is locked.
* This interface allows printk to be called with the runqueue lock
@@ -2311,7 +2312,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
{
int cpu, orig_cpu, this_cpu, success = 0;
unsigned long flags;
- struct rq *rq;
+ struct rq *rq, *orig_rq;
if (!sched_feat(SYNC_WAKEUPS))
wake_flags &= ~WF_SYNC;
@@ -2319,7 +2320,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
this_cpu = get_cpu();
smp_wmb();
- rq = task_rq_lock(p, &flags);
+ rq = orig_rq = task_rq_lock(p, &flags);
update_rq_clock(rq);
if (!(p->state & state))
goto out;
@@ -2350,6 +2351,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
set_task_cpu(p, cpu);
rq = task_rq_lock(p, &flags);
+
+ if (rq != orig_rq)
+ update_rq_clock(rq);
+
WARN_ON(p->state != TASK_WAKING);
cpu = task_cpu(p);
@@ -3656,6 +3661,7 @@ static void update_group_power(struct sched_domain *sd, int cpu)
/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: The sched_domain whose statistics are to be updated.
* @group: sched_group whose statistics are to be updated.
* @this_cpu: Cpu for which load balance is currently performed.
* @idle: Idle status of this_cpu
@@ -6718,9 +6724,6 @@ EXPORT_SYMBOL(yield);
/*
* This task is about to go to sleep on IO. Increment rq->nr_iowait so
* that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
*/
void __sched io_schedule(void)
{
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4e777b47eeda..c32c3e643daa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -861,12 +861,21 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
struct sched_entity *se = __pick_next_entity(cfs_rq);
+ struct sched_entity *buddy;
- if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
- return cfs_rq->next;
+ if (cfs_rq->next) {
+ buddy = cfs_rq->next;
+ cfs_rq->next = NULL;
+ if (wakeup_preempt_entity(buddy, se) < 1)
+ return buddy;
+ }
- if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
- return cfs_rq->last;
+ if (cfs_rq->last) {
+ buddy = cfs_rq->last;
+ cfs_rq->last = NULL;
+ if (wakeup_preempt_entity(buddy, se) < 1)
+ return buddy;
+ }
return se;
}
@@ -1654,16 +1663,6 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
do {
se = pick_next_entity(cfs_rq);
- /*
- * If se was a buddy, clear it so that it will have to earn
- * the favour again.
- *
- * If se was not a buddy, clear the buddies because neither
- * was elegible to run, let them earn it again.
- *
- * IOW. unconditionally clear buddies.
- */
- __clear_buddies(cfs_rq, NULL);
set_next_entity(cfs_rq, se);
cfs_rq = group_cfs_rq(se);
} while (cfs_rq);
diff --git a/kernel/sys.c b/kernel/sys.c
index 255475d163e0..1828f8d10844 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1110,6 +1110,8 @@ SYSCALL_DEFINE0(setsid)
err = session;
out:
write_unlock_irq(&tasklist_lock);
+ if (err > 0)
+ proc_sid_connector(group_leader);
return err;
}
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index b38423ca711a..b6e7aaea4604 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -1521,7 +1521,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
if (!table->ctl_name && table->strategy)
set_fail(&fail, table, "Strategy without ctl_name");
#endif
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_PROC_SYSCTL
if (table->procname && !table->proc_handler)
set_fail(&fail, table, "No proc_handler");
#endif
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index fb0f46fa1ecd..c3a4e2907eaa 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -13,6 +13,7 @@
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
+#include <linux/sched.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 45068269ebb1..c820b0310a12 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1393,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr,
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
- return trace_array_printk(&global_trace, ip, fmt, args);
+ return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 23245785927f..98a6cc5c64ed 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -933,8 +933,9 @@ static void postfix_clear(struct filter_parse_state *ps)
while (!list_empty(&ps->postfix)) {
elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
- kfree(elt->operand);
list_del(&elt->list);
+ kfree(elt->operand);
+ kfree(elt);
}
}
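
Editor's note: the fix above unlinks each element before freeing it and also frees the element itself, not just its operand, closing a leak. A hedged, self-contained sketch of that drain pattern with hypothetical types (not the trace filter code):

#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical element type for the sketch. */
struct node {
        char *payload;
        struct list_head list;
};

/*
 * Drain a list of dynamically allocated elements: unlink each node
 * first, then free its payload and finally the node itself, so nothing
 * is leaked and the list never references freed memory.
 */
static void drain_list(struct list_head *head)
{
        struct node *n;

        while (!list_empty(head)) {
                n = list_first_entry(head, struct node, list);
                list_del(&n->list);
                kfree(n->payload);
                kfree(n);
        }
}
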
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index addfe2df93b1..47cdd7e76f2b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -640,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
EXPORT_SYMBOL(schedule_delayed_work);
/**
+ * flush_delayed_work - block until a dwork_struct's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+ if (del_timer_sync(&dwork->timer)) {
+ struct cpu_workqueue_struct *cwq;
+ cwq = wq_per_cpu(keventd_wq, get_cpu());
+ __queue_work(cwq, &dwork->work);
+ put_cpu();
+ }
+ flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
* schedule_delayed_work_on - queue work in global workqueue on CPU after delay
* @cpu: cpu to use
* @dwork: job to be done
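
Editor's note: a hedged usage sketch for the newly exported flush_delayed_work() above, built around a hypothetical driver's delayed work item; this illustrates the intended calling pattern and is not code from the patch:

#include <linux/workqueue.h>

/* Hypothetical deferred handler for the sketch. */
static void my_handler(struct work_struct *work)
{
        /* ... the deferred processing ... */
}

static DECLARE_DELAYED_WORK(my_dwork, my_handler);

/*
 * Teardown path: force any still-pending timer to expire immediately and
 * wait until the handler has finished, so the structures it touches can
 * be freed safely afterwards.
 */
static void my_teardown(void)
{
        flush_delayed_work(&my_dwork);
}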