Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--  mm/oom_kill.c  143
1 file changed, 75 insertions(+), 68 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5a58778c91d4..eda2e2a0bdc6 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -64,21 +64,33 @@ int sysctl_oom_dump_tasks = 1;
*/
DEFINE_MUTEX(oom_lock);
+static inline bool is_memcg_oom(struct oom_control *oc)
+{
+ return oc->memcg != NULL;
+}
+
#ifdef CONFIG_NUMA
/**
- * has_intersects_mems_allowed() - check task eligibility for kill
+ * oom_cpuset_eligible() - check task eligibility for kill
* @start: task struct of which task to consider
* @mask: nodemask passed to page allocator for mempolicy ooms
*
* Task eligibility is determined by whether or not a candidate task, @tsk,
* shares the same mempolicy nodes as current if it is bound by such a policy
* and whether or not it has the same set of allowed cpuset nodes.
+ *
+ * This function assumes oom-killer context and that 'current' has triggered
+ * the oom-killer.
*/
-static bool has_intersects_mems_allowed(struct task_struct *start,
- const nodemask_t *mask)
+static bool oom_cpuset_eligible(struct task_struct *start,
+ struct oom_control *oc)
{
struct task_struct *tsk;
bool ret = false;
+ const nodemask_t *mask = oc->nodemask;
+
+ if (is_memcg_oom(oc))
+ return true;
rcu_read_lock();
for_each_thread(start, tsk) {
@@ -105,8 +117,7 @@ static bool has_intersects_mems_allowed(struct task_struct *start,
return ret;
}
#else
-static bool has_intersects_mems_allowed(struct task_struct *tsk,
- const nodemask_t *mask)
+static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
{
return true;
}
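Taken together, the hunks above give oom_cpuset_eligible() a single contract: memcg OOMs never filter by cpuset or mempolicy, and !CONFIG_NUMA builds treat every task as eligible. A minimal userspace sketch of that decision order (the struct and the intersects stub are illustrative stand-ins, not kernel API):

#include <stdbool.h>
#include <stddef.h>

/* Illustrative stand-ins for the kernel types used in this diff. */
struct oc_model {
	void *memcg;		/* non-NULL means a memcg-constrained OOM */
	const void *nodemask;	/* mempolicy nodemask, may be NULL */
};

/* Hypothetical stub for the mempolicy/cpuset intersection check. */
static bool task_intersects_mems(const struct oc_model *oc)
{
	(void)oc;
	return true;	/* placeholder for the per-thread nodemask walk */
}

static bool oom_cpuset_eligible_model(const struct oc_model *oc)
{
	if (oc->memcg != NULL)		/* is_memcg_oom(): skip NUMA filtering */
		return true;
	return task_intersects_mems(oc);	/* global OOM: consult the mask */
}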
@@ -146,28 +157,13 @@ static inline bool is_sysrq_oom(struct oom_control *oc)
return oc->order == -1;
}
-static inline bool is_memcg_oom(struct oom_control *oc)
-{
- return oc->memcg != NULL;
-}
-
/* return true if the task is not adequate as candidate victim task. */
-static bool oom_unkillable_task(struct task_struct *p,
- struct mem_cgroup *memcg, const nodemask_t *nodemask)
+static bool oom_unkillable_task(struct task_struct *p)
{
if (is_global_init(p))
return true;
if (p->flags & PF_KTHREAD)
return true;
-
- /* When mem_cgroup_out_of_memory() and p is not member of the group */
- if (memcg && !task_in_mem_cgroup(p, memcg))
- return true;
-
- /* p may not have freeable memory in nodemask */
- if (!has_intersects_mems_allowed(p, nodemask))
- return true;
-
return false;
}
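After this hunk, oom_unkillable_task() is a pure predicate on the task alone; the memcg and nodemask checks it used to perform move to its callers. A compact model of the remaining logic (the two flags stand in for is_global_init(p) and p->flags & PF_KTHREAD):

#include <stdbool.h>

struct task_model {
	bool is_global_init;	/* init (pid 1) is never a victim */
	bool is_kthread;	/* kernel threads have no killable mm */
};

static bool oom_unkillable_task_model(const struct task_model *p)
{
	return p->is_global_init || p->is_kthread;
}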
@@ -194,20 +190,17 @@ static bool is_dump_unreclaim_slabs(void)
* oom_badness - heuristic function to determine which candidate task to kill
* @p: task struct of which task we should calculate
* @totalpages: total present RAM allowed for page allocation
- * @memcg: task's memory controller, if constrained
- * @nodemask: nodemask passed to page allocator for mempolicy ooms
*
* The heuristic for determining which task to kill is made to be as simple and
* predictable as possible. The goal is to return the highest value for the
* task consuming the most memory to avoid subsequent oom failures.
*/
-unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
- const nodemask_t *nodemask, unsigned long totalpages)
+unsigned long oom_badness(struct task_struct *p, unsigned long totalpages)
{
long points;
long adj;
- if (oom_unkillable_task(p, memcg, nodemask))
+ if (oom_unkillable_task(p))
return 0;
p = find_lock_task_mm(p);
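The hunk cuts off before the scoring arithmetic. As a rough paraphrase of the heuristic the kernel-doc above describes, memory footprint in pages biased by oom_score_adj, this is an approximation for orientation only, not the exact kernel formula:

/* Rough paraphrase, not the kernel code: footprint in pages plus an
 * oom_score_adj bias scaled against totalpages. */
static long oom_badness_model(long rss_pages, long swap_pages,
			      long pgtable_pages, short oom_score_adj,
			      unsigned long totalpages)
{
	long points = rss_pages + swap_pages + pgtable_pages;

	points += (long)oom_score_adj * (long)totalpages / 1000;
	return points > 0 ? points : 1;	/* assumed: eligible tasks score >= 1 */
}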
@@ -318,7 +311,11 @@ static int oom_evaluate_task(struct task_struct *task, void *arg)
struct oom_control *oc = arg;
unsigned long points;
- if (oom_unkillable_task(task, NULL, oc->nodemask))
+ if (oom_unkillable_task(task))
+ goto next;
+
+ /* p may not have freeable memory in nodemask */
+ if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
goto next;
/*
@@ -342,13 +339,10 @@ static int oom_evaluate_task(struct task_struct *task, void *arg)
goto select;
}
- points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
+ points = oom_badness(task, oc->totalpages);
if (!points || points < oc->chosen_points)
goto next;
- /* Prefer thread group leaders for display purposes */
- if (points == oc->chosen_points && thread_group_leader(oc->chosen))
- goto next;
select:
if (oc->chosen)
put_task_struct(oc->chosen);
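With the checks split as above, candidate evaluation is a two-stage filter (task-local unkillability, then OOM-context cpuset eligibility) followed by a running maximum. A compact model of the select/next flow, with the task list and scoring as stand-ins; note that with the thread-group-leader preference removed, ties now favor the later task:

#include <stddef.h>

struct cand_model { long points; };

static struct cand_model *select_model(struct cand_model *tasks, size_t n)
{
	struct cand_model *chosen = NULL;
	long chosen_points = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		long points = tasks[i].points;	/* stands in for oom_badness() */

		if (!points || points < chosen_points)
			continue;		/* "goto next" */
		chosen = &tasks[i];		/* "select:" */
		chosen_points = points;
	}
	return chosen;
}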
@@ -381,14 +375,44 @@ static void select_bad_process(struct oom_control *oc)
break;
rcu_read_unlock();
}
+}
- oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
+static int dump_task(struct task_struct *p, void *arg)
+{
+ struct oom_control *oc = arg;
+ struct task_struct *task;
+
+ if (oom_unkillable_task(p))
+ return 0;
+
+ /* p may not have freeable memory in nodemask */
+ if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
+ return 0;
+
+ task = find_lock_task_mm(p);
+ if (!task) {
+ /*
+ * This is a kthread or all of p's threads have already
+ * detached their mm's. There's no need to report
+ * them; they can't be oom killed anyway.
+ */
+ return 0;
+ }
+
+ pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
+ task->pid, from_kuid(&init_user_ns, task_uid(task)),
+ task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
+ mm_pgtables_bytes(task->mm),
+ get_mm_counter(task->mm, MM_SWAPENTS),
+ task->signal->oom_score_adj, task->comm);
+ task_unlock(task);
+
+ return 0;
}
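dump_task() returns int so it can serve directly as a mem_cgroup_scan_tasks() callback, where, if I read the contract correctly, a non-zero return stops the scan; dump_task() always returns 0, so a dump never aborts iteration. An illustrative model of that callback shape:

#include <stddef.h>

typedef int (*scan_fn)(int id, void *arg);

/* Illustrative scan helper: visit each element, stop on non-zero. */
static int scan_model(const int *ids, size_t n, scan_fn fn, void *arg)
{
	size_t i;

	for (i = 0; i < n; i++) {
		int ret = fn(ids[i], arg);

		if (ret)
			return ret;	/* callback asked to stop early */
	}
	return 0;
}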
/**
* dump_tasks - dump current memory state of all system tasks
- * @memcg: current's memory controller, if constrained
- * @nodemask: nodemask passed to page allocator for mempolicy ooms
+ * @oc: pointer to struct oom_control
*
* Dumps the current memory state of all eligible tasks. Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
@@ -396,37 +420,21 @@ static void select_bad_process(struct oom_control *oc)
* State information includes task's pid, uid, tgid, vm size, rss,
* pgtables_bytes, swapents, oom_score_adj value, and name.
*/
-static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
+static void dump_tasks(struct oom_control *oc)
{
- struct task_struct *p;
- struct task_struct *task;
-
pr_info("Tasks state (memory values in pages):\n");
pr_info("[ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name\n");
- rcu_read_lock();
- for_each_process(p) {
- if (oom_unkillable_task(p, memcg, nodemask))
- continue;
- task = find_lock_task_mm(p);
- if (!task) {
- /*
- * This is a kthread or all of p's threads have already
- * detached their mm's. There's no need to report
- * them; they can't be oom killed anyway.
- */
- continue;
- }
+ if (is_memcg_oom(oc))
+ mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
+ else {
+ struct task_struct *p;
- pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
- task->pid, from_kuid(&init_user_ns, task_uid(task)),
- task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
- mm_pgtables_bytes(task->mm),
- get_mm_counter(task->mm, MM_SWAPENTS),
- task->signal->oom_score_adj, task->comm);
- task_unlock(task);
+ rcu_read_lock();
+ for_each_process(p)
+ dump_task(p, oc);
+ rcu_read_unlock();
}
- rcu_read_unlock();
}
static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
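For reference, the row format dump_task() uses prints fixed-width columns. The same format string compiled in userspace, with made-up values, shows the resulting layout:

#include <stdio.h>

int main(void)
{
	/* Header and row format as in dump_tasks()/dump_task(); values invented. */
	printf("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");
	printf("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
	       1234, 1000, 1234, 54321UL, 9876UL, 40960L, 12UL,
	       (short)0, "example");
	return 0;
}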
@@ -458,7 +466,7 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
dump_unreclaimable_slab();
}
if (sysctl_oom_dump_tasks)
- dump_tasks(oc->memcg, oc->nodemask);
+ dump_tasks(oc);
if (p)
dump_oom_summary(oc, p);
}
@@ -987,8 +995,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
/*
* Determines whether the kernel must panic because of the panic_on_oom sysctl.
*/
-static void check_panic_on_oom(struct oom_control *oc,
- enum oom_constraint constraint)
+static void check_panic_on_oom(struct oom_control *oc)
{
if (likely(!sysctl_panic_on_oom))
return;
@@ -998,7 +1005,7 @@ static void check_panic_on_oom(struct oom_control *oc,
* does not panic for cpuset, mempolicy, or memcg allocation
* failures.
*/
- if (constraint != CONSTRAINT_NONE)
+ if (oc->constraint != CONSTRAINT_NONE)
return;
}
/* Do not panic for oom kills triggered by sysrq */
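check_panic_on_oom() now reads the constraint from oc rather than a parameter. The decision visible in this hunk, together with the sysrq comment, can be modelled as a small predicate (the enum mirrors the kernel's oom_constraint only loosely):

#include <stdbool.h>

enum constraint_model { MODEL_NONE, MODEL_CPUSET, MODEL_MEMORY_POLICY, MODEL_MEMCG };

/* panic_on_oom == 0 never panics; == 1 panics only for unconstrained
 * (global) OOMs; == 2 panics for any OOM; sysrq-triggered OOMs never panic. */
static bool should_panic_model(int sysctl_panic_on_oom,
			       enum constraint_model constraint,
			       bool is_sysrq)
{
	if (!sysctl_panic_on_oom)
		return false;
	if (sysctl_panic_on_oom != 2 && constraint != MODEL_NONE)
		return false;
	if (is_sysrq)
		return false;
	return true;
}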
@@ -1035,7 +1042,6 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
bool out_of_memory(struct oom_control *oc)
{
unsigned long freed = 0;
- enum oom_constraint constraint = CONSTRAINT_NONE;
if (oom_killer_disabled)
return false;
@@ -1071,13 +1077,14 @@ bool out_of_memory(struct oom_control *oc)
* Check if there were limitations on the allocation (only relevant for
* NUMA and memcg) that may require different handling.
*/
- constraint = constrained_alloc(oc);
- if (constraint != CONSTRAINT_MEMORY_POLICY)
+ oc->constraint = constrained_alloc(oc);
+ if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
oc->nodemask = NULL;
- check_panic_on_oom(oc, constraint);
+ check_panic_on_oom(oc);
if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
- current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
+ current->mm && !oom_unkillable_task(current) &&
+ oom_cpuset_eligible(current, oc) &&
current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
get_task_struct(current);
oc->chosen = current;
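The tail of this hunk shows the new ordering in out_of_memory(): compute the constraint once, cache it in oc, drop the nodemask unless the OOM is mempolicy-constrained, then consider killing the allocating task. Sketched as a sequence, with a hypothetical stand-in for constrained_alloc():

#include <stddef.h>

enum constraint_sketch { SKETCH_NONE, SKETCH_MEMORY_POLICY };

struct oc_sketch {
	enum constraint_sketch constraint;
	const void *nodemask;
};

/* Hypothetical stand-in for constrained_alloc(). */
static enum constraint_sketch constrained_alloc_sketch(struct oc_sketch *oc)
{
	return oc->nodemask ? SKETCH_MEMORY_POLICY : SKETCH_NONE;
}

static void out_of_memory_sketch(struct oc_sketch *oc)
{
	/* Cache the constraint on oc; later helpers such as
	 * check_panic_on_oom() read it there instead of taking an argument. */
	oc->constraint = constrained_alloc_sketch(oc);
	if (oc->constraint != SKETCH_MEMORY_POLICY)
		oc->nodemask = NULL;	/* only mempolicy OOMs keep the mask */
	/* check_panic_on_oom(oc) and victim selection follow. */
}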