diff options
author | Kees Cook <keescook@chromium.org> | 2012-11-20 03:21:26 +0400 |
---|---|---|
committer | Kees Cook <keescook@chromium.org> | 2012-11-20 22:32:08 +0400 |
commit | 235e752789eb65a81477bb82845323dfcbf93012 (patch) | |
tree | c4efa5eff81c01029ab884c0d43af16bb91b44b4 | |
parent | 93b69d437effff11b1c37f330d3265c37ec2f84b (diff) | |
download | linux-235e752789eb65a81477bb82845323dfcbf93012.tar.xz |
Yama: remove locking from delete path
Instead of locking the list during a delete, mark entries as invalid
and trigger a workqueue to clean them up. This lets us easily handle
task_free from interrupt context.
Signed-off-by: Kees Cook <keescook@chromium.org>
-rw-r--r-- | security/yama/yama_lsm.c | 49 |
1 file changed, 42 insertions(+), 7 deletions(-)
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index 70cd85e3ba30..2663145d1197 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -17,6 +17,7 @@ #include <linux/ptrace.h> #include <linux/prctl.h> #include <linux/ratelimit.h> +#include <linux/workqueue.h> #define YAMA_SCOPE_DISABLED 0 #define YAMA_SCOPE_RELATIONAL 1 @@ -29,6 +30,7 @@ static int ptrace_scope = YAMA_SCOPE_RELATIONAL; struct ptrace_relation { struct task_struct *tracer; struct task_struct *tracee; + bool invalid; struct list_head node; struct rcu_head rcu; }; @@ -36,6 +38,29 @@ struct ptrace_relation { static LIST_HEAD(ptracer_relations); static DEFINE_SPINLOCK(ptracer_relations_lock); +static void yama_relation_cleanup(struct work_struct *work); +static DECLARE_WORK(yama_relation_work, yama_relation_cleanup); + +/** + * yama_relation_cleanup - remove invalid entries from the relation list + * + */ +static void yama_relation_cleanup(struct work_struct *work) +{ + struct ptrace_relation *relation; + + spin_lock(&ptracer_relations_lock); + rcu_read_lock(); + list_for_each_entry_rcu(relation, &ptracer_relations, node) { + if (relation->invalid) { + list_del_rcu(&relation->node); + kfree_rcu(relation, rcu); + } + } + rcu_read_unlock(); + spin_unlock(&ptracer_relations_lock); +} + /** * yama_ptracer_add - add/replace an exception for this tracer/tracee pair * @tracer: the task_struct of the process doing the ptrace @@ -57,10 +82,13 @@ static int yama_ptracer_add(struct task_struct *tracer, added->tracee = tracee; added->tracer = tracer; + added->invalid = false; - spin_lock_bh(&ptracer_relations_lock); + spin_lock(&ptracer_relations_lock); rcu_read_lock(); list_for_each_entry_rcu(relation, &ptracer_relations, node) { + if (relation->invalid) + continue; if (relation->tracee == tracee) { list_replace_rcu(&relation->node, &added->node); kfree_rcu(relation, rcu); @@ -72,7 +100,7 @@ static int yama_ptracer_add(struct task_struct *tracer, out: rcu_read_unlock(); - spin_unlock_bh(&ptracer_relations_lock); + spin_unlock(&ptracer_relations_lock); return 0; } @@ -85,18 +113,22 @@ static void yama_ptracer_del(struct task_struct *tracer, struct task_struct *tracee) { struct ptrace_relation *relation; + bool marked = false; - spin_lock_bh(&ptracer_relations_lock); rcu_read_lock(); list_for_each_entry_rcu(relation, &ptracer_relations, node) { + if (relation->invalid) + continue; if (relation->tracee == tracee || (tracer && relation->tracer == tracer)) { - list_del_rcu(&relation->node); - kfree_rcu(relation, rcu); + relation->invalid = true; + marked = true; } } rcu_read_unlock(); - spin_unlock_bh(&ptracer_relations_lock); + + if (marked) + schedule_work(&yama_relation_work); } /** @@ -223,12 +255,15 @@ static int ptracer_exception_found(struct task_struct *tracer, rcu_read_lock(); if (!thread_group_leader(tracee)) tracee = rcu_dereference(tracee->group_leader); - list_for_each_entry_rcu(relation, &ptracer_relations, node) + list_for_each_entry_rcu(relation, &ptracer_relations, node) { + if (relation->invalid) + continue; if (relation->tracee == tracee) { parent = relation->tracer; found = true; break; } + } if (found && (parent == NULL || task_is_descendant(parent, tracer))) rc = 1; |