| field | value | date |
|---|---|---|
| author | Todd Kjos <tkjos@android.com> | 2017-06-29 22:02:08 +0300 |
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-07-17 15:48:24 +0300 |
| commit | 2c1838dc6817dd28cf24ba0c57cc8693be9bbfc5 (patch) | |
| tree | 4c231a1b44cd8808867d112dced8c650ea0d906f /drivers/android | |
| parent | b3e6861283790d78f298f2a1bc3ef5fd81b381f4 (diff) | |
| download | linux-2c1838dc6817dd28cf24ba0c57cc8693be9bbfc5.tar.xz | |
binder: protect binder_ref with outer lock
Use proc->outer_lock to protect the binder_ref structure.
The outer lock allows functions operating on the binder_ref
to do nested acquires of node and inner locks as necessary
to attach refs to nodes atomically.
Binder refs must never be accessed without holding the
outer lock.
Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
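
[Editor's note] The nesting the commit message describes (outer lock first, node and inner locks beneath it) is easiest to see in miniature. Below is a minimal userspace sketch of that ordering, assuming pthread mutexes as stand-ins for the kernel spinlocks; `proc_ctx`, `node_ctx`, and `attach_ref()` are hypothetical names for illustration, not binder.c symbols (the real helpers are binder_proc_lock(), binder_node_lock(), and binder_inner_proc_lock()).

```c
/*
 * Hypothetical userspace model of the lock nesting described above.
 * pthread mutexes stand in for the kernel spinlocks; proc_ctx,
 * node_ctx and attach_ref() are illustrative names only.
 */
#include <pthread.h>
#include <stdio.h>

struct node_ctx {
	pthread_mutex_t lock;       /* models binder_node->lock */
	int refs;                   /* refs attached to this node */
};

struct proc_ctx {
	pthread_mutex_t outer_lock; /* models binder_proc->outer_lock */
	pthread_mutex_t inner_lock; /* models binder_proc->inner_lock */
	int outgoing_refs;          /* models the refs_by_* rbtrees */
};

/*
 * Attach a ref to a node atomically: the outer lock is taken first,
 * and the node and inner locks nest beneath it.  The ref state is
 * never touched without the outer lock held -- the invariant this
 * patch enforces.
 */
static void attach_ref(struct proc_ctx *proc, struct node_ctx *node)
{
	pthread_mutex_lock(&proc->outer_lock);   /* 1: outer first */
	pthread_mutex_lock(&node->lock);         /* 2: node nests under it */
	pthread_mutex_lock(&proc->inner_lock);   /* 3: inner nests deepest */

	proc->outgoing_refs++;
	node->refs++;

	pthread_mutex_unlock(&proc->inner_lock); /* release in reverse order */
	pthread_mutex_unlock(&node->lock);
	pthread_mutex_unlock(&proc->outer_lock);
}

int main(void)
{
	struct proc_ctx proc = {
		.outer_lock = PTHREAD_MUTEX_INITIALIZER,
		.inner_lock = PTHREAD_MUTEX_INITIALIZER,
	};
	struct node_ctx node = { .lock = PTHREAD_MUTEX_INITIALIZER };

	attach_ref(&proc, &node);
	printf("refs: proc=%d node=%d\n", proc.outgoing_refs, node.refs);
	return 0;
}
```

Acquiring in one fixed order and releasing in reverse keeps the lock hierarchy acyclic, which is what lets the patch take nested locks without risking deadlock.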
Diffstat (limited to 'drivers/android')
| -rw-r--r-- | drivers/android/binder.c | 133 |

1 file changed, 83 insertions, 50 deletions
```diff
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 704540ea3e12..f07f0d488aa4 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -475,7 +475,9 @@ enum binder_deferred_state {
  *                        this proc ordered by node->ptr
  *                        (protected by @inner_lock)
  * @refs_by_desc:         rbtree of refs ordered by ref->desc
+ *                        (protected by @outer_lock)
  * @refs_by_node:         rbtree of refs ordered by ref->node
+ *                        (protected by @outer_lock)
  * @pid                   PID of group_leader of process
  *                        (invariant after initialized)
  * @tsk                   task_struct for group_leader of process
@@ -1269,8 +1271,8 @@ static void binder_put_node(struct binder_node *node)
 	binder_dec_node_tmpref(node);
 }
 
-static struct binder_ref *binder_get_ref(struct binder_proc *proc,
-					 u32 desc, bool need_strong_ref)
+static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
+						 u32 desc, bool need_strong_ref)
 {
 	struct rb_node *n = proc->refs_by_desc.rb_node;
 	struct binder_ref *ref;
@@ -1293,7 +1295,7 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
 }
 
 /**
- * binder_get_ref_for_node() - get the ref associated with given node
+ * binder_get_ref_for_node_olocked() - get the ref associated with given node
  * @proc:	binder_proc that owns the ref
  * @node:	binder_node of target
  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
@@ -1310,9 +1312,10 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
  * new_ref. new_ref must be kfree'd by the caller in
  * this case.
  */
-static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
-						  struct binder_node *node,
-						  struct binder_ref *new_ref)
+static struct binder_ref *binder_get_ref_for_node_olocked(
+					struct binder_proc *proc,
+					struct binder_node *node,
+					struct binder_ref *new_ref)
 {
 	struct binder_context *context = proc->context;
 	struct rb_node **p = &proc->refs_by_node.rb_node;
@@ -1375,7 +1378,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
 	return new_ref;
 }
 
-static void binder_cleanup_ref(struct binder_ref *ref)
+static void binder_cleanup_ref_olocked(struct binder_ref *ref)
 {
 	bool delete_node = false;
 
@@ -1418,17 +1421,17 @@ static void binder_cleanup_ref(struct binder_ref *ref)
 }
 
 /**
- * binder_inc_ref() - increment the ref for given handle
+ * binder_inc_ref_olocked() - increment the ref for given handle
  * @ref:	ref to be incremented
  * @strong:	if true, strong increment, else weak
  * @target_list: list to queue node work on
  *
- * Increment the ref.
+ * Increment the ref. @ref->proc->outer_lock must be held on entry
  *
  * Return: 0, if successful, else errno
  */
-static int binder_inc_ref(struct binder_ref *ref, int strong,
-			  struct list_head *target_list)
+static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
+				  struct list_head *target_list)
 {
 	int ret;
 
@@ -1457,12 +1460,9 @@ static int binder_inc_ref(struct binder_ref *ref, int strong,
  *
  * Decrement the ref.
  *
- * TODO: kfree is avoided here since an upcoming patch
- * will put this under a lock.
- *
 * Return: true if ref is cleaned up and ready to be freed
 */
-static bool binder_dec_ref(struct binder_ref *ref, int strong)
+static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
 {
 	if (strong) {
 		if (ref->data.strong == 0) {
@@ -1486,13 +1486,7 @@ static bool binder_dec_ref(struct binder_ref *ref, int strong)
 		ref->data.weak--;
 	}
 	if (ref->data.strong == 0 && ref->data.weak == 0) {
-		binder_cleanup_ref(ref);
-		/*
-		 * TODO: we could kfree(ref) here, but an upcoming
-		 * patch will call this with a lock held, so we
-		 * return an indication that the ref should be
-		 * freed.
-		 */
+		binder_cleanup_ref_olocked(ref);
 		return true;
 	}
 	return false;
@@ -1517,7 +1511,8 @@ static struct binder_node *binder_get_node_from_ref(
 	struct binder_node *node;
 	struct binder_ref *ref;
 
-	ref = binder_get_ref(proc, desc, need_strong_ref);
+	binder_proc_lock(proc);
+	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
 	if (!ref)
 		goto err_no_ref;
 	node = ref->node;
@@ -1528,10 +1523,12 @@ static struct binder_node *binder_get_node_from_ref(
 	binder_inc_node_tmpref(node);
 	if (rdata)
 		*rdata = ref->data;
+	binder_proc_unlock(proc);
 
 	return node;
 
 err_no_ref:
+	binder_proc_unlock(proc);
 	return NULL;
 }
 
@@ -1571,24 +1568,27 @@ static int binder_update_ref_for_handle(struct binder_proc *proc,
 	struct binder_ref *ref;
 	bool delete_ref = false;
 
-	ref = binder_get_ref(proc, desc, strong);
+	binder_proc_lock(proc);
+	ref = binder_get_ref_olocked(proc, desc, strong);
 	if (!ref) {
 		ret = -EINVAL;
 		goto err_no_ref;
 	}
 	if (increment)
-		ret = binder_inc_ref(ref, strong, NULL);
+		ret = binder_inc_ref_olocked(ref, strong, NULL);
 	else
-		delete_ref = binder_dec_ref(ref, strong);
+		delete_ref = binder_dec_ref_olocked(ref, strong);
 
 	if (rdata)
 		*rdata = ref->data;
+	binder_proc_unlock(proc);
 
 	if (delete_ref)
 		binder_free_ref(ref);
 	return ret;
 
 err_no_ref:
+	binder_proc_unlock(proc);
 	return ret;
 }
 
@@ -1633,15 +1633,19 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
 	struct binder_ref *new_ref = NULL;
 	int ret = 0;
 
-	ref = binder_get_ref_for_node(proc, node, NULL);
+	binder_proc_lock(proc);
+	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
 	if (!ref) {
+		binder_proc_unlock(proc);
 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
 		if (!new_ref)
 			return -ENOMEM;
-		ref = binder_get_ref_for_node(proc, node, new_ref);
+		binder_proc_lock(proc);
+		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 	}
-	ret = binder_inc_ref(ref, strong, target_list);
+	ret = binder_inc_ref_olocked(ref, strong, target_list);
 	*rdata = ref->data;
+	binder_proc_unlock(proc);
 	if (new_ref && ref != new_ref)
 		/*
 		 * Another thread created the ref first so
@@ -2497,11 +2501,14 @@ static void binder_transaction(struct binder_proc *proc,
 			 * stays alive until the transaction is
 			 * done.
 			 */
-			ref = binder_get_ref(proc, tr->target.handle, true);
+			binder_proc_lock(proc);
+			ref = binder_get_ref_olocked(proc, tr->target.handle,
+						     true);
 			if (ref) {
 				binder_inc_node(ref->node, 1, 0, NULL);
 				target_node = ref->node;
 			}
+			binder_proc_unlock(proc);
 			if (target_node == NULL) {
 				binder_user_error("%d:%d got transaction to invalid handle\n",
 					proc->pid, thread->pid);
@@ -3277,7 +3284,7 @@ static int binder_thread_write(struct binder_proc *proc,
 			uint32_t target;
 			binder_uintptr_t cookie;
 			struct binder_ref *ref;
-			struct binder_ref_death *death;
+			struct binder_ref_death *death = NULL;
 
 			if (get_user(target, (uint32_t __user *)ptr))
 				return -EFAULT;
@@ -3285,7 +3292,29 @@ static int binder_thread_write(struct binder_proc *proc,
 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
-			ref = binder_get_ref(proc, target, false);
+			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+				/*
+				 * Allocate memory for death notification
+				 * before taking lock
+				 */
+				death = kzalloc(sizeof(*death), GFP_KERNEL);
+				if (death == NULL) {
+					WARN_ON(thread->return_error.cmd !=
+						BR_OK);
+					thread->return_error.cmd = BR_ERROR;
+					binder_enqueue_work(
+						thread->proc,
+						&thread->return_error.work,
+						&thread->todo);
+					binder_debug(
+						BINDER_DEBUG_FAILED_TRANSACTION,
+						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
+						proc->pid, thread->pid);
+					break;
+				}
+			}
+			binder_proc_lock(proc);
+			ref = binder_get_ref_olocked(proc, target, false);
 			if (ref == NULL) {
 				binder_user_error("%d:%d %s invalid ref %d\n",
 					proc->pid, thread->pid,
@@ -3293,6 +3322,8 @@ static int binder_thread_write(struct binder_proc *proc,
 					"BC_REQUEST_DEATH_NOTIFICATION" :
 					"BC_CLEAR_DEATH_NOTIFICATION",
 					target);
+				binder_proc_unlock(proc);
+				kfree(death);
 				break;
 			}
 
@@ -3310,20 +3341,8 @@ static int binder_thread_write(struct binder_proc *proc,
 				if (ref->death) {
 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
 						proc->pid, thread->pid);
-					break;
-				}
-				death = kzalloc(sizeof(*death), GFP_KERNEL);
-				if (death == NULL) {
-					WARN_ON(thread->return_error.cmd !=
-						BR_OK);
-					thread->return_error.cmd = BR_ERROR;
-					binder_enqueue_work(
-						thread->proc,
-						&thread->return_error.work,
-						&thread->todo);
-					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
-						     proc->pid, thread->pid);
+					binder_proc_unlock(proc);
+					kfree(death);
 					break;
 				}
 				binder_stats_created(BINDER_STAT_DEATH);
@@ -3356,6 +3375,7 @@ static int binder_thread_write(struct binder_proc *proc,
 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
 						proc->pid, thread->pid);
 					binder_node_unlock(ref->node);
+					binder_proc_unlock(proc);
 					break;
 				}
 				death = ref->death;
@@ -3365,6 +3385,7 @@ static int binder_thread_write(struct binder_proc *proc,
 						(u64)death->cookie,
 						(u64)cookie);
 					binder_node_unlock(ref->node);
+					binder_proc_unlock(proc);
 					break;
 				}
 				ref->death = NULL;
@@ -3391,6 +3412,7 @@ static int binder_thread_write(struct binder_proc *proc,
 					binder_inner_proc_unlock(proc);
 					binder_node_unlock(ref->node);
 				}
+				binder_proc_unlock(proc);
 			} break;
 			case BC_DEAD_BINDER_DONE: {
 				struct binder_work *w;
@@ -4601,14 +4623,18 @@ static void binder_deferred_release(struct binder_proc *proc)
 	binder_inner_proc_unlock(proc);
 
 	outgoing_refs = 0;
+	binder_proc_lock(proc);
 	while ((n = rb_first(&proc->refs_by_desc))) {
 		struct binder_ref *ref;
 
 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
 		outgoing_refs++;
-		binder_cleanup_ref(ref);
+		binder_cleanup_ref_olocked(ref);
+		binder_proc_unlock(proc);
 		binder_free_ref(ref);
+		binder_proc_lock(proc);
 	}
+	binder_proc_unlock(proc);
 
 	binder_release_work(proc, &proc->todo);
 	binder_release_work(proc, &proc->delivered_death);
@@ -4816,8 +4842,10 @@ static void print_binder_node_nilocked(struct seq_file *m,
 	}
 }
 
-static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+static void print_binder_ref_olocked(struct seq_file *m,
+				     struct binder_ref *ref)
 {
+	WARN_ON(!spin_is_locked(&ref->proc->outer_lock));
 	binder_node_lock(ref->node);
 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
 		   ref->data.debug_id, ref->data.desc,
@@ -4869,11 +4897,14 @@ static void print_binder_proc(struct seq_file *m,
 		binder_put_node(last_node);
 
 	if (print_all) {
+		binder_proc_lock(proc);
 		for (n = rb_first(&proc->refs_by_desc);
 		     n != NULL;
 		     n = rb_next(n))
-			print_binder_ref(m, rb_entry(n, struct binder_ref,
-						     rb_node_desc));
+			print_binder_ref_olocked(m, rb_entry(n,
+							     struct binder_ref,
+							     rb_node_desc));
+		binder_proc_unlock(proc);
 	}
 	binder_alloc_print_allocated(m, &proc->alloc);
 	binder_inner_proc_lock(proc);
@@ -5013,6 +5044,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 	count = 0;
 	strong = 0;
 	weak = 0;
+	binder_proc_lock(proc);
 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
 						  rb_node_desc);
@@ -5020,6 +5052,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 		strong += ref->data.strong;
 		weak += ref->data.weak;
 	}
+	binder_proc_unlock(proc);
 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
 
 	count = binder_alloc_get_allocated_count(&proc->alloc);
```
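
[Editor's note] One pattern in the BC_REQUEST_DEATH_NOTIFICATION hunk deserves a note: the death structure is now allocated with kzalloc(GFP_KERNEL) *before* binder_proc_lock() is taken, because the proc lock is a spinlock and a sleeping allocation is not allowed while it is held; every error path taken after the lock must then kfree() the allocation. Here is a minimal userspace sketch of that ordering, assuming hypothetical names (`request_death_notification()`, `ref_lookup()`, `death_ctx`) and a pthread mutex modeling the outer lock:

```c
/*
 * Userspace sketch of the allocate-before-lock pattern used for
 * BC_REQUEST_DEATH_NOTIFICATION.  Names are illustrative, not
 * binder.c symbols; the point is only the ordering: sleep-capable
 * allocation first, lock second, and the allocation freed on every
 * failure path so nothing leaks.
 */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct death_ctx {
	unsigned long cookie;
};

static pthread_mutex_t proc_lock = PTHREAD_MUTEX_INITIALIZER;
static struct death_ctx *active_death;	/* models ref->death */

/* Stand-in for binder_get_ref_olocked(): call only with proc_lock held. */
static int ref_lookup(unsigned int target)
{
	return target != 0;	/* pretend handle 0 is the only invalid ref */
}

static int request_death_notification(unsigned int target,
				      unsigned long cookie)
{
	/* 1: allocate while sleeping is still allowed -- no lock held */
	struct death_ctx *death = calloc(1, sizeof(*death));

	if (!death)
		return -ENOMEM;

	/* 2: take the lock; only non-sleeping work happens under it */
	pthread_mutex_lock(&proc_lock);
	if (!ref_lookup(target)) {
		pthread_mutex_unlock(&proc_lock);
		free(death);	/* every failure path frees the allocation */
		return -EINVAL;
	}
	death->cookie = cookie;
	active_death = death;	/* real code: ref->death = death */
	pthread_mutex_unlock(&proc_lock);
	return 0;
}

int main(void)
{
	return request_death_notification(1, 0xdeadUL) ? 1 : 0;
}
```

The binder_deferred_release() hunk applies the same constraint in the other direction: it drops the outer lock around binder_free_ref() and reacquires it before the next rbtree iteration, so no potentially sleeping free happens under the spinlock.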