author      Minchan Kim <minchan@kernel.org>                2018-08-23 08:29:56 +0300
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2018-09-12 10:18:29 +0300
commit      da1b9564e85b1d7baf66cbfabcab27e183a1db63 (patch)
tree        4fad5a83a7dab99424f0920dd1a5cdab3d199015
parent      34f1166afd67f9f48a08c52f36180048908506a4 (diff)
download    linux-da1b9564e85b1d7baf66cbfabcab27e183a1db63.tar.xz
android: binder: fix the race between mmap and alloc_new_buf_locked
There is a RaceFuzzer report like the one below because we have no lock to
close the race between binder_mmap and binder_alloc_new_buf_locked.
To close the race, let's use a memory barrier so that if someone sees
alloc->vma is not NULL, alloc->vma_vm_mm is never NULL.

(I didn't add a stable mark intentionally because standard android
userspace libraries that interact with binder (libbinder & libhwbinder)
prevent the mmap/ioctl race. - from Todd)
"
Thread interleaving:
CPU0 (binder_alloc_mmap_handler)          CPU1 (binder_alloc_new_buf_locked)
=====                                     =====
// drivers/android/binder_alloc.c
// #L718 (v4.18-rc3)
alloc->vma = vma;
                                          // drivers/android/binder_alloc.c
                                          // #L346 (v4.18-rc3)
                                          if (alloc->vma == NULL) {
                                                  ...
                                                  // alloc->vma is not NULL at this point
                                                  return ERR_PTR(-ESRCH);
                                          }
                                          ...
                                          // #L438
                                          binder_update_page_range(alloc, 0,
                                                  (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                                                  end_page_addr);
                                          // In binder_update_page_range() #L218
                                          // But still alloc->vma_vm_mm is NULL here
                                          if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
alloc->vma_vm_mm = vma->vm_mm;
Crash Log:
==================================================================
BUG: KASAN: null-ptr-deref in __atomic_add_unless include/asm-generic/atomic-instrumented.h:89 [inline]
BUG: KASAN: null-ptr-deref in atomic_add_unless include/linux/atomic.h:533 [inline]
BUG: KASAN: null-ptr-deref in mmget_not_zero include/linux/sched/mm.h:75 [inline]
BUG: KASAN: null-ptr-deref in binder_update_page_range+0xece/0x18e0 drivers/android/binder_alloc.c:218
Write of size 4 at addr 0000000000000058 by task syz-executor0/11184
CPU: 1 PID: 11184 Comm: syz-executor0 Not tainted 4.18.0-rc3 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.8.2-0-g33fbe13 by qemu-project.org 04/01/2014
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0x16e/0x22c lib/dump_stack.c:113
kasan_report_error mm/kasan/report.c:352 [inline]
kasan_report+0x163/0x380 mm/kasan/report.c:412
check_memory_region_inline mm/kasan/kasan.c:260 [inline]
check_memory_region+0x140/0x1a0 mm/kasan/kasan.c:267
kasan_check_write+0x14/0x20 mm/kasan/kasan.c:278
__atomic_add_unless include/asm-generic/atomic-instrumented.h:89 [inline]
atomic_add_unless include/linux/atomic.h:533 [inline]
mmget_not_zero include/linux/sched/mm.h:75 [inline]
binder_update_page_range+0xece/0x18e0 drivers/android/binder_alloc.c:218
binder_alloc_new_buf_locked drivers/android/binder_alloc.c:443 [inline]
binder_alloc_new_buf+0x467/0xc30 drivers/android/binder_alloc.c:513
binder_transaction+0x125b/0x4fb0 drivers/android/binder.c:2957
binder_thread_write+0xc08/0x2770 drivers/android/binder.c:3528
binder_ioctl_write_read.isra.39+0x24f/0x8e0 drivers/android/binder.c:4456
binder_ioctl+0xa86/0xf34 drivers/android/binder.c:4596
vfs_ioctl fs/ioctl.c:46 [inline]
do_vfs_ioctl+0x154/0xd40 fs/ioctl.c:686
ksys_ioctl+0x94/0xb0 fs/ioctl.c:701
__do_sys_ioctl fs/ioctl.c:708 [inline]
__se_sys_ioctl fs/ioctl.c:706 [inline]
__x64_sys_ioctl+0x43/0x50 fs/ioctl.c:706
do_syscall_64+0x167/0x4b0 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
"
Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Martijn Coenen <maco@android.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--   drivers/android/binder_alloc.c   43
1 files changed, 35 insertions, 8 deletions
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 3f3b7b253445..64fd96eada31 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -332,6 +332,35 @@ err_no_vma:
 	return vma ? -ENOMEM : -ESRCH;
 }
 
+
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+		struct vm_area_struct *vma)
+{
+	if (vma)
+		alloc->vma_vm_mm = vma->vm_mm;
+	/*
+	 * If we see alloc->vma is not NULL, buffer data structures set up
+	 * completely. Look at smp_rmb side binder_alloc_get_vma.
+	 * We also want to guarantee new alloc->vma_vm_mm is always visible
+	 * if alloc->vma is set.
+	 */
+	smp_wmb();
+	alloc->vma = vma;
+}
+
+static inline struct vm_area_struct *binder_alloc_get_vma(
+		struct binder_alloc *alloc)
+{
+	struct vm_area_struct *vma = NULL;
+
+	if (alloc->vma) {
+		/* Look at description in binder_alloc_set_vma */
+		smp_rmb();
+		vma = alloc->vma;
+	}
+	return vma;
+}
+
 static struct binder_buffer *binder_alloc_new_buf_locked(
 				struct binder_alloc *alloc,
 				size_t data_size,
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	size_t size, data_offsets_size;
 	int ret;
 
-	if (alloc->vma == NULL) {
+	if (!binder_alloc_get_vma(alloc)) {
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
 				   alloc->pid);
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
 	alloc->free_async_space = alloc->buffer_size / 2;
-	barrier();
-	alloc->vma = vma;
-	alloc->vma_vm_mm = vma->vm_mm;
+	binder_alloc_set_vma(alloc, vma);
 	mmgrab(alloc->vma_vm_mm);
 
 	return 0;
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 	int buffers, page_count;
 	struct binder_buffer *buffer;
 
-	BUG_ON(alloc->vma);
-
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
+	BUG_ON(alloc->vma);
+
 	while ((n = rb_first(&alloc->allocated_buffers))) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-	WRITE_ONCE(alloc->vma, NULL);
+	binder_alloc_set_vma(alloc, NULL);
 }
 
 /**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
 
-	vma = alloc->vma;
+	vma = binder_alloc_get_vma(alloc);
 	if (vma) {
 		if (!mmget_not_zero(alloc->vma_vm_mm))
 			goto err_mmget;
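A note on the shape of the fix: binder_alloc_mmap_handler() and binder_alloc_vma_close()
now publish and clear the mapping only through binder_alloc_set_vma(), which writes
alloc->vma_vm_mm before alloc->vma with an smp_wmb() in between, replacing the old
barrier()/WRITE_ONCE() combination that gave no cross-CPU ordering guarantee. The
BUG_ON(alloc->vma) in binder_alloc_deferred_release() also moves under alloc->mutex,
presumably so the check is made against a consistent view of the allocator state.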