author		Chris Wilson <chris@chris-wilson.co.uk>	2019-08-14 21:24:01 +0300
committer	Chris Wilson <chris@chris-wilson.co.uk>	2019-08-16 14:40:58 +0300
commit		b016cd6ed4b772759804e0d6082bd1f5ca63b8ee (patch)
tree		278f8b1426b40316f5744cd6155ec51ecc9dd6c7 /drivers/dma-buf/dma-buf.c
parent		dc2e1e5b279966affbd11ff7cfef52eb634ca2c9 (diff)
download	linux-b016cd6ed4b772759804e0d6082bd1f5ca63b8ee.tar.xz
dma-buf: Restore seqlock around dma_resv updates
This reverts the following commits:
67c97fb79a7f ("dma-buf: add reservation_object_fences helper")
dd7a7d1ff2f1 ("drm/i915: use new reservation_object_fences helper")
0e1d8083bddb ("dma-buf: further relax reservation_object_add_shared_fence")
5d344f58da76 ("dma-buf: nuke reservation_object seq number")
The scenario that defeats simply grabbing a set of shared/exclusive
fences and using them blissfully under RCU is that any of those fences
may be reallocated by a SLAB_TYPESAFE_BY_RCU fence slab cache. In this
scenario, while holding the rcu_read_lock we need to establish that no
fence was changed in the dma_resv after a read (or full) memory barrier;
a sketch of that read-and-retry pattern follows the tags below.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190814182401.25009-1-chris@chris-wilson.co.uk
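
The pattern the revert restores is the standard seqcount read-side retry loop
combined with RCU. The following is a minimal, hypothetical sketch (the helper
name sample_fences() is not from this patch), assuming the dma_resv layout this
series reinstates: a seqcount_t seq plus RCU-protected fence/fence_excl pointers.

/*
 * Hypothetical sketch, not code from this patch: sample the dma_resv
 * seqcount, dereference the fence pointers under rcu_read_lock(), and
 * retry if the seqcount shows a writer raced with us.
 */
#include <linux/seqlock.h>
#include <linux/rcupdate.h>
#include <linux/dma-resv.h>

static void sample_fences(struct dma_resv *resv,
			  struct dma_fence **excl,
			  struct dma_resv_list **list,
			  unsigned int *shared_count)
{
	unsigned int seq;

retry:
	/* Snapshot the sequence counter before looking at the fences. */
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	*list = rcu_dereference(resv->fence);
	*shared_count = *list ? (*list)->shared_count : 0;
	*excl = rcu_dereference(resv->fence_excl);

	/* A concurrent update invalidates the snapshot; drop RCU and retry. */
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	/*
	 * On success rcu_read_lock() stays held, as in the diff below: the
	 * caller uses the fence pointers and then calls rcu_read_unlock().
	 */
}

Keeping rcu_read_lock() held on success is deliberate; the pointers are only
safe to use while the read-side critical section lasts.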
Diffstat (limited to 'drivers/dma-buf/dma-buf.c')
-rw-r--r--	drivers/dma-buf/dma-buf.c	31
1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index b3400d6524ab..433d91d710e4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -199,7 +199,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	struct dma_resv_list *fobj;
 	struct dma_fence *fence_excl;
 	__poll_t events;
-	unsigned shared_count;
+	unsigned shared_count, seq;
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
@@ -213,8 +213,21 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	if (!events)
 		return 0;
 
+retry:
+	seq = read_seqcount_begin(&resv->seq);
 	rcu_read_lock();
-	dma_resv_fences(resv, &fence_excl, &fobj, &shared_count);
+
+	fobj = rcu_dereference(resv->fence);
+	if (fobj)
+		shared_count = fobj->shared_count;
+	else
+		shared_count = 0;
+	fence_excl = rcu_dereference(resv->fence_excl);
+	if (read_seqcount_retry(&resv->seq, seq)) {
+		rcu_read_unlock();
+		goto retry;
+	}
+
 	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
 		__poll_t pevents = EPOLLIN;
@@ -1144,6 +1157,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 	struct dma_resv *robj;
 	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
+	unsigned seq;
 	int count = 0, attach_count, shared_count, i;
 	size_t size = 0;
 
@@ -1174,9 +1188,16 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 			   buf_obj->name ?: "");
 
 		robj = buf_obj->resv;
-		rcu_read_lock();
-		dma_resv_fences(robj, &fence, &fobj, &shared_count);
-		rcu_read_unlock();
+		while (true) {
+			seq = read_seqcount_begin(&robj->seq);
+			rcu_read_lock();
+			fobj = rcu_dereference(robj->fence);
+			shared_count = fobj ? fobj->shared_count : 0;
+			fence = rcu_dereference(robj->fence_excl);
+			if (!read_seqcount_retry(&robj->seq, seq))
+				break;
+			rcu_read_unlock();
+		}
 
 		if (fence)
 			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
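
For completeness, the write side that these retry checks guard against is not
part of this diff; the real update helpers live in drivers/dma-buf/dma-resv.c.
The following is a hypothetical sketch of how an exclusive-fence update would
bump the seqcount so the readers above notice the race (set_exclusive_fence()
is an illustrative name, not a kernel function).

/*
 * Hypothetical sketch of the update side the seqcount protects against.
 * The writer bumps the seqcount around the pointer update so that
 * read_seqcount_retry() in the lockless readers observes the change.
 */
#include <linux/seqlock.h>
#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

static void set_exclusive_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	/* Writers are serialized by the reservation lock. */
	struct dma_fence *old = dma_resv_get_excl(obj);

	dma_resv_assert_held(obj);

	if (fence)
		dma_fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin() provides the write memory barrier. */
	rcu_assign_pointer(obj->fence_excl, fence);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	if (old)
		dma_fence_put(old);
}

Because the writer already holds the reservation lock, the seqcount does not
serialize writers against each other; it only lets the lockless RCU readers
detect that an update happened and retry their snapshot.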