From 992c238188a83befa0094a8c00bfead31aa302ed Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 28 Jul 2021 19:51:50 +0200 Subject: dma-buf: nuke seqno-fence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Entirely unused. Signed-off-by: Christian König Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210729070330.41443-1-christian.koenig@amd.com --- include/linux/seqno-fence.h | 109 -------------------------------------------- 1 file changed, 109 deletions(-) delete mode 100644 include/linux/seqno-fence.h (limited to 'include/linux') diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h deleted file mode 100644 index 3cca2b8fac43..000000000000 --- a/include/linux/seqno-fence.h +++ /dev/null @@ -1,109 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * seqno-fence, using a dma-buf to synchronize fencing - * - * Copyright (C) 2012 Texas Instruments - * Copyright (C) 2012 Canonical Ltd - * Authors: - * Rob Clark - * Maarten Lankhorst - */ - -#ifndef __LINUX_SEQNO_FENCE_H -#define __LINUX_SEQNO_FENCE_H - -#include -#include - -enum seqno_fence_condition { - SEQNO_FENCE_WAIT_GEQUAL, - SEQNO_FENCE_WAIT_NONZERO -}; - -struct seqno_fence { - struct dma_fence base; - - const struct dma_fence_ops *ops; - struct dma_buf *sync_buf; - uint32_t seqno_ofs; - enum seqno_fence_condition condition; -}; - -extern const struct dma_fence_ops seqno_fence_ops; - -/** - * to_seqno_fence - cast a fence to a seqno_fence - * @fence: fence to cast to a seqno_fence - * - * Returns NULL if the fence is not a seqno_fence, - * or the seqno_fence otherwise. - */ -static inline struct seqno_fence * -to_seqno_fence(struct dma_fence *fence) -{ - if (fence->ops != &seqno_fence_ops) - return NULL; - return container_of(fence, struct seqno_fence, base); -} - -/** - * seqno_fence_init - initialize a seqno fence - * @fence: seqno_fence to initialize - * @lock: pointer to spinlock to use for fence - * @sync_buf: buffer containing the memory location to signal on - * @context: the execution context this fence is a part of - * @seqno_ofs: the offset within @sync_buf - * @seqno: the sequence # to signal on - * @cond: fence wait condition - * @ops: the fence_ops for operations on this seqno fence - * - * This function initializes a struct seqno_fence with passed parameters, - * and takes a reference on sync_buf which is released on fence destruction. - * - * A seqno_fence is a dma_fence which can complete in software when - * enable_signaling is called, but it also completes when - * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true - * - * The seqno_fence will take a refcount on the sync_buf until it's - * destroyed, but actual lifetime of sync_buf may be longer if one of the - * callers take a reference to it. - * - * Certain hardware have instructions to insert this type of wait condition - * in the command stream, so no intervention from software would be needed. - * This type of fence can be destroyed before completed, however a reference - * on the sync_buf dma-buf can be taken. It is encouraged to re-use the same - * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the - * device's vm can be expensive. - * - * It is recommended for creators of seqno_fence to call dma_fence_signal() - * before destruction. 
This will prevent possible issues from wraparound at - * time of issue vs time of check, since users can check dma_fence_is_signaled() - * before submitting instructions for the hardware to wait on the fence. - * However, when ops.enable_signaling is not called, it doesn't have to be - * done as soon as possible, just before there's any real danger of seqno - * wraparound. - */ -static inline void -seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock, - struct dma_buf *sync_buf, uint32_t context, - uint32_t seqno_ofs, uint32_t seqno, - enum seqno_fence_condition cond, - const struct dma_fence_ops *ops) -{ - BUG_ON(!fence || !sync_buf || !ops); - BUG_ON(!ops->wait || !ops->enable_signaling || - !ops->get_driver_name || !ops->get_timeline_name); - - /* - * ops is used in dma_fence_init for get_driver_name, so needs to be - * initialized first - */ - fence->ops = ops; - dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); - get_dma_buf(sync_buf); - fence->sync_buf = sync_buf; - fence->seqno_ofs = seqno_ofs; - fence->condition = cond; -} - -#endif /* __LINUX_SEQNO_FENCE_H */ -- cgit v1.2.3 From 880121be1179a0db3eb4fdb198108d9475b8be4c Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 15 Apr 2021 13:56:23 +0200 Subject: mm/vmscan: add sync_shrinkers function v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While unplugging a device, the TTM shrinker implementation needs a barrier to make sure that all concurrent shrink operations are done and no other CPU is referring to a device-specific pool any more. Taking and releasing the shrinker semaphore on the write side after unmapping and freeing all pages from the device pool should make sure that no shrinker is running in parallel. This allows us to avoid the contended mutex in the TTM pool implementation for every alloc/free operation. v2: rework the commit message to make clear why we need this v3: rename the function and add more doc as suggested by Daniel Signed-off-by: Christian König Acked-by: Huang Rui Acked-by: Andrew Morton Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210820120528.81114-2-christian.koenig@amd.com --- include/linux/shrinker.h | 1 + mm/vmscan.c | 15 +++++++++++++++ 2 files changed, 16 insertions(+) (limited to 'include/linux') diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 9814fff58a69..76fbf92b04d9 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -93,4 +93,5 @@ extern void register_shrinker_prepared(struct shrinker *shrinker); extern int register_shrinker(struct shrinker *shrinker); extern void unregister_shrinker(struct shrinker *shrinker); extern void free_prealloced_shrinker(struct shrinker *shrinker); +extern void synchronize_shrinkers(void); #endif diff --git a/mm/vmscan.c b/mm/vmscan.c index 4620df62f0ff..5d22a63472f0 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -638,6 +638,21 @@ void unregister_shrinker(struct shrinker *shrinker) } EXPORT_SYMBOL(unregister_shrinker); +/** + * synchronize_shrinkers - Wait for all running shrinkers to complete. + * + * This is equivalent to calling unregister_shrinker() and register_shrinker(), + * but atomically and with less overhead. This is useful to guarantee that all + * shrinker invocations have seen an update, before freeing memory, similar to + * RCU. + */ +void synchronize_shrinkers(void) +{ + down_write(&shrinker_rwsem); + up_write(&shrinker_rwsem); +} +EXPORT_SYMBOL(synchronize_shrinkers); + #define SHRINK_BATCH 128 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, -- cgit v1.2.3
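A minimal sketch of how a device-teardown path might use this barrier, in the spirit of the TTM use case from the commit message. The example_pool structure and its helpers are hypothetical stand-ins, not the actual TTM pool code:

#include <linux/list.h>
#include <linux/shrinker.h>
#include <linux/slab.h>

/* Hypothetical device-private pool, standing in for the real TTM pool. */
struct example_pool {
	struct list_head link;	/* entry in a global list scanned by the shrinker */
	/* ... page lists, device pointers, etc. ... */
};

static void example_pool_fini(struct example_pool *pool)
{
	/* Unlink the pool so no new shrink operation can find it. */
	list_del(&pool->link);

	/* Unmap and free all pages still held by the pool here. */

	/*
	 * Barrier: wait for every shrinker invocation that may have seen
	 * the pool before the unlink to finish running. After this
	 * returns, no other CPU refers to the pool any more.
	 */
	synchronize_shrinkers();

	kfree(pool);
}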
From d9edf92d496b61e5ac75b2b0aba5ea6c7f7ecdca Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 5 Aug 2021 12:47:05 +0200 Subject: dma-resv: Give the docs a do-over MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Specifically document the new/clarified rules around how the shared fences do not have any ordering requirements against the exclusive fence. But also document all the things a bit better: given how central struct dma_resv is to dynamic buffer management, the docs have been very inadequate. - Lots more links to other pieces of the puzzle. Unfortunately ttm_buffer_object has no docs, so no links :-( - Explain/complain a bit about dma_resv_locking_ctx(). I still don't like that one, but fixing the ttm call chains is going to be horrible. Plus we want to plug in real slowpath locking when we do that anyway. - Main part of the patch is some actual docs for struct dma_resv. Overall I think we still have a lot of bad naming in this area (e.g. dma_resv.fence is singular, but contains the multiple shared fences), but I think that's more indicative of how the semantics and rules are just not great. Another thing that's really awkward is how chaining exclusive fences right now means direct dma_resv.fence_excl pointer access with an rcu_assign_pointer. Not so great either. v2: - Fix a pile of typos (Matt, Jason) - Hammer it in that breaking the rules leads to use-after-free issues around dma-buf sharing (Christian) Reviewed-by: Christian König Cc: Jason Ekstrand Cc: Matthew Auld Reviewed-by: Matthew Auld Signed-off-by: Daniel Vetter Cc: Sumit Semwal Cc: "Christian König" Cc: linux-media@vger.kernel.org Cc: linaro-mm-sig@lists.linaro.org Link: https://patchwork.freedesktop.org/patch/msgid/20210805104705.862416-21-daniel.vetter@ffwll.ch --- drivers/dma-buf/dma-resv.c | 24 ++++++++--- include/linux/dma-buf.h | 7 +++ include/linux/dma-resv.h | 104 ++++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 124 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index e744fd87c63c..84fbe60629e3 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -48,6 +48,8 @@ * write operations) or N shared fences (read operations). The RCU * mechanism is used to protect read access to fences from locked * write-side updates. + * + * See struct dma_resv for more details. */ DEFINE_WD_CLASS(reservation_ww_class); @@ -137,7 +139,11 @@ EXPORT_SYMBOL(dma_resv_fini); * @num_fences: number of fences we want to add * * Should be called before dma_resv_add_shared_fence(). Must - * be called with obj->lock held. + * be called with @obj locked through dma_resv_lock(). + * + * Note that the preallocated slots need to be re-reserved if @obj is unlocked + * at any time before calling dma_resv_add_shared_fence(). This is validated + * when CONFIG_DEBUG_MUTEXES is enabled. * * RETURNS * Zero for success, or -errno @@ -234,8 +240,10 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max); * @obj: the reservation object * @fence: the shared fence to add * - * Add a fence to a shared slot, obj->lock must be held, and + * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and * dma_resv_reserve_shared() has been called.
+ * + * See also &dma_resv.fence for a discussion of the semantics. */ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) { @@ -278,9 +286,11 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence); /** * dma_resv_add_excl_fence - Add an exclusive fence. * @obj: the reservation object - * @fence: the shared fence to add + * @fence: the exclusive fence to add * - * Add a fence to the exclusive slot. The obj->lock must be held. + * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock(). + * Note that this function replaces all fences attached to @obj, see also + * &dma_resv.fence_excl for a discussion of the semantics. */ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) { @@ -609,9 +619,11 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) * fence * * Callers are not required to hold specific locks, but maybe hold - * dma_resv_lock() already + * dma_resv_lock() already. + * * RETURNS - * true if all fences signaled, else false + * + * True if all fences signaled, else false. */ bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all) { diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 8b32b4bdd590..66470c37e471 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -420,6 +420,13 @@ struct dma_buf { * - Dynamic importers should set fences for any access that they can't * disable immediately from their &dma_buf_attach_ops.move_notify * callback. + * + * IMPORTANT: + * + * All drivers must obey the struct dma_resv rules, specifically the + * rules for updating fences, see &dma_resv.fence_excl and + * &dma_resv.fence. If these dependency rules are broken, access tracking + * can be lost, resulting in use-after-free issues. */ struct dma_resv *resv; diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index e1ca2080a1ff..9100dd3dc21f 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -62,16 +62,90 @@ struct dma_resv_list { /** * struct dma_resv - a reservation object manages fences for a buffer - * @lock: update side lock - * @seq: sequence count for managing RCU read-side synchronization - * @fence_excl: the exclusive fence, if there is one currently - * @fence: list of current shared fences + * + * There are multiple uses for this, with sometimes slightly different rules in + * how the fence slots are used. + * + * One use is to synchronize cross-driver access to a struct dma_buf, either for + * dynamic buffer management or just to handle implicit synchronization between + * different users of the buffer in userspace. See &dma_buf.resv for a more + * in-depth discussion. + * + * The other major use is to manage access and locking within a driver in a + * buffer-based memory manager. struct ttm_buffer_object is the canonical + * example here, since this is where reservation objects originated from. But + * use in drivers is spreading, and some drivers also manage struct + * drm_gem_object with the same scheme. */ struct dma_resv { + /** + * @lock: + * + * Update side lock. Don't use directly, instead use the wrapper + * functions like dma_resv_lock() and dma_resv_unlock(). + * + * Drivers which use the reservation object to manage memory dynamically + * also use this lock to protect buffer object state like placement and + * allocation policies, or throughout command submission.
+ */ struct ww_mutex lock; + + /** + * @seq: + * + * Sequence count for managing RCU read-side synchronization, allows + * read-only access to @fence_excl and @fence while ensuring we take a + * consistent snapshot. + */ seqcount_ww_mutex_t seq; + /** + * @fence_excl: + * + * The exclusive fence, if there is one currently. + * + * There are two ways to update this fence: + * + * - First by calling dma_resv_add_excl_fence(), which replaces all + * fences attached to the reservation object. To guarantee that no + * fences are lost, this new fence must signal only after all previous + * fences, both shared and exclusive, have signalled. In some cases it + * is convenient to achieve that by attaching a struct dma_fence_array + * with all the new and old fences. + * + * - Alternatively the fence can be set directly, which leaves the + * shared fences unchanged. To guarantee that no fences are lost, this + * new fence must signal only after the previous exclusive fence has + * signalled. Since the shared fences stay intact, it is not + * necessary to maintain any ordering against those. If semantically + * only a new access is added without actually treating the previous + * one as a dependency, the exclusive fences can be strung together + * using struct dma_fence_chain. + * + * Note that the actual semantics of what an exclusive or shared fence + * mean are defined by the user; for reservation objects shared across + * drivers, see &dma_buf.resv. + */ struct dma_fence __rcu *fence_excl; + + /** + * @fence: + * + * List of current shared fences. + * + * There are no ordering constraints of shared fences against the + * exclusive fence slot. If a waiter needs to wait for all access, it + * has to wait for both sets of fences to signal. + * + * A new fence is added by calling dma_resv_add_shared_fence(). Since + * this often needs to be done past the point of no return in command + * submission, it cannot fail, and therefore sufficient slots need to be + * reserved by calling dma_resv_reserve_shared(). + * + * Note that the actual semantics of what an exclusive or shared fence + * mean are defined by the user; for reservation objects shared across + * drivers, see &dma_buf.resv. + */ struct dma_resv_list __rcu *fence; }; @@ -98,6 +172,13 @@ static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {} * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation * object may be locked by itself by passing NULL as @ctx. + * + * When a die situation is indicated by returning -EDEADLK, all locks held by + * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj. + * + * Unlocked by calling dma_resv_unlock(). + * + * See also dma_resv_lock_interruptible() for the interruptible variant. */ static inline int dma_resv_lock(struct dma_resv *obj, struct ww_acquire_ctx *ctx) @@ -119,6 +200,12 @@ static inline int dma_resv_lock(struct dma_resv *obj, * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation * object may be locked by itself by passing NULL as @ctx. + * + * When a die situation is indicated by returning -EDEADLK, all locks held by + * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on + * @obj. + * + * Unlocked by calling dma_resv_unlock(). */ static inline int dma_resv_lock_interruptible(struct dma_resv *obj, struct ww_acquire_ctx *ctx) @@ -134,6 +221,8 @@ static inline int dma_resv_lock_interruptible(struct dma_resv *obj, * Acquires the reservation object after a die case. This function * will sleep until the lock becomes available. See dma_resv_lock() as * well. + * + * See also dma_resv_lock_slow_interruptible() for the interruptible variant. */ static inline void dma_resv_lock_slow(struct dma_resv *obj, struct ww_acquire_ctx *ctx) @@ -167,7 +256,7 @@ static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj, * if they overlap with a writer. * * Also note that since no context is provided, no deadlock protection is - * possible. + * possible, which is also not needed for a trylock. * * Returns true if the lock was acquired, false otherwise. */ @@ -193,6 +282,11 @@ static inline bool dma_resv_is_locked(struct dma_resv *obj) * * Returns the context used to lock a reservation object or NULL if no context * was used or the object is not locked at all. + * + * WARNING: This interface is pretty horrible, but TTM needs it because it + * doesn't pass the struct ww_acquire_ctx around in some very long callchains. + * Everyone else just uses it to check whether they're holding a reservation or + * not. */ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) { -- cgit v1.2.3
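To tie the locking and fence-slot rules above together, here is a minimal sketch of attaching a read (shared) fence to a buffer. The example_attach_read_fence() helper is hypothetical; the fence and reservation object are assumed to come from the surrounding driver, and multi-object backoff is only hinted at in the comments:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static int example_attach_read_fence(struct dma_resv *resv,
				     struct dma_fence *fence,
				     struct ww_acquire_ctx *ctx)
{
	int ret;

	ret = dma_resv_lock(resv, ctx);
	if (ret == -EDEADLK) {
		/*
		 * Die case: the caller must first unlock all other
		 * reservations held under @ctx, then reacquire this one
		 * with the slowpath variant.
		 */
		dma_resv_lock_slow(resv, ctx);
		ret = 0;
	}
	if (ret)
		return ret;

	/* Reserve a slot before the point of no return ... */
	ret = dma_resv_reserve_shared(resv, 1);
	if (!ret)
		/* ... so that adding the shared fence cannot fail. */
		dma_resv_add_shared_fence(resv, fence);

	dma_resv_unlock(resv);
	return ret;
}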
From d72277b6c37db66b457fd6b77aabd5e930d58687 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 29 Jul 2021 08:39:31 +0200 Subject: dma-buf: nuke DMA_FENCE_TRACE macros v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Only the DRM GPU scheduler, radeon and amdgpu were using them, and they depend on a nonexistent config option to actually emit some code.
v2: keep the signal path as is for now Signed-off-by: Christian König Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210818105443.1578-1-christian.koenig@amd.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 10 +--------- drivers/gpu/drm/radeon/radeon_fence.c | 24 ++++-------------------- drivers/gpu/drm/scheduler/sched_fence.c | 18 ++---------------- include/linux/dma-fence.h | 22 ---------------------- 4 files changed, 7 insertions(+), 67 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 0b1c48590c43..c65994e382bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -246,7 +246,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) struct amdgpu_fence_driver *drv = &ring->fence_drv; struct amdgpu_device *adev = ring->adev; uint32_t seq, last_seq; - int r; do { last_seq = atomic_read(&ring->fence_drv.last_seq); @@ -278,12 +277,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) if (!fence) continue; - r = dma_fence_signal(fence); - if (!r) - DMA_FENCE_TRACE(fence, "signaled from irq context\n"); - else - BUG(); - + dma_fence_signal(fence); dma_fence_put(fence); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -639,8 +633,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f) if (!timer_pending(&ring->fence_drv.fallback_timer)) amdgpu_fence_schedule_fallback(ring); - DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); - return true; } diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 18f2c2e0dfb3..3f351d222cbb 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -176,18 +176,11 @@ static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, */ seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq); if (seq >= fence->seq) { - int ret = dma_fence_signal_locked(&fence->base); - - if (!ret) - DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n"); - else - DMA_FENCE_TRACE(&fence->base, "was already signaled\n"); - + dma_fence_signal_locked(&fence->base); radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring); __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake); dma_fence_put(&fence->base); - } else - DMA_FENCE_TRACE(&fence->base, "pending\n"); + } return 0; } @@ -422,8 +415,6 @@ static bool radeon_fence_enable_signaling(struct dma_fence *f) fence->fence_wake.func = radeon_fence_check_signaled; __add_wait_queue(&rdev->fence_queue, &fence->fence_wake); dma_fence_get(f); - - DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring); return true; } @@ -441,11 +432,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence) return true; if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { - int ret; - - ret = dma_fence_signal(&fence->base); - if (!ret) - DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n"); + dma_fence_signal(&fence->base); return true; } return false; @@ -550,7 +537,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo { uint64_t seq[RADEON_NUM_RINGS] = {}; long r; - int r_sig; /* * This function should not be called on !radeon fences. 
@@ -567,9 +553,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo return r; } - r_sig = dma_fence_signal(&fence->base); - if (!r_sig) - DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); + dma_fence_signal(&fence->base); return r; } diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c index bcea035cf4c6..db3fd1303fc4 100644 --- a/drivers/gpu/drm/scheduler/sched_fence.c +++ b/drivers/gpu/drm/scheduler/sched_fence.c @@ -50,26 +50,12 @@ static void __exit drm_sched_fence_slab_fini(void) void drm_sched_fence_scheduled(struct drm_sched_fence *fence) { - int ret = dma_fence_signal(&fence->scheduled); - - if (!ret) - DMA_FENCE_TRACE(&fence->scheduled, - "signaled from irq context\n"); - else - DMA_FENCE_TRACE(&fence->scheduled, - "was already signaled\n"); + dma_fence_signal(&fence->scheduled); } void drm_sched_fence_finished(struct drm_sched_fence *fence) { - int ret = dma_fence_signal(&fence->finished); - - if (!ret) - DMA_FENCE_TRACE(&fence->finished, - "signaled from irq context\n"); - else - DMA_FENCE_TRACE(&fence->finished, - "was already signaled\n"); + dma_fence_signal(&fence->finished); } static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence) diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 6ffb4b2c6371..4cc119ab272f 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -590,26 +590,4 @@ struct dma_fence *dma_fence_get_stub(void); struct dma_fence *dma_fence_allocate_private_stub(void); u64 dma_fence_context_alloc(unsigned num); -#define DMA_FENCE_TRACE(f, fmt, args...) \ - do { \ - struct dma_fence *__ff = (f); \ - if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ - pr_info("f %llu#%llu: " fmt, \ - __ff->context, __ff->seqno, ##args); \ - } while (0) - -#define DMA_FENCE_WARN(f, fmt, args...) \ - do { \ - struct dma_fence *__ff = (f); \ - pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\ - ##args); \ - } while (0) - -#define DMA_FENCE_ERR(f, fmt, args...) \ - do { \ - struct dma_fence *__ff = (f); \ - pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \ - ##args); \ - } while (0) - #endif /* __LINUX_DMA_FENCE_H */ -- cgit v1.2.3 From b83dcd753dbe42d5e7467ab65124f3d0a6002dc3 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 21 Jul 2021 11:01:04 +0200 Subject: dma-buf: clarify dma_fence_ops->wait documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This callback is pretty much deprecated and should not be used by new implementations. Clarify that in the documentation as well. Signed-off-by: Christian König Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20210901120240.7339-2-christian.koenig@amd.com --- include/linux/dma-fence.h | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 4cc119ab272f..a706b7bf51d7 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -214,19 +214,15 @@ struct dma_fence_ops { * Custom wait implementation, defaults to dma_fence_default_wait() if * not set. * - * The dma_fence_default_wait implementation should work for any fence, as long - * as @enable_signaling works correctly. This hook allows drivers to - * have an optimized version for the case where a process context is - * already available, e.g. if @enable_signaling for the general case - * needs to set up a worker thread. 
+ * Deprecated and should not be used by new implementations. Only used + * by existing implementations which need special handling for their + * hardware reset procedure. * * Must return -ERESTARTSYS if the wait is intr = true and the wait was * interrupted, and remaining jiffies if fence has signaled, or 0 if wait * timed out. Can also return other error values on custom implementations, * which should be treated as if the fence is signaled. For example a hardware * lockup could be reported like that. - * - * This callback is optional. */ signed long (*wait)(struct dma_fence *fence, bool intr, signed long timeout); -- cgit v1.2.3
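Following that guidance, a new fence implementation would simply leave .wait unset so that waiters go through dma_fence_default_wait(). A minimal sketch, with illustrative "example" names rather than any real driver:

#include <linux/dma-fence.h>

static const char *example_get_driver_name(struct dma_fence *fence)
{
	return "example";
}

static const char *example_get_timeline_name(struct dma_fence *fence)
{
	return "example-timeline";
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name = example_get_driver_name,
	.get_timeline_name = example_get_timeline_name,
	/*
	 * .wait is deliberately left unset: dma_fence_default_wait()
	 * is used instead, as the documentation above recommends.
	 */
};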