author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2019-10-27 21:00:19 +0300
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2019-10-27 21:00:19 +0300
commit | 728d90bdc9e480dc93913e59a0aa3c896c7aa697 (patch)
tree | 258b1b6ee711f0ef67fd225700d84eccec285194 /drivers/gpu/drm/i915/intel_wakeref.h
parent | cb3efd5a38855eabd26c2b631dd027169678d60f (diff)
parent | d6d5df1db6e9d7f8f76d2911707f7d5877251b02 (diff)
download | linux-728d90bdc9e480dc93913e59a0aa3c896c7aa697.tar.xz
Merge tag 'v5.4-rc5' into next
Sync up with mainline.
Diffstat (limited to 'drivers/gpu/drm/i915/intel_wakeref.h')
-rw-r--r-- | drivers/gpu/drm/i915/intel_wakeref.h | 84
1 file changed, 62 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 38275310b196..5f0c972a80fb 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -8,35 +8,56 @@
 #define INTEL_WAKEREF_H
 
 #include <linux/atomic.h>
+#include <linux/bits.h>
 #include <linux/mutex.h>
 #include <linux/refcount.h>
 #include <linux/stackdepot.h>
 #include <linux/timer.h>
+#include <linux/workqueue.h>
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
+#else
+#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
 
 struct intel_runtime_pm;
+struct intel_wakeref;
 
 typedef depot_stack_handle_t intel_wakeref_t;
 
+struct intel_wakeref_ops {
+	int (*get)(struct intel_wakeref *wf);
+	int (*put)(struct intel_wakeref *wf);
+
+	unsigned long flags;
+#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
+};
+
 struct intel_wakeref {
 	atomic_t count;
 	struct mutex mutex;
+
 	intel_wakeref_t wakeref;
+
+	struct intel_runtime_pm *rpm;
+	const struct intel_wakeref_ops *ops;
+
+	struct work_struct work;
 };
 
 void __intel_wakeref_init(struct intel_wakeref *wf,
+			  struct intel_runtime_pm *rpm,
+			  const struct intel_wakeref_ops *ops,
 			  struct lock_class_key *key);
-#define intel_wakeref_init(wf) do {					\
+#define intel_wakeref_init(wf, rpm, ops) do {				\
 	static struct lock_class_key __key;				\
 									\
-	__intel_wakeref_init((wf), &__key);				\
+	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
 } while (0)
 
-int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
-			      struct intel_wakeref *wf,
-			      int (*fn)(struct intel_wakeref *wf));
-int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
-			     struct intel_wakeref *wf,
-			     int (*fn)(struct intel_wakeref *wf));
+int __intel_wakeref_get_first(struct intel_wakeref *wf);
+void __intel_wakeref_put_last(struct intel_wakeref *wf);
 
 /**
  * intel_wakeref_get: Acquire the wakeref
@@ -55,12 +76,10 @@ int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
  * code otherwise.
  */
 static inline int
-intel_wakeref_get(struct intel_runtime_pm *rpm,
-		  struct intel_wakeref *wf,
-		  int (*fn)(struct intel_wakeref *wf))
+intel_wakeref_get(struct intel_wakeref *wf)
 {
 	if (unlikely(!atomic_inc_not_zero(&wf->count)))
-		return __intel_wakeref_get_first(rpm, wf, fn);
+		return __intel_wakeref_get_first(wf);
 
 	return 0;
 }
@@ -96,15 +115,12 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
  * Returns: 0 if the wakeref was released successfully, or a negative error
  * code otherwise.
  */
-static inline int
-intel_wakeref_put(struct intel_runtime_pm *rpm,
-		  struct intel_wakeref *wf,
-		  int (*fn)(struct intel_wakeref *wf))
+static inline void
+intel_wakeref_put(struct intel_wakeref *wf)
 {
-	if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
-		return __intel_wakeref_put_last(rpm, wf, fn);
-
-	return 0;
+	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
+	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
+		__intel_wakeref_put_last(wf);
 }
 
 /**
@@ -136,17 +152,41 @@ intel_wakeref_unlock(struct intel_wakeref *wf)
 }
 
 /**
- * intel_wakeref_active: Query whether the wakeref is currently held
+ * intel_wakeref_is_active: Query whether the wakeref is currently held
  * @wf: the wakeref
  *
  * Returns: true if the wakeref is currently held.
  */
 static inline bool
-intel_wakeref_active(struct intel_wakeref *wf)
+intel_wakeref_is_active(const struct intel_wakeref *wf)
 {
 	return READ_ONCE(wf->wakeref);
 }
 
+/**
+ * __intel_wakeref_defer_park: Defer the current park callback
+ * @wf: the wakeref
+ */
+static inline void
+__intel_wakeref_defer_park(struct intel_wakeref *wf)
+{
+	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
+	atomic_set_release(&wf->count, 1);
+}
+
+/**
+ * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
+ * @wf: the wakeref
+ *
+ * Wait for the earlier asynchronous release of the wakeref. Note
+ * this will wait for any third party as well, so make sure you only wait
+ * when you have control over the wakeref and trust no one else is acquiring
+ * it.
+ *
+ * Return: 0 on success, error code if killed.
+ */
+int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
+
 struct intel_wakeref_auto {
 	struct intel_runtime_pm *rpm;
 	struct timer_list timer;
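For context, the interface change pulled in here folds the runtime-pm handle and the park/unpark callbacks into the wakeref itself: callers describe them once via struct intel_wakeref_ops at init time instead of passing rpm and a callback on every get/put. Below is a minimal usage sketch of the new API, not code from this commit; the my_engine type, the my_engine_unpark/my_engine_park callbacks and the surrounding helper names are hypothetical stand-ins for whatever object embeds the struct intel_wakeref.

```c
/* Illustrative sketch only; "my_*" names are hypothetical. */
#include "intel_runtime_pm.h"
#include "intel_wakeref.h"

struct my_engine {
	struct intel_runtime_pm *rpm;
	struct intel_wakeref wakeref;
};

/* ops->get: called on the 0 -> 1 transition, under wf->mutex. */
static int my_engine_unpark(struct intel_wakeref *wf)
{
	/* power up the hypothetical hardware state here */
	return 0;
}

/* ops->put: called on the final release; with PUT_ASYNC it may run from wf->work. */
static int my_engine_park(struct intel_wakeref *wf)
{
	/* flush and power down the hypothetical hardware state here */
	return 0;
}

static const struct intel_wakeref_ops my_engine_wakeref_ops = {
	.get = my_engine_unpark,
	.put = my_engine_park,
	.flags = INTEL_WAKEREF_PUT_ASYNC,	/* allow the final put to be deferred */
};

static void my_engine_pm_init(struct my_engine *engine)
{
	/* rpm and ops are stored in the wakeref once, at init time ... */
	intel_wakeref_init(&engine->wakeref, engine->rpm, &my_engine_wakeref_ops);
}

static int my_engine_pm_get(struct my_engine *engine)
{
	/* ... so get/put now only take the wakeref itself. */
	return intel_wakeref_get(&engine->wakeref);
}

static void my_engine_pm_put(struct my_engine *engine)
{
	intel_wakeref_put(&engine->wakeref);
}
```

The INTEL_WAKEREF_PUT_ASYNC flag pairs with the new work_struct member: the final put can be bounced out of atomic context, which is also why intel_wakeref_wait_for_idle() is added to flush such an asynchronous release.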