-rw-r--r--  drivers/hwspinlock/hwspinlock_core.c | 34 +++++++++++++++++++-------
-rw-r--r--  include/linux/hwspinlock.h           | 58 +++++++++++++++++++++++++++
2 files changed, 85 insertions(+), 7 deletions(-)
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index f4a59f5631e4..5278d0560a4a 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -71,10 +71,16 @@ static DEFINE_MUTEX(hwspinlock_tree_lock);
  * This function attempts to lock an hwspinlock, and will immediately
  * fail if the hwspinlock is already taken.
  *
- * Upon a successful return from this function, preemption (and possibly
- * interrupts) is disabled, so the caller must not sleep, and is advised to
- * release the hwspinlock as soon as possible. This is required in order to
- * minimize remote cores polling on the hardware interconnect.
+ * Caution: If the mode is HWLOCK_RAW, the caller must serialize calls to this
+ * function with a mutex or spinlock of its own, since in some scenarios users
+ * need to perform time-consuming or sleepable operations under the hardware
+ * lock and thus need a sleepable lock (such as a mutex) to protect them.
+ *
+ * If the mode is not HWLOCK_RAW, upon a successful return from this function,
+ * preemption (and possibly interrupts) is disabled, so the caller must not
+ * sleep, and is advised to release the hwspinlock as soon as possible. This is
+ * required in order to minimize remote cores polling on the hardware
+ * interconnect.
  *
  * The user decides whether local interrupts are disabled or not, and if yes,
  * whether he wants their previous state to be saved. It is up to the user
@@ -113,6 +119,9 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 	case HWLOCK_IRQ:
 		ret = spin_trylock_irq(&hwlock->lock);
 		break;
+	case HWLOCK_RAW:
+		ret = 1;
+		break;
 	default:
 		ret = spin_trylock(&hwlock->lock);
 		break;
@@ -134,6 +143,9 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 	case HWLOCK_IRQ:
 		spin_unlock_irq(&hwlock->lock);
 		break;
+	case HWLOCK_RAW:
+		/* Nothing to do */
+		break;
 	default:
 		spin_unlock(&hwlock->lock);
 		break;
@@ -170,9 +182,14 @@ EXPORT_SYMBOL_GPL(__hwspin_trylock);
  * is already taken, the function will busy loop waiting for it to
  * be released, but give up after @timeout msecs have elapsed.
  *
- * Upon a successful return from this function, preemption is disabled
- * (and possibly local interrupts, too), so the caller must not sleep,
- * and is advised to release the hwspinlock as soon as possible.
+ * Caution: If the mode is HWLOCK_RAW, the caller must serialize calls to this
+ * function with a mutex or spinlock of its own, since in some scenarios users
+ * need to perform time-consuming or sleepable operations under the hardware
+ * lock and thus need a sleepable lock (such as a mutex) to protect them.
+ *
+ * If the mode is not HWLOCK_RAW, upon a successful return from this function,
+ * preemption is disabled (and possibly local interrupts, too), so the caller
+ * must not sleep, and is advised to release the hwspinlock as soon as possible.
  * This is required in order to minimize remote cores polling on the
  * hardware interconnect.
  *
@@ -266,6 +283,9 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 	case HWLOCK_IRQ:
 		spin_unlock_irq(&hwlock->lock);
 		break;
+	case HWLOCK_RAW:
+		/* Nothing to do */
+		break;
 	default:
 		spin_unlock(&hwlock->lock);
 		break;
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index 859d673d98c8..fe450ee58d55 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -24,6 +24,7 @@
 /* hwspinlock mode argument */
 #define HWLOCK_IRQSTATE	0x01	/* Disable interrupts, save state */
 #define HWLOCK_IRQ	0x02	/* Disable interrupts, don't save state */
+#define HWLOCK_RAW	0x03	/* Caller serializes access, IRQs untouched */
 
 struct device;
 struct device_node;
@@ -176,6 +177,25 @@ static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
 }
 
 /**
+ * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock an hwspinlock, and will immediately fail
+ * if the hwspinlock is already taken.
+ *
+ * Caution: The caller must serialize calls to this function with a mutex or
+ * spinlock of its own to avoid deadlock; doing so allows time-consuming or
+ * sleepable operations to be performed under the hardware lock.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
+{
+	return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
+}
+
+/**
  * hwspin_trylock() - attempt to lock a specific hwspinlock
  * @hwlock: an hwspinlock which we want to trylock
  *
@@ -243,6 +263,29 @@ int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
 }
 
 /**
+ * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @to msecs have elapsed.
+ *
+ * Caution: The caller must serialize calls to this function with a mutex or
+ * spinlock of its own to avoid deadlock; doing so allows time-consuming or
+ * sleepable operations to be performed under the hardware lock.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
+ * busy after @to msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
+{
+	return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
+}
+
+/**
  * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
  * @hwlock: the hwspinlock to be locked
  * @to: timeout value in msecs
@@ -302,6 +345,21 @@ static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
 }
 
 /**
+ * hwspin_unlock_raw() - unlock hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock.
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
+ * this function: it is a bug to call unlock on a @hwlock that is already
+ * unlocked.
+ */
+static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
+{
+	__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
+}
+
+/**
  * hwspin_unlock() - unlock hwspinlock
  * @hwlock: a previously-acquired hwspinlock which we want to unlock
  *
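For illustration, below is a minimal driver-side sketch of how the new HWLOCK_RAW mode is meant to be used. It is not part of the patch; struct my_dev, my_dev_slow_update and the 50 msec timeout are hypothetical, and only the hwspinlock calls come from this API. Since raw mode leaves preemption and interrupts untouched, the caller serializes local access with its own mutex and may sleep while the hardware lock is held:

#include <linux/delay.h>
#include <linux/hwspinlock.h>
#include <linux/mutex.h>

/*
 * Hypothetical driver state: hwlock guards shared hardware against remote
 * cores; sw_lock serializes local callers, as HWLOCK_RAW requires.
 */
struct my_dev {
	struct hwspinlock *hwlock;
	struct mutex sw_lock;
};

static int my_dev_slow_update(struct my_dev *dev)
{
	int ret;

	/* Sleepable lock first: only one local context may take the hwlock */
	mutex_lock(&dev->sw_lock);

	/* Busy-wait up to 50 msecs for remote cores to release the lock */
	ret = hwspin_lock_timeout_raw(dev->hwlock, 50);
	if (ret)
		goto out;

	/*
	 * Raw mode leaves preemption and interrupts enabled, so sleepable,
	 * time-consuming work is allowed here (msleep stands in for e.g.
	 * an I2C transfer to the shared hardware).
	 */
	msleep(10);

	hwspin_unlock_raw(dev->hwlock);
out:
	mutex_unlock(&dev->sw_lock);
	return ret;
}

A plain spinlock would also satisfy the serialization requirement, but only a sleepable lock such as a mutex lets the caller sleep between lock and unlock, which is the point of the new mode.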