| author | Matthew Wilcox <matthew@wil.cx> | 2008-03-08 05:55:58 +0300 |
|---|---|---|
| committer | Matthew Wilcox <willy@linux.intel.com> | 2008-04-17 18:42:34 +0400 |
| commit | 64ac24e738823161693bf791f87adc802cf529ff (patch) | |
| tree | 19c0b0cf314d4394ca580c05b86cdf874ce0a167 /include/linux/semaphore.h | |
| parent | e48b3deee475134585eed03e7afebe4bf9e0dba9 (diff) | |
| download | linux-64ac24e738823161693bf791f87adc802cf529ff.tar.xz | |
Generic semaphore implementation
Semaphores are no longer performance-critical, so a generic C
implementation is better for maintainability, debuggability and
extensibility. Thanks to Peter Zijlstra for fixing the lockdep
warning. Thanks to Harvey Harrison for pointing out that the
unlikely() was unnecessary.
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/semaphore.h')
-rw-r--r-- | include/linux/semaphore.h | 77 |
1 file changed, 77 insertions, 0 deletions
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
new file mode 100644
index 000000000000..b3c691b089b2
--- /dev/null
+++ b/include/linux/semaphore.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ * Author: Matthew Wilcox <willy@linux.intel.com>
+ *
+ * Distributed under the terms of the GNU GPL, version 2
+ *
+ * Counting semaphores allow up to <n> tasks to acquire the semaphore
+ * simultaneously.
+ */
+#ifndef __LINUX_SEMAPHORE_H
+#define __LINUX_SEMAPHORE_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+/*
+ * The spinlock controls access to the other members of the semaphore.
+ * 'count' is decremented by every task which calls down*() and incremented
+ * by every call to up().  Thus, if it is positive, it indicates how many
+ * more tasks may acquire the lock.  If it is negative, it indicates how
+ * many tasks are waiting for the lock.  Tasks waiting for the lock are
+ * kept on the wait_list.
+ */
+struct semaphore {
+        spinlock_t lock;
+        int count;
+        struct list_head wait_list;
+};
+
+#define __SEMAPHORE_INITIALIZER(name, n) \
+{ \
+        .lock = __SPIN_LOCK_UNLOCKED((name).lock), \
+        .count = n, \
+        .wait_list = LIST_HEAD_INIT((name).wait_list), \
+}
+
+#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
+        struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
+
+static inline void sema_init(struct semaphore *sem, int val)
+{
+        static struct lock_class_key __key;
+        *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
+        lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
+}
+
+#define init_MUTEX(sem) sema_init(sem, 1)
+#define init_MUTEX_LOCKED(sem) sema_init(sem, 0)
+
+/*
+ * Attempt to acquire the semaphore.  If another task is already holding the
+ * semaphore, sleep until the semaphore is released.
+ */
+extern void down(struct semaphore *sem);
+
+/*
+ * As down(), except the sleep may be interrupted by a signal.  If it is,
+ * this function will return -EINTR.
+ */
+extern int __must_check down_interruptible(struct semaphore *sem);
+
+/*
+ * As down(), except this function will not sleep.  It will return 0 if it
+ * acquired the semaphore and 1 if the semaphore was contended.  This
+ * function may be called from any context, including interrupt and softirq.
+ */
+extern int __must_check down_trylock(struct semaphore *sem);
+
+/*
+ * Release the semaphore.  Unlike mutexes, up() may be called from any
+ * context and even by tasks which have never called down().
+ */
+extern void up(struct semaphore *sem);
+
+#endif /* __LINUX_SEMAPHORE_H */
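
For illustration, a minimal sketch of the two initialization paths the header offers, assuming a kernel build environment; the identifiers `foo_sem`, `foo_mutex`, and `foo_device` are hypothetical and not part of the patch. A file-scope semaphore can be initialized at compile time with the declaration macros, while a semaphore embedded in a dynamically allocated structure is set up at runtime with `sema_init()`, which also registers the embedded spinlock with lockdep.

```c
#include <linux/semaphore.h>
#include <linux/slab.h>

/* Compile-time initialization: a counting semaphore that admits up to
 * four holders, and a binary semaphore (the classic kernel "mutex"). */
static __DECLARE_SEMAPHORE_GENERIC(foo_sem, 4);
static DECLARE_MUTEX(foo_mutex);

/* Hypothetical device structure with an embedded semaphore. */
struct foo_device {
        struct semaphore sem;
};

static struct foo_device *foo_device_alloc(void)
{
        struct foo_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (dev)
                /* Runtime initialization for dynamically allocated objects. */
                sema_init(&dev->sem, 4);
        return dev;
}
```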
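A sketch of the blocking entry points from process context; `foo_do_work()` is a hypothetical caller. `down()` sleeps uninterruptibly until the semaphore is available, whereas `down_interruptible()` also gives up when a signal arrives and returns -EINTR, which is why it is marked `__must_check`.

```c
#include <linux/errno.h>
#include <linux/semaphore.h>

static int foo_do_work(struct semaphore *sem)
{
        /* May sleep; bail out with -EINTR if a signal is delivered
         * while waiting for the semaphore. */
        if (down_interruptible(sem))
                return -EINTR;

        /* At most 'count' tasks execute this region concurrently. */

        up(sem);
        return 0;
}
```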
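Finally, a note on `down_trylock()`: per the comment above, it returns 0 when the semaphore was acquired and 1 when it was contended, the opposite sense of `spin_trylock()`, so the result is tested directly rather than negated. The helper below is a hypothetical illustration of non-blocking use from a context that must not sleep.

```c
#include <linux/errno.h>
#include <linux/semaphore.h>

/* Safe from any context, including interrupt and softirq. */
static int foo_try_claim(struct semaphore *sem)
{
        if (down_trylock(sem))
                return -EBUSY;  /* contended: do not sleep, report busy */

        /* Acquired: the caller releases with up(sem) when finished. */
        return 0;
}
```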