author    | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-03 22:14:18 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-03 22:14:18 +0300
commit    | 892ad5acca0b2ddb514fae63fa4686bf726d2471 (patch)
tree      | 9945b41e6552c0b6b9b1b5b0ffb77f2f15857671
parent    | 162b246eb420d2ca2002a50917c897b10c9aba09 (diff)
parent    | 5d6dec6fba38c3e2d408df108bb92ef4ac201f18 (diff)
download  | linux-892ad5acca0b2ddb514fae63fa4686bf726d2471.tar.xz
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"The main changes in this cycle were:
- Add CONFIG_REFCOUNT_FULL to make the 'full' (robustness-checked)
  refcount_t implementation optional: when the option is not enabled,
  a fast unchecked atomic_t-based variant with slightly lower runtime
  overhead is used instead. (Kees Cook)
  The lighter-weight variant is the default. The two variants use the
  same API (a short usage sketch follows this summary). Having this
  variant was a precondition set by some maintainers for merging
  refcount_t cleanups.
- Add lockdep support for rtmutexes (Peter Zijlstra)
- liblockdep fixes and improvements (Sasha Levin, Ben Hutchings)
- ... misc fixes and improvements"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (30 commits)
locking/refcount: Remove the half-implemented refcount_sub() API
locking/refcount: Create unchecked atomic_t implementation
locking/rtmutex: Don't initialize lockdep when not required
locking/selftest: Add RT-mutex support
locking/selftest: Remove the bad unlock ordering test
rt_mutex: Add lockdep annotations
MAINTAINERS: Claim atomic*_t maintainership
locking/x86: Remove the unused atomic_inc_short() method
tools/lib/lockdep: Remove private kernel headers
tools/lib/lockdep: Hide liblockdep output from test results
tools/lib/lockdep: Add dummy current_gfp_context()
tools/include: Add IS_ERR_OR_NULL to err.h
tools/lib/lockdep: Add empty __is_[module,kernel]_percpu_address
tools/lib/lockdep: Include err.h
tools/include: Add (mostly) empty include/linux/sched/mm.h
tools/lib/lockdep: Use LDFLAGS
tools/lib/lockdep: Remove double-quotes from soname
tools/lib/lockdep: Fix object file paths used in an out-of-tree build
tools/lib/lockdep: Fix compilation for 4.11
tools/lib/lockdep: Don't mix fd-based and stream IO
...
72 files changed, 641 insertions, 234 deletions
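One practical effect of the rt_mutex lockdep support in the diff below: the rt_mutex lock/unlock paths now carry mutex_acquire()/mutex_release() annotations and a per-class dep_map, so lock-ordering violations between rt_mutexes are reported just as they are for regular mutexes. A hedged sketch of the pattern this catches (the two locks and ab_path()/ba_path() are illustrative names, not from the patch):

```c
#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(rtm_a);
static DEFINE_RT_MUTEX(rtm_b);

/* Illustrative only: one path takes A then B ... */
static void ab_path(void)
{
	rt_mutex_lock(&rtm_a);
	rt_mutex_lock(&rtm_b);
	rt_mutex_unlock(&rtm_b);
	rt_mutex_unlock(&rtm_a);
}

/*
 * ... and another takes B then A.  With the dep_map added in this
 * series, lockdep flags this AB-BA inversion for rt_mutexes too.
 */
static void ba_path(void)
{
	rt_mutex_lock(&rtm_b);
	rt_mutex_lock(&rtm_a);
	rt_mutex_unlock(&rtm_a);
	rt_mutex_unlock(&rtm_b);
}
```

The locking self-tests below exercise exactly this class of case through the new lib/locking-selftest-rtmutex.h header.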
diff --git a/MAINTAINERS b/MAINTAINERS index 503f80a43bec..d357695ee4fe 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2322,6 +2322,15 @@ F: Documentation/devicetree/bindings/input/atmel,maxtouch.txt F: drivers/input/touchscreen/atmel_mxt_ts.c F: include/linux/platform_data/atmel_mxt_ts.h +ATOMIC INFRASTRUCTURE +M: Will Deacon <will.deacon@arm.com> +M: Peter Zijlstra <peterz@infradead.org> +R: Boqun Feng <boqun.feng@gmail.com> +L: linux-kernel@vger.kernel.org +S: Maintained +F: arch/*/include/asm/atomic*.h +F: include/*/atomic*.h + ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER M: Bradley Grove <linuxdrivers@attotech.com> L: linux-scsi@vger.kernel.org @@ -7555,7 +7564,7 @@ S: Maintained F: drivers/ata/sata_promise.* LIBLOCKDEP -M: Sasha Levin <sasha.levin@oracle.com> +M: Sasha Levin <alexander.levin@verizon.com> S: Maintained F: tools/lib/lockdep/ diff --git a/arch/Kconfig b/arch/Kconfig index 6c00e5b00f8b..f76b214cf7ad 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -867,4 +867,13 @@ config STRICT_MODULE_RWX config ARCH_WANT_RELAX_ORDER bool +config REFCOUNT_FULL + bool "Perform full reference count validation at the expense of speed" + help + Enabling this switches the refcounting infrastructure from a fast + unchecked atomic_t implementation to a fully state checked + implementation, which can be (slightly) slower but provides protections + against various use-after-free conditions that can be used in + security flaw exploits. + source "kernel/gcov/Kconfig" diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S index 1a70e6c0f259..94709ab41ed8 100644 --- a/arch/tile/lib/atomic_asm_32.S +++ b/arch/tile/lib/atomic_asm_32.S @@ -24,8 +24,7 @@ * has an opportunity to return -EFAULT to the user if needed. * The 64-bit routines just return a "long long" with the value, * since they are only used from kernel space and don't expect to fault. - * Support for 16-bit ops is included in the framework but we don't provide - * any (x86_64 has an atomic_inc_short(), so we might want to some day). + * Support for 16-bit ops is included in the framework but we don't provide any. * * Note that the caller is advised to issue a suitable L1 or L2 * prefetch on the address being manipulated to avoid extra stalls. 
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index caa5798c92f4..33380b871463 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -246,19 +246,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) return c; } -/** - * atomic_inc_short - increment of a short integer - * @v: pointer to type int - * - * Atomically adds 1 to @v - * Returns the new value of @u - */ -static __always_inline short int atomic_inc_short(short int *v) -{ - asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v)); - return *v; -} - #ifdef CONFIG_X86_32 # include <asm/atomic64_32.h> #else diff --git a/include/linux/refcount.h b/include/linux/refcount.h index b34aa649d204..591792c8e5b0 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -41,6 +41,7 @@ static inline unsigned int refcount_read(const refcount_t *r) return atomic_read(&r->refs); } +#ifdef CONFIG_REFCOUNT_FULL extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r); extern void refcount_add(unsigned int i, refcount_t *r); @@ -48,10 +49,45 @@ extern __must_check bool refcount_inc_not_zero(refcount_t *r); extern void refcount_inc(refcount_t *r); extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r); -extern void refcount_sub(unsigned int i, refcount_t *r); extern __must_check bool refcount_dec_and_test(refcount_t *r); extern void refcount_dec(refcount_t *r); +#else +static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r) +{ + return atomic_add_unless(&r->refs, i, 0); +} + +static inline void refcount_add(unsigned int i, refcount_t *r) +{ + atomic_add(i, &r->refs); +} + +static inline __must_check bool refcount_inc_not_zero(refcount_t *r) +{ + return atomic_add_unless(&r->refs, 1, 0); +} + +static inline void refcount_inc(refcount_t *r) +{ + atomic_inc(&r->refs); +} + +static inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r) +{ + return atomic_sub_and_test(i, &r->refs); +} + +static inline __must_check bool refcount_dec_and_test(refcount_t *r) +{ + return atomic_dec_and_test(&r->refs); +} + +static inline void refcount_dec(refcount_t *r) +{ + atomic_dec(&r->refs); +} +#endif /* CONFIG_REFCOUNT_FULL */ extern __must_check bool refcount_dec_if_one(refcount_t *r); extern __must_check bool refcount_dec_not_one(refcount_t *r); diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 1abba5ce2a2f..44fd002f7cd5 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -37,6 +37,9 @@ struct rt_mutex { int line; void *magic; #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif }; struct rt_mutex_waiter; @@ -58,19 +61,33 @@ struct hrtimer_sleeper; #ifdef CONFIG_DEBUG_RT_MUTEXES # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ , .name = #mutexname, .file = __FILE__, .line = __LINE__ -# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) + +# define rt_mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + __rt_mutex_init(mutex, __func__, &__key); \ +} while (0) + extern void rt_mutex_debug_task_free(struct task_struct *tsk); #else # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) -# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) +# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL) # define rt_mutex_debug_task_free(t) do { } while (0) #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ + , .dep_map = { .name = #mutexname } +#else +#define 
__DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) +#endif + #define __RT_MUTEX_INITIALIZER(mutexname) \ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ , .waiters = RB_ROOT \ , .owner = NULL \ - __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} + __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ + __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)} #define DEFINE_RT_MUTEX(mutexname) \ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) @@ -86,7 +103,7 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock) return lock->owner != NULL; } -extern void __rt_mutex_init(struct rt_mutex *lock, const char *name); +extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); extern void rt_mutex_destroy(struct rt_mutex *lock); extern void rt_mutex_lock(struct rt_mutex *lock); diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c index 58e366ad36f4..ac35e648b0e5 100644 --- a/kernel/locking/rtmutex-debug.c +++ b/kernel/locking/rtmutex-debug.c @@ -166,12 +166,16 @@ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) memset(waiter, 0x22, sizeof(*waiter)); } -void debug_rt_mutex_init(struct rt_mutex *lock, const char *name) +void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key) { /* * Make sure we are not reinitializing a held lock: */ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lock->name = name; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + lockdep_init_map(&lock->dep_map, name, key, 0); +#endif } diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h index b585af9a1b50..5078c6ddf4a5 100644 --- a/kernel/locking/rtmutex-debug.h +++ b/kernel/locking/rtmutex-debug.h @@ -11,7 +11,7 @@ extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter); -extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name); +extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); extern void debug_rt_mutex_lock(struct rt_mutex *lock); extern void debug_rt_mutex_unlock(struct rt_mutex *lock); extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 28cd09e635ed..78069895032a 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1481,6 +1481,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock) { might_sleep(); + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); } EXPORT_SYMBOL_GPL(rt_mutex_lock); @@ -1496,9 +1497,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); */ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) { + int ret; + might_sleep(); - return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + + return ret; } EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); @@ -1526,11 +1534,18 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) int rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) { + int ret; + might_sleep(); - return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, RT_MUTEX_MIN_CHAINWALK, rt_mutex_slowlock); + if (ret) + 
mutex_release(&lock->dep_map, 1, _RET_IP_); + + return ret; } EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); @@ -1547,10 +1562,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); */ int __sched rt_mutex_trylock(struct rt_mutex *lock) { + int ret; + if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) return 0; - return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); + ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); + if (ret) + mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); + + return ret; } EXPORT_SYMBOL_GPL(rt_mutex_trylock); @@ -1561,6 +1582,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock); */ void __sched rt_mutex_unlock(struct rt_mutex *lock) { + mutex_release(&lock->dep_map, 1, _RET_IP_); rt_mutex_fastunlock(lock, rt_mutex_slowunlock); } EXPORT_SYMBOL_GPL(rt_mutex_unlock); @@ -1620,7 +1642,6 @@ void rt_mutex_destroy(struct rt_mutex *lock) lock->magic = NULL; #endif } - EXPORT_SYMBOL_GPL(rt_mutex_destroy); /** @@ -1632,14 +1653,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy); * * Initializing of a locked rt lock is not allowed */ -void __rt_mutex_init(struct rt_mutex *lock, const char *name) +void __rt_mutex_init(struct rt_mutex *lock, const char *name, + struct lock_class_key *key) { lock->owner = NULL; raw_spin_lock_init(&lock->wait_lock); lock->waiters = RB_ROOT; lock->waiters_leftmost = NULL; - debug_rt_mutex_init(lock, name); + if (name && key) + debug_rt_mutex_init(lock, name, key); } EXPORT_SYMBOL_GPL(__rt_mutex_init); @@ -1660,7 +1683,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init); void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner) { - __rt_mutex_init(lock, NULL); + __rt_mutex_init(lock, NULL, NULL); debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_set_owner(lock, proxy_owner); } diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h index 6607802efa8b..5c253caffe91 100644 --- a/kernel/locking/rtmutex.h +++ b/kernel/locking/rtmutex.h @@ -17,7 +17,7 @@ #define debug_rt_mutex_proxy_lock(l,p) do { } while (0) #define debug_rt_mutex_proxy_unlock(l) do { } while (0) #define debug_rt_mutex_unlock(l) do { } while (0) -#define debug_rt_mutex_init(m, n) do { } while (0) +#define debug_rt_mutex_init(m, n, k) do { } while (0) #define debug_rt_mutex_deadlock(d, a ,l) do { } while (0) #define debug_rt_mutex_print_deadlock(w) do { } while (0) #define debug_rt_mutex_reset_waiter(w) do { } while (0) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a7a751a75cfd..9c5d40a50930 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1052,6 +1052,7 @@ config DEBUG_LOCK_ALLOC depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT select DEBUG_SPINLOCK select DEBUG_MUTEXES + select DEBUG_RT_MUTEXES if RT_MUTEXES select LOCKDEP help This feature will check whether any held lock (spinlock, rwlock, @@ -1067,6 +1068,7 @@ config PROVE_LOCKING select LOCKDEP select DEBUG_SPINLOCK select DEBUG_MUTEXES + select DEBUG_RT_MUTEXES if RT_MUTEXES select DEBUG_LOCK_ALLOC select TRACE_IRQFLAGS default n @@ -1121,6 +1123,7 @@ config LOCK_STAT select LOCKDEP select DEBUG_SPINLOCK select DEBUG_MUTEXES + select DEBUG_RT_MUTEXES if RT_MUTEXES select DEBUG_LOCK_ALLOC default n help diff --git a/lib/locking-selftest-rtmutex.h b/lib/locking-selftest-rtmutex.h new file mode 100644 index 000000000000..e3cb83989d16 --- /dev/null +++ b/lib/locking-selftest-rtmutex.h @@ -0,0 +1,11 @@ +#undef LOCK +#define LOCK RTL + +#undef UNLOCK +#define UNLOCK RTU + +#undef RLOCK +#undef WLOCK + +#undef INIT +#define INIT RTI diff --git 
a/lib/locking-selftest.c b/lib/locking-selftest.c index f3a217ea0388..6f2b135dc5e8 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c @@ -21,6 +21,7 @@ #include <linux/interrupt.h> #include <linux/debug_locks.h> #include <linux/irqflags.h> +#include <linux/rtmutex.h> /* * Change this to 1 if you want to see the failure printouts: @@ -46,6 +47,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose); #define LOCKTYPE_MUTEX 0x4 #define LOCKTYPE_RWSEM 0x8 #define LOCKTYPE_WW 0x10 +#define LOCKTYPE_RTMUTEX 0x20 static struct ww_acquire_ctx t, t2; static struct ww_mutex o, o2, o3; @@ -74,6 +76,15 @@ static DECLARE_RWSEM(rwsem_B); static DECLARE_RWSEM(rwsem_C); static DECLARE_RWSEM(rwsem_D); +#ifdef CONFIG_RT_MUTEXES + +static DEFINE_RT_MUTEX(rtmutex_A); +static DEFINE_RT_MUTEX(rtmutex_B); +static DEFINE_RT_MUTEX(rtmutex_C); +static DEFINE_RT_MUTEX(rtmutex_D); + +#endif + /* * Locks that we initialize dynamically as well so that * e.g. X1 and X2 becomes two instances of the same class, @@ -108,6 +119,17 @@ static DECLARE_RWSEM(rwsem_Y2); static DECLARE_RWSEM(rwsem_Z1); static DECLARE_RWSEM(rwsem_Z2); +#ifdef CONFIG_RT_MUTEXES + +static DEFINE_RT_MUTEX(rtmutex_X1); +static DEFINE_RT_MUTEX(rtmutex_X2); +static DEFINE_RT_MUTEX(rtmutex_Y1); +static DEFINE_RT_MUTEX(rtmutex_Y2); +static DEFINE_RT_MUTEX(rtmutex_Z1); +static DEFINE_RT_MUTEX(rtmutex_Z2); + +#endif + /* * non-inlined runtime initializers, to let separate locks share * the same lock-class: @@ -129,6 +151,17 @@ INIT_CLASS_FUNC(Z) static void init_shared_classes(void) { +#ifdef CONFIG_RT_MUTEXES + static struct lock_class_key rt_X, rt_Y, rt_Z; + + __rt_mutex_init(&rtmutex_X1, __func__, &rt_X); + __rt_mutex_init(&rtmutex_X2, __func__, &rt_X); + __rt_mutex_init(&rtmutex_Y1, __func__, &rt_Y); + __rt_mutex_init(&rtmutex_Y2, __func__, &rt_Y); + __rt_mutex_init(&rtmutex_Z1, __func__, &rt_Z); + __rt_mutex_init(&rtmutex_Z2, __func__, &rt_Z); +#endif + init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1); init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2); @@ -193,6 +226,10 @@ static void init_shared_classes(void) #define MU(x) mutex_unlock(&mutex_##x) #define MI(x) mutex_init(&mutex_##x) +#define RTL(x) rt_mutex_lock(&rtmutex_##x) +#define RTU(x) rt_mutex_unlock(&rtmutex_##x) +#define RTI(x) rt_mutex_init(&rtmutex_##x) + #define WSL(x) down_write(&rwsem_##x) #define WSU(x) up_write(&rwsem_##x) @@ -264,6 +301,11 @@ GENERATE_TESTCASE(AA_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(AA_rsem) +#ifdef CONFIG_RT_MUTEXES +#include "locking-selftest-rtmutex.h" +GENERATE_TESTCASE(AA_rtmutex); +#endif + #undef E /* @@ -345,6 +387,11 @@ GENERATE_TESTCASE(ABBA_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(ABBA_rsem) +#ifdef CONFIG_RT_MUTEXES +#include "locking-selftest-rtmutex.h" +GENERATE_TESTCASE(ABBA_rtmutex); +#endif + #undef E /* @@ -373,6 +420,11 @@ GENERATE_TESTCASE(ABBCCA_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(ABBCCA_rsem) +#ifdef CONFIG_RT_MUTEXES +#include "locking-selftest-rtmutex.h" +GENERATE_TESTCASE(ABBCCA_rtmutex); +#endif + #undef E /* @@ -401,6 +453,11 @@ GENERATE_TESTCASE(ABCABC_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(ABCABC_rsem) +#ifdef CONFIG_RT_MUTEXES +#include "locking-selftest-rtmutex.h" +GENERATE_TESTCASE(ABCABC_rtmutex); +#endif + #undef E /* @@ -430,6 +487,11 @@ GENERATE_TESTCASE(ABBCCDDA_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(ABBCCDDA_rsem) +#ifdef CONFIG_RT_MUTEXES +#include "locking-selftest-rtmutex.h" 
+GENERATE_TESTCASE(ABBCCDDA_rtmutex); +#endif + #undef E /* @@ -458,6 +520,11 @@ GENERATE_TESTCASE(ABCDBDDA_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(ABCDBDDA_rsem) +#ifdef CONFIG_RT_MUTEXES +#include "locking-selftest-rtmutex.h" +GENERATE_TESTCASE(ABCDBDDA_rtmutex); +#endif + #undef E /* @@ -486,6 +553,11 @@ GENERATE_TESTCASE(ABCDBCDA_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(ABCDBCDA_rsem) +#ifdef CONFIG_RT_MUTEXES +#include "locking-selftest-rtmutex.h" +GENERATE_TESTCASE(ABCDBCDA_rtmutex); +#endif + #undef E /* @@ -513,33 +585,10 @@ GENERATE_TESTCASE(double_unlock_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(double_unlock_rsem) -#undef E - -/* - * Bad unlock ordering: - */ -#define E() \ - \ - LOCK(A); \ - LOCK(B); \ - UNLOCK(A); /* fail */ \ - UNLOCK(B); - -/* - * 6 testcases: - */ -#include "locking-selftest-spin.h" -GENERATE_TESTCASE(bad_unlock_order_spin) -#include "locking-selftest-wlock.h" -GENERATE_TESTCASE(bad_unlock_order_wlock) -#include "locking-selftest-rlock.h" -GENERATE_TESTCASE(bad_unlock_order_rlock) -#include "locking-selftest-mutex.h" -GENERATE_TESTCASE(bad_unlock_order_mutex) -#include "locking-selftest-wsem.h" -GENERATE_TESTCASE(bad_unlock_order_wsem) -#include "locking-selftest-rsem.h" -GENERATE_TESTCASE(bad_unlock_order_rsem) +#ifdef CONFIG_RT_MUTEXES +#include "locking-selftest-rtmutex.h" +GENERATE_TESTCASE(double_unlock_rtmutex); +#endif #undef E @@ -567,6 +616,11 @@ GENERATE_TESTCASE(init_held_wsem) #include "locking-selftest-rsem.h" GENERATE_TESTCASE(init_held_rsem) +#ifdef CONFIG_RT_MUTEXES +#include "locking-selftest-rtmutex.h" +GENERATE_TESTCASE(init_held_rtmutex); +#endif + #undef E /* @@ -916,6 +970,9 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) # define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map) # define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map) # define I_WW(x) lockdep_reset_lock(&x.dep_map) +#ifdef CONFIG_RT_MUTEXES +# define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map) +#endif #else # define I_SPINLOCK(x) # define I_RWLOCK(x) @@ -924,12 +981,23 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) # define I_WW(x) #endif +#ifndef I_RTMUTEX +# define I_RTMUTEX(x) +#endif + +#ifdef CONFIG_RT_MUTEXES +#define I2_RTMUTEX(x) rt_mutex_init(&rtmutex_##x) +#else +#define I2_RTMUTEX(x) +#endif + #define I1(x) \ do { \ I_SPINLOCK(x); \ I_RWLOCK(x); \ I_MUTEX(x); \ I_RWSEM(x); \ + I_RTMUTEX(x); \ } while (0) #define I2(x) \ @@ -938,6 +1006,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) rwlock_init(&rwlock_##x); \ mutex_init(&mutex_##x); \ init_rwsem(&rwsem_##x); \ + I2_RTMUTEX(x); \ } while (0) static void reset_locks(void) @@ -1013,6 +1082,12 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) reset_locks(); } +#ifdef CONFIG_RT_MUTEXES +#define dotest_rt(fn, e, m) dotest((fn), (e), (m)) +#else +#define dotest_rt(fn, e, m) +#endif + static inline void print_testname(const char *testname) { printk("%33s:", testname); @@ -1050,6 +1125,7 @@ static inline void print_testname(const char *testname) dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ + dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \ pr_cont("\n"); #define DO_TESTCASE_6_SUCCESS(desc, name) \ @@ -1060,6 +1136,7 @@ static inline void print_testname(const char *testname) dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \ dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \ 
dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \ + dotest_rt(name##_rtmutex, SUCCESS, LOCKTYPE_RTMUTEX); \ pr_cont("\n"); /* @@ -1073,6 +1150,7 @@ static inline void print_testname(const char *testname) dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \ dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \ dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \ + dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \ pr_cont("\n"); #define DO_TESTCASE_2I(desc, name, nr) \ @@ -1825,7 +1903,6 @@ void locking_selftest(void) DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA); DO_TESTCASE_6("double unlock", double_unlock); DO_TESTCASE_6("initialize held", init_held); - DO_TESTCASE_6_SUCCESS("bad unlock order", bad_unlock_order); printk(" --------------------------------------------------------------------------\n"); print_testname("recursive read-lock"); diff --git a/lib/refcount.c b/lib/refcount.c index 9f906783987e..5d0582a9480c 100644 --- a/lib/refcount.c +++ b/lib/refcount.c @@ -37,6 +37,8 @@ #include <linux/refcount.h> #include <linux/bug.h> +#ifdef CONFIG_REFCOUNT_FULL + /** * refcount_add_not_zero - add a value to a refcount unless it is 0 * @i: the value to add to the refcount @@ -225,6 +227,7 @@ void refcount_dec(refcount_t *r) WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n"); } EXPORT_SYMBOL(refcount_dec); +#endif /* CONFIG_REFCOUNT_FULL */ /** * refcount_dec_if_one - decrement a refcount if it is 1 diff --git a/tools/Makefile b/tools/Makefile index c8a90d01dd8e..221e1ce78b06 100644 --- a/tools/Makefile +++ b/tools/Makefile @@ -19,6 +19,7 @@ help: @echo ' kvm_stat - top-like utility for displaying kvm statistics' @echo ' leds - LEDs tools' @echo ' lguest - a minimal 32-bit x86 hypervisor' + @echo ' liblockdep - user-space wrapper for kernel locking-validator' @echo ' net - misc networking tools' @echo ' perf - Linux performance measurement and analysis tool' @echo ' selftests - various kernel selftests' @@ -89,7 +90,7 @@ freefall: FORCE kvm_stat: FORCE $(call descend,kvm/$@) -all: acpi cgroup cpupower gpio hv firewire lguest \ +all: acpi cgroup cpupower gpio hv firewire lguest liblockdep \ perf selftests turbostat usb \ virtio vm net x86_energy_perf_policy \ tmon freefall objtool kvm_stat @@ -103,6 +104,9 @@ cpupower_install: cgroup_install firewire_install gpio_install hv_install lguest_install perf_install usb_install virtio_install vm_install net_install objtool_install: $(call descend,$(@:_install=),install) +liblockdep_install: + $(call descend,lib/lockdep,install) + selftests_install: $(call descend,testing/$(@:_install=),install) @@ -119,7 +123,7 @@ kvm_stat_install: $(call descend,kvm/$(@:_install=),install) install: acpi_install cgroup_install cpupower_install gpio_install \ - hv_install firewire_install lguest_install \ + hv_install firewire_install lguest_install liblockdep_install \ perf_install selftests_install turbostat_install usb_install \ virtio_install vm_install net_install x86_energy_perf_policy_install \ tmon_install freefall_install objtool_install kvm_stat_install diff --git a/tools/include/asm/sections.h b/tools/include/asm/sections.h new file mode 100644 index 000000000000..a80643d7a7f1 --- /dev/null +++ b/tools/include/asm/sections.h @@ -0,0 +1,4 @@ +#ifndef __TOOLS_INCLUDE_LINUX_ASM_SECTIONS_H +#define __TOOLS_INCLUDE_LINUX_ASM_SECTIONS_H + +#endif /* __TOOLS_INCLUDE_LINUX_ASM_SECTIONS_H */ diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h index 1aecad369af5..969db1981868 100644 --- 
a/tools/include/linux/bitops.h +++ b/tools/include/linux/bitops.h @@ -61,4 +61,14 @@ static inline unsigned fls_long(unsigned long l) return fls64(l); } +/** + * rol32 - rotate a 32-bit value left + * @word: value to rotate + * @shift: bits to roll + */ +static inline __u32 rol32(__u32 word, unsigned int shift) +{ + return (word << shift) | (word >> ((-shift) & 31)); +} + #endif diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index 23299d7e7160..ef6ab908a42f 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h @@ -45,6 +45,10 @@ # define __maybe_unused __attribute__((unused)) #endif +#ifndef __used +# define __used __attribute__((__unused__)) +#endif + #ifndef __packed # define __packed __attribute__((__packed__)) #endif @@ -65,6 +69,14 @@ # define unlikely(x) __builtin_expect(!!(x), 0) #endif +#ifndef __init +# define __init +#endif + +#ifndef noinline +# define noinline +#endif + #define uninitialized_var(x) x = *(&(x)) #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) diff --git a/tools/lib/lockdep/uinclude/linux/debug_locks.h b/tools/include/linux/debug_locks.h index f38eb64df794..61cc7f501168 100644 --- a/tools/lib/lockdep/uinclude/linux/debug_locks.h +++ b/tools/include/linux/debug_locks.h @@ -3,8 +3,9 @@ #include <stddef.h> #include <linux/compiler.h> +#include <asm/bug.h> -#define DEBUG_LOCKS_WARN_ON(x) (x) +#define DEBUG_LOCKS_WARN_ON(x) WARN_ON(x) extern bool debug_locks; extern bool debug_locks_silent; diff --git a/tools/include/linux/delay.h b/tools/include/linux/delay.h new file mode 100644 index 000000000000..55aa4173af1f --- /dev/null +++ b/tools/include/linux/delay.h @@ -0,0 +1,4 @@ +#ifndef _TOOLS_INCLUDE_LINUX_DELAY_H +#define _TOOLS_INCLUDE_LINUX_DELAY_H + +#endif /* _TOOLS_INCLUDE_LINUX_DELAY_H */ diff --git a/tools/include/linux/err.h b/tools/include/linux/err.h index bdc3dd8131d4..abf0478a8fb2 100644 --- a/tools/include/linux/err.h +++ b/tools/include/linux/err.h @@ -46,4 +46,9 @@ static inline bool __must_check IS_ERR(__force const void *ptr) return IS_ERR_VALUE((unsigned long)ptr); } +static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr) +{ + return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr); +} + #endif /* _LINUX_ERR_H */ diff --git a/tools/include/linux/ftrace.h b/tools/include/linux/ftrace.h new file mode 100644 index 000000000000..949f541ce11e --- /dev/null +++ b/tools/include/linux/ftrace.h @@ -0,0 +1,4 @@ +#ifndef _TOOLS_INCLUDE_LINUX_FTRACE_H +#define _TOOLS_INCLUDE_LINUX_FTRACE_H + +#endif /* _TOOLS_INCLUDE_LINUX_FTRACE_H */ diff --git a/tools/include/linux/gfp.h b/tools/include/linux/gfp.h new file mode 100644 index 000000000000..22030756fbc0 --- /dev/null +++ b/tools/include/linux/gfp.h @@ -0,0 +1,4 @@ +#ifndef _TOOLS_INCLUDE_LINUX_GFP_H +#define _TOOLS_INCLUDE_LINUX_GFP_H + +#endif /* _TOOLS_INCLUDE_LINUX_GFP_H */ diff --git a/tools/lib/lockdep/uinclude/linux/hardirq.h b/tools/include/linux/hardirq.h index c8f3f8f58729..c8f3f8f58729 100644 --- a/tools/lib/lockdep/uinclude/linux/hardirq.h +++ b/tools/include/linux/hardirq.h diff --git a/tools/include/linux/interrupt.h b/tools/include/linux/interrupt.h new file mode 100644 index 000000000000..6be25bbdca9e --- /dev/null +++ b/tools/include/linux/interrupt.h @@ -0,0 +1,4 @@ +#ifndef _TOOLS_INCLUDE_LINUX_INTERRUPT_H +#define _TOOLS_INCLUDE_LINUX_INTERRUPT_H + +#endif /* _TOOLS_INCLUDE_LINUX_INTERRUPT_H */ diff --git a/tools/lib/lockdep/uinclude/linux/irqflags.h b/tools/include/linux/irqflags.h index 6cc296f0fad0..df77669cfe1c 
100644 --- a/tools/lib/lockdep/uinclude/linux/irqflags.h +++ b/tools/include/linux/irqflags.h @@ -17,19 +17,19 @@ #define raw_local_irq_disable() do { } while (0) #define raw_local_irq_enable() do { } while (0) #define raw_local_irq_save(flags) ((flags) = 0) -#define raw_local_irq_restore(flags) do { } while (0) +#define raw_local_irq_restore(flags) ((void)(flags)) #define raw_local_save_flags(flags) ((flags) = 0) -#define raw_irqs_disabled_flags(flags) do { } while (0) +#define raw_irqs_disabled_flags(flags) ((void)(flags)) #define raw_irqs_disabled() 0 #define raw_safe_halt() #define local_irq_enable() do { } while (0) #define local_irq_disable() do { } while (0) #define local_irq_save(flags) ((flags) = 0) -#define local_irq_restore(flags) do { } while (0) +#define local_irq_restore(flags) ((void)(flags)) #define local_save_flags(flags) ((flags) = 0) #define irqs_disabled() (1) -#define irqs_disabled_flags(flags) (0) +#define irqs_disabled_flags(flags) ((void)(flags), 0) #define safe_halt() do { } while (0) #define trace_lock_release(x, y) diff --git a/tools/include/linux/jhash.h b/tools/include/linux/jhash.h new file mode 100644 index 000000000000..348c6f47e4cc --- /dev/null +++ b/tools/include/linux/jhash.h @@ -0,0 +1,175 @@ +#ifndef _LINUX_JHASH_H +#define _LINUX_JHASH_H + +/* jhash.h: Jenkins hash support. + * + * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net) + * + * http://burtleburtle.net/bob/hash/ + * + * These are the credits from Bob's sources: + * + * lookup3.c, by Bob Jenkins, May 2006, Public Domain. + * + * These are functions for producing 32-bit hashes for hash table lookup. + * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() + * are externally useful functions. Routines to test the hash are included + * if SELF_TEST is defined. You can use this free for any purpose. It's in + * the public domain. It has no warranty. + * + * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu) + * + * I've modified Bob's hash to be useful in the Linux kernel, and + * any bugs present are my fault. + * Jozsef + */ +#include <linux/bitops.h> +#include <linux/unaligned/packed_struct.h> + +/* Best hash sizes are of power of two */ +#define jhash_size(n) ((u32)1<<(n)) +/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */ +#define jhash_mask(n) (jhash_size(n)-1) + +/* __jhash_mix -- mix 3 32-bit values reversibly. */ +#define __jhash_mix(a, b, c) \ +{ \ + a -= c; a ^= rol32(c, 4); c += b; \ + b -= a; b ^= rol32(a, 6); a += c; \ + c -= b; c ^= rol32(b, 8); b += a; \ + a -= c; a ^= rol32(c, 16); c += b; \ + b -= a; b ^= rol32(a, 19); a += c; \ + c -= b; c ^= rol32(b, 4); b += a; \ +} + +/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */ +#define __jhash_final(a, b, c) \ +{ \ + c ^= b; c -= rol32(b, 14); \ + a ^= c; a -= rol32(c, 11); \ + b ^= a; b -= rol32(a, 25); \ + c ^= b; c -= rol32(b, 16); \ + a ^= c; a -= rol32(c, 4); \ + b ^= a; b -= rol32(a, 14); \ + c ^= b; c -= rol32(b, 24); \ +} + +/* An arbitrary initial parameter */ +#define JHASH_INITVAL 0xdeadbeef + +/* jhash - hash an arbitrary key + * @k: sequence of bytes as key + * @length: the length of the key + * @initval: the previous hash, or an arbitray value + * + * The generic version, hashes an arbitrary sequence of bytes. + * No alignment or length assumptions are made about the input key. + * + * Returns the hash value of the key. The result depends on endianness. 
+ */ +static inline u32 jhash(const void *key, u32 length, u32 initval) +{ + u32 a, b, c; + const u8 *k = key; + + /* Set up the internal state */ + a = b = c = JHASH_INITVAL + length + initval; + + /* All but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) { + a += __get_unaligned_cpu32(k); + b += __get_unaligned_cpu32(k + 4); + c += __get_unaligned_cpu32(k + 8); + __jhash_mix(a, b, c); + length -= 12; + k += 12; + } + /* Last block: affect all 32 bits of (c) */ + /* All the case statements fall through */ + switch (length) { + case 12: c += (u32)k[11]<<24; + case 11: c += (u32)k[10]<<16; + case 10: c += (u32)k[9]<<8; + case 9: c += k[8]; + case 8: b += (u32)k[7]<<24; + case 7: b += (u32)k[6]<<16; + case 6: b += (u32)k[5]<<8; + case 5: b += k[4]; + case 4: a += (u32)k[3]<<24; + case 3: a += (u32)k[2]<<16; + case 2: a += (u32)k[1]<<8; + case 1: a += k[0]; + __jhash_final(a, b, c); + case 0: /* Nothing left to add */ + break; + } + + return c; +} + +/* jhash2 - hash an array of u32's + * @k: the key which must be an array of u32's + * @length: the number of u32's in the key + * @initval: the previous hash, or an arbitray value + * + * Returns the hash value of the key. + */ +static inline u32 jhash2(const u32 *k, u32 length, u32 initval) +{ + u32 a, b, c; + + /* Set up the internal state */ + a = b = c = JHASH_INITVAL + (length<<2) + initval; + + /* Handle most of the key */ + while (length > 3) { + a += k[0]; + b += k[1]; + c += k[2]; + __jhash_mix(a, b, c); + length -= 3; + k += 3; + } + + /* Handle the last 3 u32's: all the case statements fall through */ + switch (length) { + case 3: c += k[2]; + case 2: b += k[1]; + case 1: a += k[0]; + __jhash_final(a, b, c); + case 0: /* Nothing left to add */ + break; + } + + return c; +} + + +/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */ +static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) +{ + a += initval; + b += initval; + c += initval; + + __jhash_final(a, b, c); + + return c; +} + +static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) +{ + return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2)); +} + +static inline u32 jhash_2words(u32 a, u32 b, u32 initval) +{ + return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2)); +} + +static inline u32 jhash_1word(u32 a, u32 initval) +{ + return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2)); +} + +#endif /* _LINUX_JHASH_H */ diff --git a/tools/lib/lockdep/uinclude/linux/kallsyms.h b/tools/include/linux/kallsyms.h index b0f2dbdf1a15..582cc1e5f3a4 100644 --- a/tools/lib/lockdep/uinclude/linux/kallsyms.h +++ b/tools/include/linux/kallsyms.h @@ -3,6 +3,7 @@ #include <linux/kernel.h> #include <stdio.h> +#include <unistd.h> #define KSYM_NAME_LEN 128 @@ -24,7 +25,7 @@ static inline void print_ip_sym(unsigned long ip) name = backtrace_symbols((void **)&ip, 1); - printf("%s\n", *name); + dprintf(STDOUT_FILENO, "%s\n", *name); free(name); } diff --git a/tools/lib/lockdep/uinclude/linux/kern_levels.h b/tools/include/linux/kern_levels.h index 3b9bade28698..3b9bade28698 100644 --- a/tools/lib/lockdep/uinclude/linux/kern_levels.h +++ b/tools/include/linux/kern_levels.h diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h index 73ccc48126bb..801b927499f2 100644 --- a/tools/include/linux/kernel.h +++ b/tools/include/linux/kernel.h @@ -32,6 +32,7 @@ (type *)((char *)__mptr - offsetof(type, member)); }) #endif +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) #define 
BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) #ifndef max @@ -89,4 +90,7 @@ int scnprintf(char * buf, size_t size, const char * fmt, ...); #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) #define round_down(x, y) ((x) & ~__round_mask(x, y)) +#define current_gfp_context(k) 0 +#define synchronize_sched() + #endif diff --git a/tools/lib/lockdep/uinclude/linux/kmemcheck.h b/tools/include/linux/kmemcheck.h index 94d598bc6abe..94d598bc6abe 100644 --- a/tools/lib/lockdep/uinclude/linux/kmemcheck.h +++ b/tools/include/linux/kmemcheck.h diff --git a/tools/include/linux/linkage.h b/tools/include/linux/linkage.h new file mode 100644 index 000000000000..bc763d500262 --- /dev/null +++ b/tools/include/linux/linkage.h @@ -0,0 +1,4 @@ +#ifndef _TOOLS_INCLUDE_LINUX_LINKAGE_H +#define _TOOLS_INCLUDE_LINUX_LINKAGE_H + +#endif /* _TOOLS_INCLUDE_LINUX_LINKAGE_H */ diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/include/linux/lockdep.h index c808c7d02d21..8da3e8effafa 100644 --- a/tools/lib/lockdep/uinclude/linux/lockdep.h +++ b/tools/include/linux/lockdep.h @@ -7,8 +7,15 @@ #include <limits.h> #include <linux/utsname.h> #include <linux/compiler.h> +#include <linux/export.h> +#include <linux/kern_levels.h> +#include <linux/err.h> +#include <linux/rcu.h> +#include <linux/list.h> +#include <linux/hardirq.h> +#include <unistd.h> -#define MAX_LOCK_DEPTH 2000UL +#define MAX_LOCK_DEPTH 63UL #define asmlinkage #define __visible @@ -29,31 +36,32 @@ extern struct task_struct *__curr(void); #define current (__curr()) -#define debug_locks_off() 1 +static inline int debug_locks_off(void) +{ + return 1; +} + #define task_pid_nr(tsk) ((tsk)->pid) #define KSYM_NAME_LEN 128 -#define printk printf +#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__) +#define pr_err(format, ...) 
fprintf (stderr, format, ## __VA_ARGS__) +#define pr_warn pr_err #define list_del_rcu list_del #define atomic_t unsigned long #define atomic_inc(x) ((*(x))++) -static struct new_utsname *init_utsname(void) -{ - static struct new_utsname n = (struct new_utsname) { - .release = "liblockdep", - .version = LIBLOCKDEP_VERSION, - }; - - return &n; -} - #define print_tainted() "" #define static_obj(x) 1 #define debug_show_all_locks() extern void debug_check_no_locks_held(void); +static __used bool __is_kernel_percpu_address(unsigned long addr, void *can_addr) +{ + return false; +} + #endif diff --git a/tools/lib/lockdep/uinclude/linux/module.h b/tools/include/linux/module.h index 09c7a7be8ccc..07055db296f3 100644 --- a/tools/lib/lockdep/uinclude/linux/module.h +++ b/tools/include/linux/module.h @@ -3,4 +3,9 @@ #define module_param(name, type, perm) +static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) +{ + return false; +} + #endif diff --git a/tools/include/linux/mutex.h b/tools/include/linux/mutex.h new file mode 100644 index 000000000000..a8180d25f2fc --- /dev/null +++ b/tools/include/linux/mutex.h @@ -0,0 +1,4 @@ +#ifndef _TOOLS_INCLUDE_LINUX_MUTEX_H +#define _TOOLS_INCLUDE_LINUX_MUTEX_H + +#endif /* _TOOLS_INCLUDE_LINUX_MUTEX_H */ diff --git a/tools/include/linux/proc_fs.h b/tools/include/linux/proc_fs.h new file mode 100644 index 000000000000..8b3b03b64fda --- /dev/null +++ b/tools/include/linux/proc_fs.h @@ -0,0 +1,4 @@ +#ifndef _TOOLS_INCLUDE_LINUX_PROC_FS_H +#define _TOOLS_INCLUDE_LINUX_PROC_FS_H + +#endif /* _TOOLS_INCLUDE_LINUX_PROC_FS_H */ diff --git a/tools/lib/lockdep/uinclude/linux/rcu.h b/tools/include/linux/rcu.h index 042ee8e463c9..5080649dad04 100644 --- a/tools/lib/lockdep/uinclude/linux/rcu.h +++ b/tools/include/linux/rcu.h @@ -18,4 +18,7 @@ static inline bool rcu_is_watching(void) return false; } +#define rcu_assign_pointer(p, v) ((p) = (v)) +#define RCU_INIT_POINTER(p, v) p=(v) + #endif diff --git a/tools/include/linux/sched/clock.h b/tools/include/linux/sched/clock.h new file mode 100644 index 000000000000..5837d17c4182 --- /dev/null +++ b/tools/include/linux/sched/clock.h @@ -0,0 +1,4 @@ +#ifndef _TOOLS_PERF_LINUX_SCHED_CLOCK_H +#define _TOOLS_PERF_LINUX_SCHED_CLOCK_H + +#endif /* _TOOLS_PERF_LINUX_SCHED_CLOCK_H */ diff --git a/tools/include/linux/sched/mm.h b/tools/include/linux/sched/mm.h new file mode 100644 index 000000000000..c8d9f19c1f35 --- /dev/null +++ b/tools/include/linux/sched/mm.h @@ -0,0 +1,4 @@ +#ifndef _TOOLS_PERF_LINUX_SCHED_MM_H +#define _TOOLS_PERF_LINUX_SCHED_MM_H + +#endif /* _TOOLS_PERF_LINUX_SCHED_MM_H */ diff --git a/tools/include/linux/sched/task.h b/tools/include/linux/sched/task.h new file mode 100644 index 000000000000..a97890eca110 --- /dev/null +++ b/tools/include/linux/sched/task.h @@ -0,0 +1,4 @@ +#ifndef _TOOLS_PERF_LINUX_SCHED_TASK_H +#define _TOOLS_PERF_LINUX_SCHED_TASK_H + +#endif /* _TOOLS_PERF_LINUX_SCHED_TASK_H */ diff --git a/tools/include/linux/seq_file.h b/tools/include/linux/seq_file.h new file mode 100644 index 000000000000..102fd9217f1f --- /dev/null +++ b/tools/include/linux/seq_file.h @@ -0,0 +1,4 @@ +#ifndef _TOOLS_INCLUDE_LINUX_SEQ_FILE_H +#define _TOOLS_INCLUDE_LINUX_SEQ_FILE_H + +#endif /* _TOOLS_INCLUDE_LINUX_SEQ_FILE_H */ diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h index 58397dcb19d6..417cda4f793f 100644 --- a/tools/include/linux/spinlock.h +++ b/tools/include/linux/spinlock.h @@ -1,5 +1,31 @@ +#ifndef __LINUX_SPINLOCK_H_ +#define __LINUX_SPINLOCK_H_ 
+ +#include <pthread.h> +#include <stdbool.h> + #define spinlock_t pthread_mutex_t #define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER; #define spin_lock_irqsave(x, f) (void)f, pthread_mutex_lock(x) #define spin_unlock_irqrestore(x, f) (void)f, pthread_mutex_unlock(x) + +#define arch_spinlock_t pthread_mutex_t +#define __ARCH_SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER + +static inline void arch_spin_lock(arch_spinlock_t *mutex) +{ + pthread_mutex_lock(mutex); +} + +static inline void arch_spin_unlock(arch_spinlock_t *mutex) +{ + pthread_mutex_unlock(mutex); +} + +static inline bool arch_spin_is_locked(arch_spinlock_t *mutex) +{ + return true; +} + +#endif diff --git a/tools/lib/lockdep/uinclude/linux/stacktrace.h b/tools/include/linux/stacktrace.h index 39aecc6b19d1..39aecc6b19d1 100644 --- a/tools/lib/lockdep/uinclude/linux/stacktrace.h +++ b/tools/include/linux/stacktrace.h diff --git a/tools/include/linux/unaligned/packed_struct.h b/tools/include/linux/unaligned/packed_struct.h new file mode 100644 index 000000000000..c0d817de4df2 --- /dev/null +++ b/tools/include/linux/unaligned/packed_struct.h @@ -0,0 +1,46 @@ +#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H +#define _LINUX_UNALIGNED_PACKED_STRUCT_H + +#include <linux/kernel.h> + +struct __una_u16 { u16 x; } __packed; +struct __una_u32 { u32 x; } __packed; +struct __una_u64 { u64 x; } __packed; + +static inline u16 __get_unaligned_cpu16(const void *p) +{ + const struct __una_u16 *ptr = (const struct __una_u16 *)p; + return ptr->x; +} + +static inline u32 __get_unaligned_cpu32(const void *p) +{ + const struct __una_u32 *ptr = (const struct __una_u32 *)p; + return ptr->x; +} + +static inline u64 __get_unaligned_cpu64(const void *p) +{ + const struct __una_u64 *ptr = (const struct __una_u64 *)p; + return ptr->x; +} + +static inline void __put_unaligned_cpu16(u16 val, void *p) +{ + struct __una_u16 *ptr = (struct __una_u16 *)p; + ptr->x = val; +} + +static inline void __put_unaligned_cpu32(u32 val, void *p) +{ + struct __una_u32 *ptr = (struct __una_u32 *)p; + ptr->x = val; +} + +static inline void __put_unaligned_cpu64(u64 val, void *p) +{ + struct __una_u64 *ptr = (struct __una_u64 *)p; + ptr->x = val; +} + +#endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */ diff --git a/tools/include/trace/events/lock.h b/tools/include/trace/events/lock.h new file mode 100644 index 000000000000..5b15fd5ee1af --- /dev/null +++ b/tools/include/trace/events/lock.h @@ -0,0 +1,4 @@ +#ifndef _TOOLS_INCLUDE_TRACE_EVENTS_LOCK_H +#define _TOOLS_INCLUDE_TRACE_EVENTS_LOCK_H + +#endif /* _TOOLS_INCLUDE_TRACE_EVENTS_LOCK_H */ diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile index 3bc0ef9f8923..ed9ace59d112 100644 --- a/tools/lib/lockdep/Makefile +++ b/tools/lib/lockdep/Makefile @@ -79,6 +79,7 @@ INCLUDES = -I. 
-I./uinclude -I./include -I../../include $(CONFIG_INCLUDES) # Set compile option CFLAGS if not set elsewhere CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g CFLAGS += -fPIC +CFLAGS += -Wall override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ) @@ -100,7 +101,7 @@ include $(srctree)/tools/build/Makefile.include do_compile_shared_library = \ ($(print_shared_lib_compile) \ - $(CC) --shared $^ -o $@ -lpthread -ldl -Wl,-soname='"$@"';$(shell ln -sf $@ liblockdep.so)) + $(CC) $(LDFLAGS) --shared $^ -o $@ -lpthread -ldl -Wl,-soname='$(@F)';$(shell ln -sf $(@F) $(@D)/liblockdep.so)) do_build_static_lib = \ ($(print_static_lib_build) \ @@ -118,10 +119,10 @@ all_cmd: $(CMD_TARGETS) $(LIB_IN): force $(Q)$(MAKE) $(build)=liblockdep -liblockdep.so.$(LIBLOCKDEP_VERSION): $(LIB_IN) +$(OUTPUT)liblockdep.so.$(LIBLOCKDEP_VERSION): $(LIB_IN) $(Q)$(do_compile_shared_library) -liblockdep.a: $(LIB_IN) +$(OUTPUT)liblockdep.a: $(LIB_IN) $(Q)$(do_build_static_lib) tags: force @@ -149,7 +150,7 @@ install_lib: all_cmd install: install_lib clean: - $(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d .*.cmd + $(RM) $(OUTPUT)*.o *~ $(TARGETS) $(OUTPUT)*.a $(OUTPUT)*liblockdep*.so* $(VERSION_FILES) $(OUTPUT).*.d $(OUTPUT).*.cmd $(RM) tags TAGS PHONY += force diff --git a/tools/lib/lockdep/lockdep.c b/tools/lib/lockdep/lockdep.c index a0a2e3a266af..ced6d7443cea 100644 --- a/tools/lib/lockdep/lockdep.c +++ b/tools/lib/lockdep/lockdep.c @@ -1,8 +1,27 @@ #include <linux/lockdep.h> +#include <stdlib.h> /* Trivial API wrappers, we don't (yet) have RCU in user-space: */ #define hlist_for_each_entry_rcu hlist_for_each_entry #define hlist_add_head_rcu hlist_add_head #define hlist_del_rcu hlist_del +#define list_for_each_entry_rcu list_for_each_entry +#define list_add_tail_rcu list_add_tail + +u32 prandom_u32(void) +{ + /* Used only by lock_pin_lock() which is dead code */ + abort(); +} + +static struct new_utsname *init_utsname(void) +{ + static struct new_utsname n = (struct new_utsname) { + .release = "liblockdep", + .version = LIBLOCKDEP_VERSION, + }; + + return &n; +} #include "../../../kernel/locking/lockdep.c" diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c index 52844847569c..6a2d3c5d4e92 100644 --- a/tools/lib/lockdep/preload.c +++ b/tools/lib/lockdep/preload.c @@ -4,6 +4,7 @@ #include <dlfcn.h> #include <stdlib.h> #include <sysexits.h> +#include <unistd.h> #include "include/liblockdep/mutex.h" #include "../../include/linux/rbtree.h" @@ -122,8 +123,6 @@ static struct rb_node **__get_lock_node(void *lock, struct rb_node **parent) #define LIBLOCKDEP_STATIC_ENTRIES 1024 #endif -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) - static struct lock_lookup __locks[LIBLOCKDEP_STATIC_ENTRIES]; static int __locks_nr; @@ -149,7 +148,7 @@ static struct lock_lookup *alloc_lock(void) int idx = __locks_nr++; if (idx >= ARRAY_SIZE(__locks)) { - fprintf(stderr, + dprintf(STDERR_FILENO, "LOCKDEP error: insufficient LIBLOCKDEP_STATIC_ENTRIES\n"); exit(EX_UNAVAILABLE); } diff --git a/tools/lib/lockdep/rbtree.c b/tools/lib/lockdep/rbtree.c index f7f43033c8b7..297c304571f8 100644 --- a/tools/lib/lockdep/rbtree.c +++ b/tools/lib/lockdep/rbtree.c @@ -1 +1 @@ -#include "../../../lib/rbtree.c" +#include "../../lib/rbtree.c" diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh index 1069d96248c1..f9b94098fc98 100755 --- a/tools/lib/lockdep/run_tests.sh 
+++ b/tools/lib/lockdep/run_tests.sh @@ -4,9 +4,9 @@ make &> /dev/null for i in `ls tests/*.c`; do testname=$(basename "$i" .c) - gcc -o tests/$testname -pthread -lpthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null + gcc -o tests/$testname -pthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null echo -ne "$testname... " - if [ $(timeout 1 ./tests/$testname | wc -l) -gt 0 ]; then + if [ $(timeout 1 ./tests/$testname 2>&1 | wc -l) -gt 0 ]; then echo "PASSED!" else echo "FAILED!" @@ -18,9 +18,9 @@ done for i in `ls tests/*.c`; do testname=$(basename "$i" .c) - gcc -o tests/$testname -pthread -lpthread -Iinclude $i &> /dev/null + gcc -o tests/$testname -pthread -Iinclude $i &> /dev/null echo -ne "(PRELOAD) $testname... " - if [ $(timeout 1 ./lockdep ./tests/$testname | wc -l) -gt 0 ]; then + if [ $(timeout 1 ./lockdep ./tests/$testname 2>&1 | wc -l) -gt 0 ]; then echo "PASSED!" else echo "FAILED!" diff --git a/tools/lib/lockdep/uinclude/asm/hash.h b/tools/lib/lockdep/uinclude/asm/hash.h deleted file mode 100644 index d82b170bb216..000000000000 --- a/tools/lib/lockdep/uinclude/asm/hash.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_GENERIC_HASH_H -#define __ASM_GENERIC_HASH_H - -/* Stub */ - -#endif /* __ASM_GENERIC_HASH_H */ diff --git a/tools/lib/lockdep/uinclude/asm/hweight.h b/tools/lib/lockdep/uinclude/asm/hweight.h deleted file mode 100644 index fab00ff936d1..000000000000 --- a/tools/lib/lockdep/uinclude/asm/hweight.h +++ /dev/null @@ -1,3 +0,0 @@ - -/* empty file */ - diff --git a/tools/lib/lockdep/uinclude/asm/sections.h b/tools/lib/lockdep/uinclude/asm/sections.h deleted file mode 100644 index fab00ff936d1..000000000000 --- a/tools/lib/lockdep/uinclude/asm/sections.h +++ /dev/null @@ -1,3 +0,0 @@ - -/* empty file */ - diff --git a/tools/lib/lockdep/uinclude/linux/bitops.h b/tools/lib/lockdep/uinclude/linux/bitops.h deleted file mode 100644 index fab00ff936d1..000000000000 --- a/tools/lib/lockdep/uinclude/linux/bitops.h +++ /dev/null @@ -1,3 +0,0 @@ - -/* empty file */ - diff --git a/tools/lib/lockdep/uinclude/linux/compiler.h b/tools/lib/lockdep/uinclude/linux/compiler.h deleted file mode 100644 index fd3e56a83fc2..000000000000 --- a/tools/lib/lockdep/uinclude/linux/compiler.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _LIBLOCKDEP_LINUX_COMPILER_H_ -#define _LIBLOCKDEP_LINUX_COMPILER_H_ - -#define __used __attribute__((__unused__)) -#define unlikely -#define READ_ONCE(x) (x) -#define WRITE_ONCE(x, val) x=(val) -#define RCU_INIT_POINTER(p, v) p=(v) - -#endif diff --git a/tools/lib/lockdep/uinclude/linux/delay.h b/tools/lib/lockdep/uinclude/linux/delay.h deleted file mode 100644 index fab00ff936d1..000000000000 --- a/tools/lib/lockdep/uinclude/linux/delay.h +++ /dev/null @@ -1,3 +0,0 @@ - -/* empty file */ - diff --git a/tools/lib/lockdep/uinclude/linux/ftrace.h b/tools/lib/lockdep/uinclude/linux/ftrace.h deleted file mode 100644 index fab00ff936d1..000000000000 --- a/tools/lib/lockdep/uinclude/linux/ftrace.h +++ /dev/null @@ -1,3 +0,0 @@ - -/* empty file */ - diff --git a/tools/lib/lockdep/uinclude/linux/gfp.h b/tools/lib/lockdep/uinclude/linux/gfp.h deleted file mode 100644 index fab00ff936d1..000000000000 --- a/tools/lib/lockdep/uinclude/linux/gfp.h +++ /dev/null @@ -1,3 +0,0 @@ - -/* empty file */ - diff --git a/tools/lib/lockdep/uinclude/linux/hash.h b/tools/lib/lockdep/uinclude/linux/hash.h deleted file mode 100644 index 0f8479858dc0..000000000000 --- a/tools/lib/lockdep/uinclude/linux/hash.h +++ /dev/null @@ -1 +0,0 @@ -#include 
"../../../include/linux/hash.h" diff --git a/tools/lib/lockdep/uinclude/linux/interrupt.h b/tools/lib/lockdep/uinclude/linux/interrupt.h deleted file mode 100644 index fab00ff936d1..000000000000 --- a/tools/lib/lockdep/uinclude/linux/interrupt.h +++ /dev/null @@ -1,3 +0,0 @@ - -/* empty file */ - diff --git a/tools/lib/lockdep/uinclude/linux/kernel.h b/tools/lib/lockdep/uinclude/linux/kernel.h deleted file mode 100644 index 276c7a8b2ed1..000000000000 --- a/tools/lib/lockdep/uinclude/linux/kernel.h +++ /dev/null @@ -1,47 +0,0 @@ -#ifndef _LIBLOCKDEP_LINUX_KERNEL_H_ -#define _LIBLOCKDEP_LINUX_KERNEL_H_ - -#include <linux/export.h> -#include <linux/types.h> -#include <linux/rcu.h> -#include <linux/hardirq.h> -#include <linux/kern_levels.h> - -#ifndef container_of -#define container_of(ptr, type, member) ({ \ - const typeof(((type *)0)->member) * __mptr = (ptr); \ - (type *)((char *)__mptr - offsetof(type, member)); }) -#endif - -#define max(x, y) ({ \ - typeof(x) _max1 = (x); \ - typeof(y) _max2 = (y); \ - (void) (&_max1 == &_max2); \ - _max1 > _max2 ? _max1 : _max2; }) - -#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) -#define WARN_ON(x) (x) -#define WARN_ON_ONCE(x) (x) -#define likely(x) (x) -#define WARN(x, y...) (x) -#define uninitialized_var(x) x -#define __init -#define noinline -#define list_add_tail_rcu list_add_tail -#define list_for_each_entry_rcu list_for_each_entry -#define barrier() -#define synchronize_sched() - -#ifndef CALLER_ADDR0 -#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -#endif - -#ifndef _RET_IP_ -#define _RET_IP_ CALLER_ADDR0 -#endif - -#ifndef _THIS_IP_ -#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) -#endif - -#endif diff --git a/tools/lib/lockdep/uinclude/linux/linkage.h b/tools/lib/lockdep/uinclude/linux/linkage.h deleted file mode 100644 index fab00ff936d1..000000000000 --- a/tools/lib/lockdep/uinclude/linux/linkage.h +++ /dev/null @@ -1,3 +0,0 @@ - -/* empty file */ - diff --git a/tools/lib/lockdep/uinclude/linux/list.h b/tools/lib/lockdep/uinclude/linux/list.h deleted file mode 100644 index 6e9ef31ed82e..000000000000 --- a/tools/lib/lockdep/uinclude/linux/list.h +++ /dev/null @@ -1 +0,0 @@ -#include "../../../include/linux/list.h" diff --git a/tools/lib/lockdep/uinclude/linux/mutex.h b/tools/lib/lockdep/uinclude/linux/mutex.h deleted file mode 100644 index fab00ff936d1..000000000000 --- a/tools/lib/lockdep/uinclude/linux/mutex.h +++ /dev/null @@ -1,3 +0,0 @@ - -/* empty file */ - diff --git a/tools/lib/lockdep/uinclude/linux/poison.h b/tools/lib/lockdep/uinclude/linux/poison.h deleted file mode 100644 index 0c27bdf14233..000000000000 --- a/tools/lib/lockdep/uinclude/linux/poison.h +++ /dev/null @@ -1 +0,0 @@ -#include "../../../include/linux/poison.h" diff --git a/tools/lib/lockdep/uinclude/linux/prefetch.h b/tools/lib/lockdep/uinclude/linux/prefetch.h deleted file mode 100644 index d73fe6f850ac..000000000000 --- a/tools/lib/lockdep/uinclude/linux/prefetch.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _LIBLOCKDEP_LINUX_PREFETCH_H_ -#define _LIBLOCKDEP_LINUX_PREFETCH_H - -static inline void prefetch(void *a __attribute__((unused))) { } - -#endif diff --git a/tools/lib/lockdep/uinclude/linux/proc_fs.h b/tools/lib/lockdep/uinclude/linux/proc_fs.h deleted file mode 100644 index fab00ff936d1..000000000000 --- a/tools/lib/lockdep/uinclude/linux/proc_fs.h +++ /dev/null @@ -1,3 +0,0 @@ - -/* empty file */ - diff --git a/tools/lib/lockdep/uinclude/linux/rbtree_augmented.h 
b/tools/lib/lockdep/uinclude/linux/rbtree_augmented.h deleted file mode 100644 index c3759477379c..000000000000 --- a/tools/lib/lockdep/uinclude/linux/rbtree_augmented.h +++ /dev/null @@ -1,2 +0,0 @@ -#define __always_inline -#include "../../../include/linux/rbtree_augmented.h" diff --git a/tools/lib/lockdep/uinclude/linux/seq_file.h b/tools/lib/lockdep/uinclude/linux/seq_file.h deleted file mode 100644 index fab00ff936d1..000000000000 --- a/tools/lib/lockdep/uinclude/linux/seq_file.h +++ /dev/null @@ -1,3 +0,0 @@ - -/* empty file */ - diff --git a/tools/lib/lockdep/uinclude/linux/spinlock.h b/tools/lib/lockdep/uinclude/linux/spinlock.h deleted file mode 100644 index 68c1aa2bcba5..000000000000 --- a/tools/lib/lockdep/uinclude/linux/spinlock.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _LIBLOCKDEP_SPINLOCK_H_ -#define _LIBLOCKDEP_SPINLOCK_H_ - -#include <pthread.h> -#include <stdbool.h> - -#define arch_spinlock_t pthread_mutex_t -#define __ARCH_SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER - -static inline void arch_spin_lock(arch_spinlock_t *mutex) -{ - pthread_mutex_lock(mutex); -} - -static inline void arch_spin_unlock(arch_spinlock_t *mutex) -{ - pthread_mutex_unlock(mutex); -} - -static inline bool arch_spin_is_locked(arch_spinlock_t *mutex) -{ - return true; -} - -#endif diff --git a/tools/lib/lockdep/uinclude/linux/stringify.h b/tools/lib/lockdep/uinclude/linux/stringify.h deleted file mode 100644 index 05dfcd1ac118..000000000000 --- a/tools/lib/lockdep/uinclude/linux/stringify.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _LIBLOCKDEP_LINUX_STRINGIFY_H_ -#define _LIBLOCKDEP_LINUX_STRINGIFY_H_ - -#define __stringify_1(x...) #x -#define __stringify(x...) __stringify_1(x) - -#endif diff --git a/tools/lib/lockdep/uinclude/trace/events/lock.h b/tools/lib/lockdep/uinclude/trace/events/lock.h deleted file mode 100644 index fab00ff936d1..000000000000 --- a/tools/lib/lockdep/uinclude/trace/events/lock.h +++ /dev/null @@ -1,3 +0,0 @@ - -/* empty file */ - |
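Most of the liblockdep changes above relocate its private kernel-header stubs into tools/include and fix its Makefile and run_tests.sh. As a reminder of what the library is for, here is a hedged, hypothetical user-space example (abba.c is an invented file name) of the kind of potential deadlock the ./lockdep preload wrapper from tools/lib/lockdep reports:

```c
/*
 * Hypothetical example; roughly:
 *   gcc -pthread abba.c -o abba
 *   ./lockdep ./abba     (LD_PRELOAD wrapper from tools/lib/lockdep)
 */
#include <pthread.h>

static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

static void *thread_ab(void *arg)
{
	/* This thread establishes the ordering A -> B. */
	pthread_mutex_lock(&a);
	pthread_mutex_lock(&b);
	pthread_mutex_unlock(&b);
	pthread_mutex_unlock(&a);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, thread_ab, NULL);
	pthread_join(t, NULL);

	/*
	 * The main thread then takes B -> A: liblockdep reports the
	 * potential AB-BA deadlock even though it never triggers here.
	 */
	pthread_mutex_lock(&b);
	pthread_mutex_lock(&a);
	pthread_mutex_unlock(&a);
	pthread_mutex_unlock(&b);
	return 0;
}
```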