 arch/c6x/kernel/setup.c                        |  2
 arch/microblaze/kernel/setup.c                 |  2
 arch/powerpc/kernel/setup_32.c                 |  2
 arch/powerpc/kernel/setup_64.c                 |  3
 arch/s390/kernel/early.c                       |  1
 arch/sparc/kernel/head_64.S                    |  8
 arch/x86/include/asm/barrier.h                 | 15
 arch/x86/kernel/process.c                      |  4
 arch/x86/lguest/boot.c                         |  6
 include/linux/compiler.h                       |  5
 include/linux/lockdep.h                        |  2
 init/main.c                                    |  5
 kernel/locking/lockdep.c                       | 75
 lib/test_static_keys.c                         | 62
 tools/lib/lockdep/common.c                     |  5
 tools/lib/lockdep/include/liblockdep/common.h  |  1
 tools/lib/lockdep/preload.c                    |  2
 17 files changed, 64 insertions(+), 136 deletions(-)
diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c
index 72e17f7ebd6f..786e36e2f61d 100644
--- a/arch/c6x/kernel/setup.c
+++ b/arch/c6x/kernel/setup.c
@@ -281,8 +281,6 @@ notrace void __init machine_init(unsigned long dt_ptr)
 	 */
 	set_ist(_vectors_start);
 
-	lockdep_init();
-
 	/*
 	 * dtb is passed in from bootloader.
 	 * fdt is linked in blob.
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 89a2a9394927..f31ebb5dc26c 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -130,8 +130,6 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
 	memset(__bss_start, 0, __bss_stop-__bss_start);
 	memset(_ssbss, 0, _esbss-_ssbss);
 
-	lockdep_init();
-
 	/* initialize device tree for usage in early_printk */
 	early_init_devtree(_fdt_start);
 
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index ad8c9db61237..d544fa311757 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -114,8 +114,6 @@ extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
 
 notrace void __init machine_init(u64 dt_ptr)
 {
-	lockdep_init();
-
 	/* Enable early debugging if any specified (see udbg.h) */
 	udbg_early_init();
 
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 5c03a6a9b054..f98be8383a39 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -255,9 +255,6 @@ void __init early_setup(unsigned long dt_ptr)
 	setup_paca(&boot_paca);
 	fixup_boot_paca();
 
-	/* Initialize lockdep early or else spinlocks will blow */
-	lockdep_init();
-
 	/* -------- printk is now safe to use ------- */
 
 	/* Enable early debugging if any specified (see udbg.h) */
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index c55576bbaa1f..a0684de5a93b 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -448,7 +448,6 @@ void __init startup_init(void)
 	rescue_initrd();
 	clear_bss_section();
 	init_kernel_storage_key();
-	lockdep_init();
 	lockdep_off();
 	setup_lowcore_early();
 	setup_facility_list();
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index f2d30cab5b3f..cd1f592cd347 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -696,14 +696,6 @@ tlb_fixup_done:
 	call	__bzero
 	 sub	%o1, %o0, %o1
 
-#ifdef CONFIG_LOCKDEP
-	/* We have this call this super early, as even prom_init can grab
-	 * spinlocks and thus call into the lockdep code.
-	 */
-	call	lockdep_init
-	 nop
-#endif
-
 	call	prom_init
 	 mov	%l7, %o0			! OpenPROM cif handler
 
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index a584e1c50918..bfb28caf97b1 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -6,18 +6,17 @@
 
 /*
  * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
+ * And yes, this might be required on UP too when we're talking
  * to devices.
  */
 
 #ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
+				      X86_FEATURE_XMM2) ::: "memory", "cc")
+#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
+				       X86_FEATURE_XMM2) ::: "memory", "cc")
+#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
+				       X86_FEATURE_XMM2) ::: "memory", "cc")
 #else
 #define mb() 	asm volatile("mfence":::"memory")
 #define rmb()	asm volatile("lfence":::"memory")
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9f7c21c22477..9decee2bfdbe 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -418,9 +418,9 @@ static void mwait_idle(void)
 	if (!current_set_polling_and_test()) {
 		trace_cpu_idle_rcuidle(1, smp_processor_id());
 		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
-			smp_mb(); /* quirk */
+			mb(); /* quirk */
 			clflush((void *)&current_thread_info()->flags);
-			smp_mb(); /* quirk */
+			mb(); /* quirk */
 		}
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 4ba229ac3f4f..f56cc418c87d 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1520,12 +1520,6 @@ __init void lguest_init(void)
 	 */
 	reserve_top_address(lguest_data.reserve_mem);
 
-	/*
-	 * If we don't initialize the lock dependency checker now, it crashes
-	 * atomic_notifier_chain_register, then paravirt_disable_iospace.
-	 */
-	lockdep_init();
-
 	/* Hook in our special panic hypercall code. */
 	atomic_notifier_chain_register(&panic_notifier_list, &paniced);
 
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 00b042c49ccd..4291592b6433 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -263,8 +263,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  * In contrast to ACCESS_ONCE these two macros will also work on aggregate
  * data types like structs or unions. If the size of the accessed data
  * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
- * compile-time warning.
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
+ * least two memcpy()s: one for the __builtin_memcpy() and then one for
+ * the macro doing the copy of variable - '__u' allocated on the stack.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 4dca42fd32f5..d026b190c530 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -261,7 +261,6 @@ struct held_lock {
 /*
  * Initialization, self-test and debugging-output methods:
  */
-extern void lockdep_init(void);
 extern void lockdep_info(void);
 extern void lockdep_reset(void);
 extern void lockdep_reset_lock(struct lockdep_map *lock);
@@ -392,7 +391,6 @@ static inline void lockdep_on(void)
 # define lockdep_set_current_reclaim_state(g)	do { } while (0)
 # define lockdep_clear_current_reclaim_state()	do { } while (0)
 # define lockdep_trace_alloc(g)			do { } while (0)
-# define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
 		do { (void)(name); (void)(key); } while (0)
diff --git a/init/main.c b/init/main.c
index 58c9e374704b..b3008bcfb1dc 100644
--- a/init/main.c
+++ b/init/main.c
@@ -499,11 +499,6 @@ asmlinkage __visible void __init start_kernel(void)
 	char *command_line;
 	char *after_dashes;
 
-	/*
-	 * Need to run as early as possible, to initialize the
-	 * lockdep hash:
-	 */
-	lockdep_init();
 	set_task_stack_end_magic(&init_task);
 	smp_setup_processor_id();
 	debug_objects_early_init();
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7537e568dabe..3261214323fa 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -123,8 +123,6 @@ static inline int debug_locks_off_graph_unlock(void)
 	return ret;
 }
 
-static int lockdep_initialized;
-
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
@@ -434,19 +432,6 @@ unsigned int max_lockdep_depth;
 
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
- * We cannot printk in early bootup code. Not even early_printk()
- * might work. So we mark any initialization errors and printk
- * about it later on, in lockdep_info().
- */
-static int lockdep_init_error;
-static const char *lock_init_error;
-static unsigned long lockdep_init_trace_data[20];
-static struct stack_trace lockdep_init_trace = {
-	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
-	.entries = lockdep_init_trace_data,
-};
-
-/*
  * Various lockdep statistics:
  */
 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
@@ -669,20 +654,6 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	struct hlist_head *hash_head;
 	struct lock_class *class;
 
-#ifdef CONFIG_DEBUG_LOCKDEP
-	/*
-	 * If the architecture calls into lockdep before initializing
-	 * the hashes then we'll warn about it later. (we cannot printk
-	 * right now)
-	 */
-	if (unlikely(!lockdep_initialized)) {
-		lockdep_init();
-		lockdep_init_error = 1;
-		lock_init_error = lock->name;
-		save_stack_trace(&lockdep_init_trace);
-	}
-#endif
-
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 		debug_locks_off();
 		printk(KERN_ERR
@@ -1822,7 +1793,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance, int trylock_loop)
+	       struct held_lock *next, int distance, int *stack_saved)
 {
 	struct lock_list *entry;
 	int ret;
@@ -1883,8 +1854,11 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		}
 	}
 
-	if (!trylock_loop && !save_trace(&trace))
-		return 0;
+	if (!*stack_saved) {
+		if (!save_trace(&trace))
+			return 0;
+		*stack_saved = 1;
+	}
 
 	/*
 	 * Ok, all validations passed, add the new lock
@@ -1907,6 +1881,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * Debugging printouts:
 	 */
 	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
+		/* We drop graph lock, so another thread can overwrite trace. */
+		*stack_saved = 0;
 		graph_unlock();
 		printk("\n new dependency: ");
 		print_lock_name(hlock_class(prev));
@@ -1929,7 +1905,7 @@ static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
 	int depth = curr->lockdep_depth;
-	int trylock_loop = 0;
+	int stack_saved = 0;
 	struct held_lock *hlock;
 
 	/*
@@ -1956,7 +1932,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 */
 		if (hlock->read != 2 && hlock->check) {
 			if (!check_prev_add(curr, hlock, next,
-						distance, trylock_loop))
+						distance, &stack_saved))
 				return 0;
 			/*
 			 * Stop after the first non-trylock entry,
@@ -1979,7 +1955,6 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		if (curr->held_locks[depth].irq_context !=
 			curr->held_locks[depth-1].irq_context)
 			break;
-		trylock_loop = 1;
 	}
 	return 1;
 out_bug:
@@ -4009,28 +3984,6 @@ out_restore:
 	raw_local_irq_restore(flags);
 }
 
-void lockdep_init(void)
-{
-	int i;
-
-	/*
-	 * Some architectures have their own start_kernel()
-	 * code which calls lockdep_init(), while we also
-	 * call lockdep_init() from the start_kernel() itself,
-	 * and we want to initialize the hashes only once:
-	 */
-	if (lockdep_initialized)
-		return;
-
-	for (i = 0; i < CLASSHASH_SIZE; i++)
-		INIT_HLIST_HEAD(classhash_table + i);
-
-	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_HLIST_HEAD(chainhash_table + i);
-
-	lockdep_initialized = 1;
-}
-
 void __init lockdep_info(void)
 {
 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
@@ -4057,14 +4010,6 @@ void __init lockdep_info(void)
 
 	printk(" per task-struct memory footprint: %lu bytes\n",
 		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
-
-#ifdef CONFIG_DEBUG_LOCKDEP
-	if (lockdep_init_error) {
-		printk("WARNING: lockdep init error: lock '%s' was acquired before lockdep_init().\n", lock_init_error);
-		printk("Call stack leading to lockdep invocation was:\n");
-		print_stack_trace(&lockdep_init_trace, 0);
-	}
-#endif
 }
 
 static void
diff --git a/lib/test_static_keys.c b/lib/test_static_keys.c
index c61b299e367f..915d75df2086 100644
--- a/lib/test_static_keys.c
+++ b/lib/test_static_keys.c
@@ -46,8 +46,11 @@ struct test_key {
 	bool			(*test_key)(void);
 };
 
-#define test_key_func(key, branch) \
-	({bool func(void) { return branch(key); } func; })
+#define test_key_func(key, branch)	\
+static bool key ## _ ## branch(void)	\
+{					\
+	return branch(&key);		\
+}
 
 static void invert_key(struct static_key *key)
 {
@@ -92,6 +95,25 @@ static int verify_keys(struct test_key *keys, int size, bool invert)
 	return 0;
 }
 
+test_key_func(old_true_key, static_key_true)
+test_key_func(old_false_key, static_key_false)
+test_key_func(true_key, static_branch_likely)
+test_key_func(true_key, static_branch_unlikely)
+test_key_func(false_key, static_branch_likely)
+test_key_func(false_key, static_branch_unlikely)
+test_key_func(base_old_true_key, static_key_true)
+test_key_func(base_inv_old_true_key, static_key_true)
+test_key_func(base_old_false_key, static_key_false)
+test_key_func(base_inv_old_false_key, static_key_false)
+test_key_func(base_true_key, static_branch_likely)
+test_key_func(base_true_key, static_branch_unlikely)
+test_key_func(base_inv_true_key, static_branch_likely)
+test_key_func(base_inv_true_key, static_branch_unlikely)
+test_key_func(base_false_key, static_branch_likely)
+test_key_func(base_false_key, static_branch_unlikely)
+test_key_func(base_inv_false_key, static_branch_likely)
+test_key_func(base_inv_false_key, static_branch_unlikely)
+
 static int __init test_static_key_init(void)
 {
 	int ret;
@@ -102,95 +124,95 @@ static int __init test_static_key_init(void)
 		{
 			.init_state	= true,
 			.key		= &old_true_key,
-			.test_key	= test_key_func(&old_true_key, static_key_true),
+			.test_key	= &old_true_key_static_key_true,
 		},
 		{
 			.init_state	= false,
 			.key		= &old_false_key,
-			.test_key	= test_key_func(&old_false_key, static_key_false),
+			.test_key	= &old_false_key_static_key_false,
 		},
 		/* internal keys - new keys */
 		{
 			.init_state	= true,
 			.key		= &true_key.key,
-			.test_key	= test_key_func(&true_key, static_branch_likely),
+			.test_key	= &true_key_static_branch_likely,
 		},
 		{
 			.init_state	= true,
 			.key		= &true_key.key,
-			.test_key	= test_key_func(&true_key, static_branch_unlikely),
+			.test_key	= &true_key_static_branch_unlikely,
 		},
 		{
 			.init_state	= false,
 			.key		= &false_key.key,
-			.test_key	= test_key_func(&false_key, static_branch_likely),
+			.test_key	= &false_key_static_branch_likely,
 		},
 		{
 			.init_state	= false,
 			.key		= &false_key.key,
-			.test_key	= test_key_func(&false_key, static_branch_unlikely),
+			.test_key	= &false_key_static_branch_unlikely,
 		},
 		/* external keys - old keys */
 		{
 			.init_state	= true,
 			.key		= &base_old_true_key,
-			.test_key	= test_key_func(&base_old_true_key, static_key_true),
+			.test_key	= &base_old_true_key_static_key_true,
 		},
 		{
 			.init_state	= false,
 			.key		= &base_inv_old_true_key,
-			.test_key	= test_key_func(&base_inv_old_true_key, static_key_true),
+			.test_key	= &base_inv_old_true_key_static_key_true,
 		},
 		{
 			.init_state	= false,
 			.key		= &base_old_false_key,
-			.test_key	= test_key_func(&base_old_false_key, static_key_false),
+			.test_key	= &base_old_false_key_static_key_false,
 		},
 		{
 			.init_state	= true,
 			.key		= &base_inv_old_false_key,
-			.test_key	= test_key_func(&base_inv_old_false_key, static_key_false),
+			.test_key	= &base_inv_old_false_key_static_key_false,
 		},
 		/* external keys - new keys */
 		{
 			.init_state	= true,
 			.key		= &base_true_key.key,
-			.test_key	= test_key_func(&base_true_key, static_branch_likely),
+			.test_key	= &base_true_key_static_branch_likely,
 		},
 		{
 			.init_state	= true,
 			.key		= &base_true_key.key,
-			.test_key	= test_key_func(&base_true_key, static_branch_unlikely),
+			.test_key	= &base_true_key_static_branch_unlikely,
 		},
 		{
 			.init_state	= false,
 			.key		= &base_inv_true_key.key,
-			.test_key	= test_key_func(&base_inv_true_key, static_branch_likely),
+			.test_key	= &base_inv_true_key_static_branch_likely,
 		},
 		{
 			.init_state	= false,
 			.key		= &base_inv_true_key.key,
-			.test_key	= test_key_func(&base_inv_true_key, static_branch_unlikely),
+			.test_key	= &base_inv_true_key_static_branch_unlikely,
 		},
 		{
 			.init_state	= false,
 			.key		= &base_false_key.key,
-			.test_key	= test_key_func(&base_false_key, static_branch_likely),
+			.test_key	= &base_false_key_static_branch_likely,
 		},
 		{
 			.init_state	= false,
 			.key		= &base_false_key.key,
-			.test_key	= test_key_func(&base_false_key, static_branch_unlikely),
+			.test_key	= &base_false_key_static_branch_unlikely,
 		},
 		{
 			.init_state	= true,
 			.key		= &base_inv_false_key.key,
-			.test_key	= test_key_func(&base_inv_false_key, static_branch_likely),
+			.test_key	= &base_inv_false_key_static_branch_likely,
 		},
 		{
 			.init_state	= true,
 			.key		= &base_inv_false_key.key,
-			.test_key	= test_key_func(&base_inv_false_key, static_branch_unlikely),
+			.test_key	= &base_inv_false_key_static_branch_unlikely,
 		},
 	};
 
diff --git a/tools/lib/lockdep/common.c b/tools/lib/lockdep/common.c
index 9be663340f0a..d1c89cc06f5f 100644
--- a/tools/lib/lockdep/common.c
+++ b/tools/lib/lockdep/common.c
@@ -11,11 +11,6 @@ static __thread struct task_struct current_obj;
 bool debug_locks = true;
 bool debug_locks_silent;
 
-__attribute__((constructor)) static void liblockdep_init(void)
-{
-	lockdep_init();
-}
-
 __attribute__((destructor)) static void liblockdep_exit(void)
 {
 	debug_check_no_locks_held();
diff --git a/tools/lib/lockdep/include/liblockdep/common.h b/tools/lib/lockdep/include/liblockdep/common.h
index a60c14b9662a..6e66277ec437 100644
--- a/tools/lib/lockdep/include/liblockdep/common.h
+++ b/tools/lib/lockdep/include/liblockdep/common.h
@@ -44,7 +44,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 void lock_release(struct lockdep_map *lock, int nested,
 			unsigned long ip);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void lockdep_init(void);
 
 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
 	{ .name = (_name), .key = (void *)(_key), }
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 21cdf869a01b..52844847569c 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -439,7 +439,5 @@ __attribute__((constructor)) static void init_preload(void)
 	ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
 #endif
 
-	lockdep_init();
-
 	__init_state = done;
 }
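
Note on why the explicit initialisation can simply be dropped: the lockdep class and chain hash tables are statically allocated arrays of hlist_head, and an all-zero hlist_head already represents an empty list, so the tables are usable as soon as the BSS is cleared and the INIT_HLIST_HEAD() loop in lockdep_init() added nothing. The sketch below only illustrates that property; the names and sizes are made up for the example and are not taken from this patch.

/*
 * Standalone illustration (not part of the patch): a zeroed hlist_head is a
 * valid empty bucket, so a hash table living in the BSS needs no runtime
 * initialisation before its first lookup.
 */
#include <assert.h>
#include <stddef.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

#define DEMO_CLASSHASH_SIZE 1024	/* illustrative size, not the kernel's */

/* Zero-initialised, exactly like a static table in .bss. */
static struct hlist_head demo_classhash_table[DEMO_CLASSHASH_SIZE];

static int demo_hlist_empty(const struct hlist_head *h)
{
	return !h->first;	/* NULL 'first' pointer == empty bucket */
}

int main(void)
{
	/* Every bucket is already empty before any init code has run. */
	for (size_t i = 0; i < DEMO_CLASSHASH_SIZE; i++)
		assert(demo_hlist_empty(&demo_classhash_table[i]));
	return 0;
}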