From 6c9692e2d6a2206d8fd75ea247daa47fb75e4a02 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 27 May 2015 11:09:37 +0930
Subject: module: Make the mod_tree stuff conditional on PERF_EVENTS || TRACING

Andrew worried about the overhead on small systems; only use the fancy
code when either perf or tracing is enabled.

Cc: Rusty Russell
Cc: Steven Rostedt
Requested-by: Andrew Morton
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Rusty Russell
---
 include/linux/module.h |  4 +++-
 init/Kconfig           |  4 ++++
 kernel/module.c        | 30 ++++++++++++++++++++++++++++--
 3 files changed, 35 insertions(+), 3 deletions(-)

diff --git a/include/linux/module.h b/include/linux/module.h
index ddf35a3368fb..4c1b02e1361d 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -282,7 +282,7 @@ struct module {
 	 *
 	 * Cacheline align here, such that:
 	 *   module_init, module_core, init_size, core_size,
-	 *   init_text_size, core_text_size and ltn_core.node[0]
+	 *   init_text_size, core_text_size and mtn_core::{mod,node[0]}
 	 *   are on the same cacheline.
 	 */
 	void *module_init	____cacheline_aligned;
@@ -296,6 +296,7 @@ struct module {
 	/* The size of the executable code in each section. */
 	unsigned int init_text_size, core_text_size;
 
+#ifdef CONFIG_MODULES_TREE_LOOKUP
 	/*
 	 * We want mtn_core::{mod,node[0]} to be in the same cacheline as the
 	 * above entries such that a regular lookup will only touch one
@@ -303,6 +304,7 @@
 	 */
 	struct mod_tree_node	mtn_core;
 	struct mod_tree_node	mtn_init;
+#endif
 
 	/* Size of RO sections of the module (text+rodata) */
 	unsigned int init_ro_size, core_ro_size;
diff --git a/init/Kconfig b/init/Kconfig
index dc24dec60232..968a001790af 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1989,6 +1989,10 @@ endchoice
 
 endif # MODULES
 
+config MODULES_TREE_LOOKUP
+	def_bool y
+	depends on PERF_EVENTS || TRACING
+
 config INIT_ALL_POSSIBLE
 	bool
 	help
diff --git a/kernel/module.c b/kernel/module.c
index e0db5c31cb53..ac3044ceca3f 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -102,6 +102,8 @@ DEFINE_MUTEX(module_mutex);
 EXPORT_SYMBOL_GPL(module_mutex);
 static LIST_HEAD(modules);
 
+#ifdef CONFIG_MODULES_TREE_LOOKUP
+
 /*
  * Use a latched RB-tree for __module_address(); this allows us to use
  * RCU-sched lookups of the address from any context.
@@ -112,6 +114,10 @@ static LIST_HEAD(modules);
  *
  * Because init ranges are short lived we mark them unlikely and have placed
  * them outside the critical cacheline in struct module.
+ *
+ * This is conditional on PERF_EVENTS || TRACING because those can really hit
+ * __module_address() hard by doing a lot of stack unwinding; potentially from
+ * NMI context.
  */
 
 static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
@@ -192,7 +198,7 @@ static void mod_tree_remove(struct module *mod)
 	mod_tree_remove_init(mod);
 }
 
-static struct module *mod_tree_find(unsigned long addr)
+static struct module *mod_find(unsigned long addr)
 {
 	struct latch_tree_node *ltn;
 
@@ -203,6 +209,26 @@
 	return container_of(ltn, struct mod_tree_node, node)->mod;
 }
 
+#else /* MODULES_TREE_LOOKUP */
+
+static void mod_tree_insert(struct module *mod) { }
+static void mod_tree_remove_init(struct module *mod) { }
+static void mod_tree_remove(struct module *mod) { }
+
+static struct module *mod_find(unsigned long addr)
+{
+	struct module *mod;
+
+	list_for_each_entry_rcu(mod, &modules, list) {
+		if (within_module(addr, mod))
+			return mod;
+	}
+
+	return NULL;
+}
+
+#endif /* MODULES_TREE_LOOKUP */
+
 #ifdef CONFIG_KGDB_KDB
 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
 #endif /* CONFIG_KGDB_KDB */
@@ -3966,7 +3992,7 @@ struct module *__module_address(unsigned long addr)
 
 	module_assert_mutex_or_preempt();
 
-	mod = mod_tree_find(addr);
+	mod = mod_find(addr);
 	if (mod) {
 		BUG_ON(!within_module(addr, mod));
 		if (mod->state == MODULE_STATE_UNFORMED)
-- 
cgit v1.2.3
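
A note on the technique, for readers of the patch: the latched RB-tree is
built on the seqcount latch (raw_write_seqcount_latch() in
include/linux/seqlock.h; the lookup retry loop lives in latch_tree_find()
in include/linux/rbtree_latch.h). The writer keeps two copies of the tree
and flips a sequence counter around each update; readers use the copy
selected by the counter's low bit and retry if the counter moved, so they
never take a lock, which is what makes __module_address() usable even from
NMI context. The user-space sketch below is illustrative only, not kernel
code: the names latch_pair, latch_update() and latch_read() are invented
for the sketch, and C11 seq_cst atomics stand in for the kernel's
hand-rolled barriers.

#include <stdatomic.h>
#include <stdio.h>

/* Two copies of the protected data plus a sequence counter. */
struct latch_pair {
	atomic_uint seq;	/* low bit selects the copy readers use */
	atomic_int copy[2];	/* two versions of the protected value */
};

/* Writer: steer readers to the other copy, then update each in turn. */
static void latch_update(struct latch_pair *lp, int val)
{
	atomic_fetch_add(&lp->seq, 1);	/* odd: readers use copy[1] */
	atomic_store(&lp->copy[0], val);
	atomic_fetch_add(&lp->seq, 1);	/* even: readers use copy[0] */
	atomic_store(&lp->copy[1], val);
}

/* Reader: lock-free, so it could run from any context, even NMI. */
static int latch_read(struct latch_pair *lp)
{
	unsigned int seq;
	int val;

	do {
		seq = atomic_load(&lp->seq);
		val = atomic_load(&lp->copy[seq & 1]);
	} while (atomic_load(&lp->seq) != seq);	/* counter moved: retry */

	return val;
}

int main(void)
{
	struct latch_pair lp = { .seq = 0, .copy = { 42, 42 } };

	latch_update(&lp, 43);
	printf("latched value: %d\n", latch_read(&lp));
	return 0;
}

The cost is space: every element carries a slot in each tree copy, which,
together with the extra cachelines in struct module, is exactly the
overhead this patch makes conditional.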
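
Whichever mod_find() gets built, __module_address() keeps the same calling
convention, enforced by the module_assert_mutex_or_preempt() visible in the
last hunk: hold module_mutex, or be inside an RCU-sched read-side section
so the module cannot be freed mid-lookup. As a reminder of that contract,
this is roughly the shape of the in-tree is_module_address() helper in
kernel/module.c:

bool is_module_address(unsigned long addr)
{
	bool ret;

	preempt_disable();	/* RCU-sched read side */
	ret = __module_address(addr) != NULL;
	preempt_enable();

	return ret;
}

The list-walk fallback depends on the same rule: list_for_each_entry_rcu()
is safe there only because the caller pins the modules list for the
duration of the walk.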