author | Josh Poimboeuf <jpoimboe@redhat.com> | 2020-08-18 16:57:42 +0300 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2020-09-01 10:58:04 +0300 |
commit | 9183c3f9ed710a8edf1a61e8a96d497258d26e08 (patch) | |
tree | b3434e44f45bb98d2decb831e77c3e2c48329663 /include/linux/static_call.h | |
parent | 115284d89a436e9b66da0c6c4f6efded806874b2 (diff) | |
download | linux-9183c3f9ed710a8edf1a61e8a96d497258d26e08.tar.xz |
static_call: Add inline static call infrastructure
Add infrastructure for an arch-specific CONFIG_HAVE_STATIC_CALL_INLINE
option, which is a faster version of CONFIG_HAVE_STATIC_CALL. At
runtime, the static call sites are patched directly, rather than using
the out-of-line trampolines.
Compared to out-of-line static calls, the performance benefits are more
modest, but still measurable. Steven Rostedt did some tracepoint
measurements:
https://lkml.kernel.org/r/20181126155405.72b4f718@gandalf.local.home
This code is heavily inspired by the jump label code (aka "static
jumps"), as some of the concepts are very similar.
For more details, see the comments in include/linux/static_call.h.
[peterz: simplified interface; merged trampolines]
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20200818135804.684334440@infradead.org
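To make the interface concrete, here is a minimal usage sketch of the API this patch extends. The names my_call, my_func_a, my_func_b, use_it and retarget are hypothetical; DEFINE_STATIC_CALL(), static_call() and static_call_update() come from <linux/static_call.h>, as shown in the diff below.

```c
#include <linux/static_call.h>

static int my_func_a(int arg)
{
	return arg + 1;
}

static int my_func_b(int arg)
{
	return arg * 2;
}

/* Creates the key and the arch trampoline, initially targeting my_func_a. */
DEFINE_STATIC_CALL(my_call, my_func_a);

static int use_it(int arg)
{
	/*
	 * With CONFIG_HAVE_STATIC_CALL this is a direct call through the
	 * out-of-line trampoline; with CONFIG_HAVE_STATIC_CALL_INLINE the
	 * call site itself is patched when the target changes.
	 */
	return static_call(my_call)(arg);
}

static void retarget(void)
{
	/* Re-patch the trampoline and, for the inline case, every call site. */
	static_call_update(my_call, my_func_b);
}
```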
Diffstat (limited to 'include/linux/static_call.h')
-rw-r--r-- | include/linux/static_call.h | 36 |
1 file changed, 35 insertions(+), 1 deletion(-)
diff --git a/include/linux/static_call.h b/include/linux/static_call.h
index d8892dff2e91..0d7f9efaa3b2 100644
--- a/include/linux/static_call.h
+++ b/include/linux/static_call.h
@@ -95,7 +95,41 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func);
 			     STATIC_CALL_TRAMP_ADDR(name), func);	\
 })
 
-#if defined(CONFIG_HAVE_STATIC_CALL)
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+
+struct static_call_mod {
+	struct static_call_mod *next;
+	struct module *mod; /* for vmlinux, mod == NULL */
+	struct static_call_site *sites;
+};
+
+struct static_call_key {
+	void *func;
+	struct static_call_mod *mods;
+};
+
+extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
+extern int static_call_mod_init(struct module *mod);
+
+#define DEFINE_STATIC_CALL(name, _func)					\
+	DECLARE_STATIC_CALL(name, _func);				\
+	struct static_call_key STATIC_CALL_KEY(name) = {		\
+		.func = _func,						\
+		.mods = NULL,						\
+	};								\
+	ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
+
+#define static_call(name)	__static_call(name)
+
+#define EXPORT_STATIC_CALL(name)					\
+	EXPORT_SYMBOL(STATIC_CALL_KEY(name));				\
+	EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
+
+#define EXPORT_STATIC_CALL_GPL(name)					\
+	EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));			\
+	EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+
+#elif defined(CONFIG_HAVE_STATIC_CALL)
 
 struct static_call_key {
 	void *func;
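For the two export macros introduced above, a hedged sketch of how a provider might expose a static call to modules; the names my_exported_call and my_default_handler are made up for illustration. Both the key and the trampoline symbol are exported, since module call sites reference both.

```c
#include <linux/export.h>
#include <linux/static_call.h>

/* Hypothetical default target for the exported static call. */
static int my_default_handler(void *data)
{
	return 0;
}

DEFINE_STATIC_CALL(my_exported_call, my_default_handler);

/* Exports STATIC_CALL_KEY(my_exported_call) and its trampoline symbol. */
EXPORT_STATIC_CALL_GPL(my_exported_call);
```

A module built against this header (with a matching DECLARE_STATIC_CALL() visible in a shared header) can then invoke static_call(my_exported_call)(ptr), and the owner can retarget every call site with static_call_update().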