author     Peter Zijlstra <peterz@infradead.org>      2015-05-27 11:09:37 +0930
committer  Rusty Russell <rusty@rustcorp.com.au>      2015-05-28 11:32:07 +0930
commit     93c2e105f6bcee231c951ba0e56e84505c4b0483 (patch)
tree       c81d9957d95194807d6907b1318047af16c71c5a /include/linux/module.h
parent     ade3f510f93a5613b672febe88eff8ea7f1c63b7 (diff)
module: Optimize __module_address() using a latched RB-tree
Currently __module_address() does a linear search through all loaded
modules to find the module corresponding to the provided address; with
many modules loaded this becomes expensive.
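For context, the pre-patch lookup is essentially a list walk. A minimal sketch of
that behaviour (simplified: the real __module_address() in kernel/module.c also
filters out unformed modules), using the modules list and the within_module()
helper:

```c
/* Simplified sketch of the old, O(n) lookup in kernel/module.c. */
struct module *__module_address(unsigned long addr)
{
	struct module *mod;

	/* Walk every loaded module until one covers the address. */
	list_for_each_entry_rcu(mod, &modules, list) {
		if (within_module(addr, mod))
			return mod;
	}
	return NULL;
}
```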
One of the users of this is kernel_text_address(), which is employed
by many stack unwinders, which in turn are used by perf-callchain and
ftrace (possibly from NMI context).
By optimizing __module_address() we therefore optimize many stack
unwinders, which are used by both perf and tracing in
performance-sensitive code.
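The latch tree keys each module by the address ranges of its core and init
mappings, so a lookup becomes an O(log n) descent instead of an O(n) walk, and
the latch scheme keeps it safe to run concurrently with insert/erase (including
from NMI). Below is a rough sketch of the lookup side, built on the
latch_tree_ops/latch_tree_find() API added by the parent commit; the mod_tree
root and helper names are illustrative reconstructions, not taken verbatim from
this patch:

```c
#include <linux/rbtree_latch.h>

static struct latch_tree_root mod_tree;		/* illustrative name */

/* Start address of the region a tree node covers (core or init mapping). */
static unsigned long mod_tree_start(struct latch_tree_node *n)
{
	struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
	struct module *mod = mtn->mod;

	if (mtn == &mod->mtn_init)
		return (unsigned long)mod->module_init;
	return (unsigned long)mod->module_core;
}

static unsigned long mod_tree_size(struct latch_tree_node *n)
{
	struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
	struct module *mod = mtn->mod;

	if (mtn == &mod->mtn_init)
		return mod->init_size;
	return mod->core_size;
}

/* Order nodes by start address for insertion. */
static bool mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
	return mod_tree_start(a) < mod_tree_start(b);
}

/* Compare a lookup address against the range covered by a node. */
static int mod_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long addr = (unsigned long)key;
	unsigned long start = mod_tree_start(n);

	if (addr < start)
		return -1;
	if (addr >= start + mod_tree_size(n))
		return 1;
	return 0;
}

static const struct latch_tree_ops mod_tree_ops = {
	.less = mod_tree_less,
	.comp = mod_tree_comp,
};

/* O(log n) address-to-module lookup. */
static struct module *mod_find(unsigned long addr)
{
	struct latch_tree_node *ltn;

	ltn = latch_tree_find((void *)addr, &mod_tree, &mod_tree_ops);
	if (!ltn)
		return NULL;
	return container_of(ltn, struct mod_tree_node, node)->mod;
}
```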
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'include/linux/module.h')
-rw-r--r--   include/linux/module.h | 29
1 file changed, 26 insertions, 3 deletions
diff --git a/include/linux/module.h b/include/linux/module.h
index fb56dd85a862..ddf35a3368fb 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -17,6 +17,7 @@
 #include <linux/moduleparam.h>
 #include <linux/jump_label.h>
 #include <linux/export.h>
+#include <linux/rbtree_latch.h>
 #include <linux/percpu.h>
 #include <asm/module.h>
 
@@ -210,6 +211,13 @@ enum module_state {
 	MODULE_STATE_UNFORMED,	/* Still setting it up. */
 };
 
+struct module;
+
+struct mod_tree_node {
+	struct module *mod;
+	struct latch_tree_node node;
+};
+
 struct module {
 	enum module_state state;
 
@@ -269,8 +277,15 @@ struct module {
 	/* Startup function. */
 	int (*init)(void);
 
-	/* If this is non-NULL, vfree after init() returns */
-	void *module_init;
+	/*
+	 * If this is non-NULL, vfree() after init() returns.
+	 *
+	 * Cacheline align here, such that:
+	 *   module_init, module_core, init_size, core_size,
+	 *   init_text_size, core_text_size and ltn_core.node[0]
+	 *   are on the same cacheline.
+	 */
+	void *module_init	____cacheline_aligned;
 
 	/* Here is the actual code + data, vfree'd on unload. */
 	void *module_core;
@@ -281,6 +296,14 @@ struct module {
 	/* The size of the executable code in each section. */
 	unsigned int init_text_size, core_text_size;
 
+	/*
+	 * We want mtn_core::{mod,node[0]} to be in the same cacheline as the
+	 * above entries such that a regular lookup will only touch one
+	 * cacheline.
+	 */
+	struct mod_tree_node	mtn_core;
+	struct mod_tree_node	mtn_init;
+
 	/* Size of RO sections of the module (text+rodata) */
 	unsigned int init_ro_size, core_ro_size;
 
@@ -367,7 +390,7 @@ struct module {
 	ctor_fn_t *ctors;
 	unsigned int num_ctors;
 #endif
-};
+} ____cacheline_aligned;
 
 #ifndef MODULE_ARCH_INIT
 #define MODULE_ARCH_INIT {}
 #endif
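The header changes above only add the per-module tree nodes and the cacheline
alignment; hooking the nodes into the tree happens in kernel/module.c, which is
outside this diffstat. Sketched below, under the same illustrative
mod_tree/mod_tree_ops names as earlier, is how the core and init nodes would be
inserted when a module is loaded and erased when the init region is freed or
the module is removed:

```c
/* Illustrative sketch, not part of this header diff. */
static void mod_tree_insert(struct module *mod)
{
	mod->mtn_core.mod = mod;
	mod->mtn_init.mod = mod;

	latch_tree_insert(&mod->mtn_core.node, &mod_tree, &mod_tree_ops);
	if (mod->init_size)
		latch_tree_insert(&mod->mtn_init.node, &mod_tree, &mod_tree_ops);
}

static void mod_tree_remove_init(struct module *mod)
{
	/* The init mapping is vfree()'d after init() returns; drop its node. */
	if (mod->init_size)
		latch_tree_erase(&mod->mtn_init.node, &mod_tree, &mod_tree_ops);
}

static void mod_tree_remove(struct module *mod)
{
	latch_tree_erase(&mod->mtn_core.node, &mod_tree, &mod_tree_ops);
	mod_tree_remove_init(mod);
}
```

The ____cacheline_aligned markers in the diff keep module_init, module_core,
the size fields and mtn_core on a single cacheline, so the common lookup path
touches only one cacheline per module visited.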