Subject: [PATCH v6 7/9] module: Make the mod_tree stuff conditional on PERF_EVENTS || TRACING
Andrew worried about the overhead on small systems; only use the fancy
code when either perf or tracing is enabled.

Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Requested-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
include/linux/module.h | 4 +++-
init/Kconfig | 4 ++++
kernel/module.c | 30 ++++++++++++++++++++++++++++--
3 files changed, 35 insertions(+), 3 deletions(-)
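
For context: the lookup being gated here is the latched RB-tree from earlier in
this series. Its query loop is lockless and simply retries when it races a
writer, which is what makes it usable from NMI context. A rough sketch of that
loop, assuming the seqcount-latch primitives and struct layout from
include/linux/rbtree_latch.h; tree_descend() is a made-up stand-in for the
real RB-tree walk:

	/*
	 * Sketch only, not part of this patch: the shape of a latched
	 * tree query. The latch keeps two copies of the tree; readers
	 * use the copy selected by the seqcount and retry if a writer
	 * flipped the latch underneath them.
	 */
	static struct latch_tree_node *sketch_latch_find(struct latch_tree_root *root,
							 unsigned long key)
	{
		struct latch_tree_node *node;
		unsigned int seq;

		do {
			seq = raw_read_seqcount_latch(&root->seq);
			node = tree_descend(&root->tree[seq & 1], key); /* hypothetical helper */
		} while (read_seqcount_retry(&root->seq, seq));

		return node;
	}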

--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -282,7 +282,7 @@ struct module {
*
* Cacheline align here, such that:
* module_init, module_core, init_size, core_size,
- * init_text_size, core_text_size and ltn_core.node[0]
+ * init_text_size, core_text_size and mtn_core::{mod,node[0]}
* are on the same cacheline.
*/
void *module_init ____cacheline_aligned;
@@ -296,6 +296,7 @@ struct module {
/* The size of the executable code in each section. */
unsigned int init_text_size, core_text_size;

+#ifdef CONFIG_MODULES_TREE_LOOKUP
/*
* We want mtn_core::{mod,node[0]} to be in the same cacheline as the
* above entries such that a regular lookup will only touch one
@@ -303,6 +304,7 @@ struct module {
*/
struct mod_tree_node mtn_core;
struct mod_tree_node mtn_init;
+#endif

/* Size of RO sections of the module (text+rodata) */
unsigned int init_ro_size, core_ro_size;
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1978,6 +1978,10 @@ endchoice

endif # MODULES

+config MODULES_TREE_LOOKUP
+	def_bool y
+	depends on PERF_EVENTS || TRACING
+
config INIT_ALL_POSSIBLE
bool
help
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -102,6 +102,8 @@ DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);

+#ifdef CONFIG_MODULES_TREE_LOOKUP
+
/*
* Use a latched RB-tree for __module_address(); this allows us to use
* RCU-sched lookups of the address from any context.
@@ -112,6 +114,10 @@ static LIST_HEAD(modules);
*
* Because init ranges are short lived we mark them unlikely and have placed
* them outside the critical cacheline in struct module.
+ *
+ * This is conditional on PERF_EVENTS || TRACING because those can really hit
+ * __module_address() hard by doing a lot of stack unwinding; potentially from
+ * NMI context.
*/

static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
@@ -192,7 +198,7 @@ static void mod_tree_remove(struct modul
mod_tree_remove_init(mod);
}

-static struct module *mod_tree_find(unsigned long addr)
+static struct module *mod_find(unsigned long addr)
{
struct latch_tree_node *ltn;

@@ -203,6 +209,26 @@ static struct module *mod_tree_find(unsi
return container_of(ltn, struct mod_tree_node, node)->mod;
}

+#else /* MODULES_TREE_LOOKUP */
+
+static void mod_tree_insert(struct module *mod) { }
+static void mod_tree_remove_init(struct module *mod) { }
+static void mod_tree_remove(struct module *mod) { }
+
+static struct module *mod_find(unsigned long addr)
+{
+	struct module *mod;
+
+	list_for_each_entry_rcu(mod, &modules, list) {
+		if (within_module(addr, mod))
+			return mod;
+	}
+
+	return NULL;
+}
+
+#endif /* MODULES_TREE_LOOKUP */
+
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */
@@ -3963,7 +3989,7 @@ struct module *__module_address(unsigned
if (addr < module_addr_min || addr > module_addr_max)
return NULL;

- mod = mod_tree_find(addr);
+ mod = mod_find(addr);
if (mod) {
BUG_ON(!within_module(addr, mod));
if (mod->state == MODULE_STATE_UNFORMED)
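
A usage sketch (not part of the patch) of the sort of caller this is all for:
a profiler resolving an arbitrary text address. __module_address() must be
called inside an RCU-sched read-side critical section, which preempt_disable()
provides:

	/*
	 * Sketch only: ask whether an address belongs to any live module.
	 * preempt_disable() marks the RCU-sched read side required by
	 * __module_address(); the tree variant makes this safe even from
	 * NMI context, where taking module_mutex is impossible.
	 */
	static bool sketch_addr_in_module(unsigned long addr)
	{
		struct module *mod;
		bool ret;

		preempt_disable();
		mod = __module_address(addr);
		ret = mod != NULL;
		preempt_enable();

		return ret;
	}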


