Date:	Thu, 9 Apr 2015 14:13:36 +0200
From:	Peter Zijlstra <>
Subject:	Re: [PATCH v4 6/9] rbtree: Implement generic latch_tree
On Thu, Apr 09, 2015 at 04:55:05PM +0800, Lai Jiangshan wrote:
> This sentence is talking about module.c not latch_tree.h. So I guess
> it is user(module.c)'s problem, not latch_tree.h's problem.
>
> The user(module.c) can wrap the struct latch_tree_nodes and add @priv.
OK, took me a while to figure out exactly what and how. You mean something like this, right?
(compile tested only)
---
 include/linux/module.h       |   11 ++++-
 include/linux/rbtree_latch.h |   93 ++++++++++++++++++-------------------------
 kernel/module.c              |   33 +++++++++------
 3 files changed, 70 insertions(+), 67 deletions(-)
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -211,6 +211,13 @@ enum module_state {
 	MODULE_STATE_UNFORMED,	/* Still setting it up. */
 };
 
+struct module;
+
+struct mod_tree_node {
+	struct latch_tree_node node;
+	struct module *mod;
+};
+
 struct module {
 	enum module_state state;
 
@@ -294,8 +301,8 @@ struct module {
 	 * core.node[0] to be in the same cacheline as the above entries,
 	 * we also assume ltn_init has a higher address than ltn_core.
 	 */
-	struct latch_tree_nodes	ltn_core;
-	struct latch_tree_nodes	ltn_init;
+	struct mod_tree_node	mtn_core;
+	struct mod_tree_node	mtn_init;
 
 	/* Size of RO sections of the module (text+rodata) */
 	unsigned int init_ro_size, core_ro_size;
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -36,17 +36,7 @@
 #include <linux/seqlock.h>
 
 struct latch_tree_node {
-	/*
-	 * Because we have an array of two entries in struct latch_tree_nodes
-	 * it's not possible to use container_of() to get back to the
-	 * encapsulating structure; therefore we have to put in a back pointer.
-	 */
-	void *priv;
-	struct rb_node node;
-};
-
-struct latch_tree_nodes {
-	struct latch_tree_node node[2];
+	struct rb_node node[2];
 };
 
 struct latch_tree_root {
@@ -74,17 +64,25 @@ struct latch_tree_ops {
 	int  (*comp)(void *key, struct latch_tree_node *b);
 };
 
+static __always_inline struct latch_tree_node *
+__lt_from_rb(struct rb_node *node, int idx)
+{
+	return container_of(node, struct latch_tree_node, node[idx]);
+}
+
 static __always_inline void
-__lt_insert(struct latch_tree_node *ltn, struct rb_root *root,
+__lt_insert(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx,
 	    bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b))
 {
+	struct rb_root *root = &ltr->tree[idx];
 	struct rb_node **link = &root->rb_node;
+	struct rb_node *node = &ltn->node[idx];
 	struct rb_node *parent = NULL;
 	struct latch_tree_node *ltp;
 
 	while (*link) {
 		parent = *link;
-		ltp = container_of(parent, struct latch_tree_node, node);
+		ltp = __lt_from_rb(parent, idx);
 
 		if (less(ltn, ltp))
 			link = &parent->rb_left;
@@ -92,32 +90,32 @@ __lt_insert(struct latch_tree_node *ltn,
 			link = &parent->rb_right;
 	}
 
-	rb_link_node_rcu(&ltn->node, parent, link);
-	rb_insert_color(&ltn->node, root);
+	rb_link_node_rcu(node, parent, link);
+	rb_insert_color(node, root);
 }
 
 static __always_inline void
-__lt_erase(struct latch_tree_node *ltn, struct rb_root *root)
+__lt_erase(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx)
 {
-	rb_erase(&ltn->node, root);
+	rb_erase(&ltn->node[idx], &ltr->tree[idx]);
 }
 
 static __always_inline struct latch_tree_node *
-__lt_find(void *key, struct rb_root *root,
-	  int (*comp)(void *key, struct latch_tree_node *ltn))
+__lt_find(void *key, struct latch_tree_root *ltr, int idx,
+	  int (*comp)(void *key, struct latch_tree_node *node))
 {
-	struct rb_node *n = rcu_dereference_raw(root->rb_node);
+	struct rb_node *node = rcu_dereference_raw(ltr->tree[idx].rb_node);
 	struct latch_tree_node *ltn;
 	int c;
 
-	while (n) {
-		ltn = container_of(n, struct latch_tree_node, node);
+	while (node) {
+		ltn = __lt_from_rb(node, idx);
 		c = comp(key, ltn);
 
 		if (c < 0)
-			n = rcu_dereference_raw(n->rb_left);
+			node = rcu_dereference_raw(node->rb_left);
 		else if (c > 0)
-			n = rcu_dereference_raw(n->rb_right);
+			node = rcu_dereference_raw(node->rb_right);
 		else
 			return ltn;
 	}
@@ -126,65 +124,56 @@ __lt_find(void *key, struct rb_root *roo
 }
 
 /**
- * latch_tree_insert() - insert @nodes into the trees @root
- * @nodes: nodes to insert
- * @root: trees to insert @nodes into
- * @priv: pointer to node private data
+ * latch_tree_insert() - insert @node into the trees @root
+ * @node: node to insert
+ * @root: trees to insert @node into
  * @ops: operators defining the node order
  *
- * Initializes @nodes private pointer with @priv; which typically is a back
- * pointer to the containing structure, used by @ops to find the search key.
+ * It inserts @node into @root in an ordered fashion such that we can always
+ * observe one complete tree. See the comment for raw_write_seqcount_latch().
  *
- * Then it inserts @nodes into @root in an ordered fashion such that we can
- * always observe one complete tree. See the comment for
- * raw_write_seqcount_latch().
- *
- * The inserts use rcu_assign_pointer() to publish the element such that
- * both the @priv pointer values in @nodes as the tree structure is stored
- * before we can observe the new @nodes.
+ * The inserts use rcu_assign_pointer() to publish the element such that the
+ * tree structure is stored before we can observe the new @node.
  *
  * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be
  * serialized.
  */
 static __always_inline void
-latch_tree_insert(struct latch_tree_nodes *nodes,
+latch_tree_insert(struct latch_tree_node *node,
 		  struct latch_tree_root *root,
-		  void *priv,
 		  const struct latch_tree_ops *ops)
 {
-	nodes->node[0].priv = nodes->node[1].priv = priv;
-
 	raw_write_seqcount_latch(&root->seq);
-	__lt_insert(&nodes->node[0], &root->tree[0], ops->less);
+	__lt_insert(node, root, 0, ops->less);
 	raw_write_seqcount_latch(&root->seq);
-	__lt_insert(&nodes->node[1], &root->tree[1], ops->less);
+	__lt_insert(node, root, 1, ops->less);
 }
 
 /**
- * latch_tree_erase() - removes @nodes from the trees @root
- * @nodes: nodes to remote
- * @root: trees to remove @nodes from
+ * latch_tree_erase() - removes @node from the trees @root
+ * @node: node to remove
+ * @root: trees to remove @node from
  * @ops: operators defining the node order
  *
- * Removes @nodes from the trees @root in an ordered fashion such that we can
+ * Removes @node from the trees @root in an ordered fashion such that we can
  * always observe one complete tree. See the comment for
  * raw_write_seqcount_latch().
  *
- * It is assumed that @nodes will observe one RCU quiescent state before being
+ * It is assumed that @node will observe one RCU quiescent state before being
  * reused of freed.
  *
 * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be
 * serialized.
 */
 static __always_inline void
-latch_tree_erase(struct latch_tree_nodes *nodes,
+latch_tree_erase(struct latch_tree_node *node,
 		 struct latch_tree_root *root,
 		 const struct latch_tree_ops *ops)
 {
 	raw_write_seqcount_latch(&root->seq);
-	__lt_erase(&nodes->node[0], &root->tree[0]);
+	__lt_erase(node, root, 0);
 	raw_write_seqcount_latch(&root->seq);
-	__lt_erase(&nodes->node[1], &root->tree[1]);
+	__lt_erase(node, root, 1);
 }
 
 /**
@@ -214,7 +203,7 @@ latch_tree_find(void *key, struct latch_
 	do {
 		seq = raw_read_seqcount(&root->seq);
-		node = __lt_find(key, &root->tree[seq & 1], ops->comp);
+		node = __lt_find(key, root, seq & 1, ops->comp);
 	} while (read_seqcount_retry(&root->seq, seq));
 
 	return node;
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -119,22 +119,24 @@ static LIST_HEAD(modules);
 
 static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
 {
-	struct module *mod = n->priv;
+	struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
+	struct module *mod = mtn->mod;
 
-	if (n >= mod->ltn_init.node)
+	if (unlikely(mtn == &mod->mtn_init))
 		return (unsigned long)mod->module_init;
-	else
-		return (unsigned long)mod->module_core;
+
+	return (unsigned long)mod->module_core;
 }
 
 static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
 {
-	struct module *mod = n->priv;
+	struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
+	struct module *mod = mtn->mod;
 
-	if (n >= mod->ltn_init.node)
+	if (unlikely(mtn == &mod->mtn_init))
 		return (unsigned long)mod->init_size;
-	else
-		return (unsigned long)mod->core_size;
+
+	return (unsigned long)mod->core_size;
 }
 
 static __always_inline bool
@@ -174,20 +176,23 @@ static struct latch_tree_root mod_tree _
  */
 static void mod_tree_insert(struct module *mod)
 {
-	latch_tree_insert(&mod->ltn_core, &mod_tree, mod, &mod_tree_ops);
+	mod->mtn_core.mod = mod;
+	mod->mtn_init.mod = mod;
+
+	latch_tree_insert(&mod->mtn_core.node, &mod_tree, &mod_tree_ops);
 	if (mod->init_size)
-		latch_tree_insert(&mod->ltn_init, &mod_tree, mod, &mod_tree_ops);
+		latch_tree_insert(&mod->mtn_init.node, &mod_tree, &mod_tree_ops);
 }
 
 static void mod_tree_remove_init(struct module *mod)
 {
 	if (mod->init_size)
-		latch_tree_erase(&mod->ltn_init, &mod_tree, &mod_tree_ops);
+		latch_tree_erase(&mod->mtn_init.node, &mod_tree, &mod_tree_ops);
 }
 
 static void mod_tree_remove(struct module *mod)
 {
-	latch_tree_erase(&mod->ltn_core, &mod_tree, &mod_tree_ops);
+	latch_tree_erase(&mod->mtn_core.node, &mod_tree, &mod_tree_ops);
 	mod_tree_remove_init(mod);
 }
 
@@ -196,8 +201,10 @@ static struct module *mod_find(unsigned
 	struct latch_tree_node *ltn;
 
 	ltn = latch_tree_find((void *)addr, &mod_tree, &mod_tree_ops);
+	if (!ltn)
+		return NULL;
 
-	return ltn ? ltn->priv : NULL;
+	return container_of(ltn, struct mod_tree_node, node)->mod;
 }
 
 #else /* !(PERF_EVENTS || TRACING) */
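
FWIW, the user side of the reworked API then looks roughly like the module.c
hunks above: embed a struct latch_tree_node in your own structure, use
container_of() in the ops to get back to it, and keep whatever used to live
in ->priv in that same structure. A minimal sketch, not compiled; struct foo,
foo_tree and the foo_*() names are made up purely for illustration:

#include <linux/rbtree_latch.h>

/* Hypothetical user structure; the latch_tree_node carries both rb_nodes. */
struct foo {
	unsigned long key;
	struct latch_tree_node node;
};

static struct latch_tree_root foo_tree;

static __always_inline struct foo *ltn_to_foo(struct latch_tree_node *n)
{
	return container_of(n, struct foo, node);
}

/* Order nodes by key; used for both trees. */
static __always_inline bool foo_less(struct latch_tree_node *a,
				     struct latch_tree_node *b)
{
	return ltn_to_foo(a)->key < ltn_to_foo(b)->key;
}

/* Compare a search key against a node; <0, 0, >0. */
static __always_inline int foo_comp(void *key, struct latch_tree_node *n)
{
	unsigned long k = (unsigned long)key;
	unsigned long v = ltn_to_foo(n)->key;

	if (k < v)
		return -1;
	if (k > v)
		return 1;
	return 0;
}

static const struct latch_tree_ops foo_tree_ops = {
	.less = foo_less,
	.comp = foo_comp,
};

/* Modifications must be serialized externally, e.g. under a mutex. */
static void foo_insert(struct foo *f)
{
	latch_tree_insert(&f->node, &foo_tree, &foo_tree_ops);
}

static void foo_erase(struct foo *f)
{
	latch_tree_erase(&f->node, &foo_tree, &foo_tree_ops);
	/* wait one RCU grace period before freeing or reusing @f */
}

/* Lookups are lockless; see latch_tree_find() above. */
static struct foo *foo_find(unsigned long key)
{
	struct latch_tree_node *ltn;

	ltn = latch_tree_find((void *)key, &foo_tree, &foo_tree_ops);

	return ltn ? ltn_to_foo(ltn) : NULL;
}

The price for losing ->priv is spelling out the container_of() in the ops; in
return latch_tree_node shrinks to the two rb_nodes and module.c keeps its back
pointer in mod_tree_node, where it actually needs one.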