Date: 6 May 2011
Subject: [RFC][PATCH 09/13] ftrace: Free hash with call_rcu_sched()
    From: Steven Rostedt <srostedt@redhat.com>

    When a hash is modified and might still be in use, defer freeing the
    old copy with call_rcu_sched(), as the hashes will soon be used
    directly in the function tracer callback.

    Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
    Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
    ---
    kernel/trace/ftrace.c | 55 +++++++++++++++++++++++++------------------------
    1 files changed, 28 insertions(+), 27 deletions(-)
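
    Not part of the patch, just a sketch for context: the changelog above
    relies on the sched-RCU publish/retire pattern, roughly as in the code
    below. struct my_hash, cur_hash, my_reader(), my_hash_free_rcu() and
    my_hash_replace() are made-up names for illustration; only
    rcu_read_lock_sched()/rcu_read_unlock_sched(), rcu_dereference_sched(),
    rcu_assign_pointer() and call_rcu_sched() are actual kernel primitives,
    and updaters are assumed to be serialized by a mutex (ftrace uses
    ftrace_lock for that).

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/rculist.h>
    #include <linux/slab.h>

    /* made-up stand-in for struct ftrace_hash */
    struct my_hash {
    	struct hlist_head	*buckets;
    	unsigned long		size_bits;
    	struct rcu_head		rcu;
    };

    /* pointer the readers follow; writers serialize on a mutex */
    static struct my_hash *cur_hash;

    /* Reader side: the function tracer callback runs with preemption
     * disabled, which is exactly a sched-RCU read-side critical section,
     * so it can walk the hash without taking any lock. */
    static void my_reader(unsigned long ip)
    {
    	struct my_hash *hash;

    	rcu_read_lock_sched();			/* disables preemption */
    	hash = rcu_dereference_sched(cur_hash);
    	if (hash) {
    		/* ... hash_long(ip, hash->size_bits), walk the bucket ... */
    	}
    	rcu_read_unlock_sched();		/* re-enables preemption */
    }

    /* RCU callback: invoked only after every CPU has passed through a
     * context switch, i.e. after all preempt-disabled readers that could
     * still see the old hash have finished. */
    static void my_hash_free_rcu(struct rcu_head *rcu)
    {
    	struct my_hash *hash = container_of(rcu, struct my_hash, rcu);

    	kfree(hash->buckets);
    	kfree(hash);
    }

    /* Updater side: publish the replacement first, then retire the old
     * copy with call_rcu_sched() instead of freeing it immediately. */
    static void my_hash_replace(struct my_hash *new_hash)
    {
    	struct my_hash *old_hash = cur_hash;

    	rcu_assign_pointer(cur_hash, new_hash);
    	if (old_hash)
    		call_rcu_sched(&old_hash->rcu, my_hash_free_rcu);
    }

    The key property is that the sched-RCU grace period only ends once
    every CPU has scheduled, so a tracer callback that was still walking
    the old buckets with preemption disabled is guaranteed to be done
    before my_hash_free_rcu() runs.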

    diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
    index dcce0bf..92b6fdf 100644
    --- a/kernel/trace/ftrace.c
    +++ b/kernel/trace/ftrace.c
    @@ -913,6 +913,7 @@ struct ftrace_hash {
     	unsigned long		size_bits;
     	struct hlist_head	*buckets;
     	unsigned long		count;
    +	struct rcu_head		rcu;
     };
     
     /*
    @@ -1058,6 +1059,21 @@ static void free_ftrace_hash(struct ftrace_hash *hash)
     	kfree(hash);
     }
     
    +static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
    +{
    +	struct ftrace_hash *hash;
    +
    +	hash = container_of(rcu, struct ftrace_hash, rcu);
    +	free_ftrace_hash(hash);
    +}
    +
    +static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
    +{
    +	if (!hash || hash == EMPTY_HASH)
    +		return;
    +	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
    +}
    +
     static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
     {
     	struct ftrace_hash *hash;
    @@ -1122,7 +1138,8 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
     	struct ftrace_func_entry *entry;
     	struct hlist_node *tp, *tn;
     	struct hlist_head *hhd;
    -	struct ftrace_hash *hash = *dst;
    +	struct ftrace_hash *old_hash;
    +	struct ftrace_hash *new_hash;
     	unsigned long key;
     	int size = src->count;
     	int bits = 0;
    @@ -1133,13 +1150,11 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
     	 * the empty_hash.
     	 */
     	if (!src->count) {
    -		free_ftrace_hash(*dst);
    -		*dst = EMPTY_HASH;
    +		free_ftrace_hash_rcu(*dst);
    +		rcu_assign_pointer(*dst, EMPTY_HASH);
     		return 0;
     	}
     
    -	ftrace_hash_clear(hash);
    -
     	/*
     	 * Make the hash size about 1/2 the # found
     	 */
    @@ -1150,27 +1165,9 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
     	if (bits > FTRACE_HASH_MAX_BITS)
     		bits = FTRACE_HASH_MAX_BITS;
     
    -	/* We can't modify the empty_hash */
    -	if (hash == EMPTY_HASH) {
    -		/* Create a new hash */
    -		*dst = alloc_ftrace_hash(bits);
    -		if (!*dst) {
    -			*dst = EMPTY_HASH;
    -			return -ENOMEM;
    -		}
    -		hash = *dst;
    -	} else {
    -		size = 1 << bits;
    -
    -		/* Use the old hash, but create new buckets */
    -		hhd = kzalloc(sizeof(*hhd) * size, GFP_KERNEL);
    -		if (!hhd)
    -			return -ENOMEM;
    -
    -		kfree(hash->buckets);
    -		hash->buckets = hhd;
    -		hash->size_bits = bits;
    -	}
    +	new_hash = alloc_ftrace_hash(bits);
    +	if (!new_hash)
    +		return -ENOMEM;
     
     	size = 1 << src->size_bits;
     	for (i = 0; i < size; i++) {
    @@ -1181,10 +1178,14 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
     			else
     				key = 0;
     			remove_hash_entry(src, entry);
    -			__add_hash_entry(hash, entry);
    +			__add_hash_entry(new_hash, entry);
     		}
     	}
     
    +	old_hash = *dst;
    +	rcu_assign_pointer(*dst, new_hash);
    +	free_ftrace_hash_rcu(old_hash);
    +
     	return 0;
     }

    --
    1.7.2.3


