    Subject: Re: call_rcu from trace_preempt
    On Tue, 16 Jun 2015 14:38:53 +0200
    Daniel Wagner <wagi@monom.org> wrote:

    > diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
    > index 83c209d..8d73be3 100644
    > --- a/kernel/bpf/hashtab.c
    > +++ b/kernel/bpf/hashtab.c
    > @@ -13,6 +13,8 @@
    > #include <linux/jhash.h>
    > #include <linux/filter.h>
    > #include <linux/vmalloc.h>
    > +#include <linux/kthread.h>
    > +#include <linux/spinlock.h>
    >
    > struct bpf_htab {
    >         struct bpf_map map;
    > @@ -27,10 +29,14 @@ struct bpf_htab {
    > struct htab_elem {
    >         struct hlist_node hash_node;
    >         struct rcu_head rcu;
    > +       struct list_head list;
    >         u32 hash;
    >         char key[0] __aligned(8);
    > };
    >
    > +static LIST_HEAD(elem_freelist);
    > +static DEFINE_SPINLOCK(elem_freelist_lock);
    > +
    > /* Called from syscall */
    > static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
    > {
    > @@ -228,6 +234,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
    >         memcpy(l_new->key + round_up(key_size, 8), value, map->value_size);
    >
    >         l_new->hash = htab_map_hash(l_new->key, key_size);
    > +       INIT_LIST_HEAD(&l_new->list);
    >
    >         /* bpf_map_update_elem() can be called in_irq() */
    >         spin_lock_irqsave(&htab->lock, flags);
    > @@ -300,11 +307,17 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
    >         if (l) {
    >                 hlist_del_rcu(&l->hash_node);
    >                 htab->count--;
    > -               kfree_rcu(l, rcu);
    > +               /* kfree_rcu(l, rcu); */

    So this kfree_rcu() is only being used to defer a free, and has nothing
    to do with having to free 'l' from rcu?

    >                 ret = 0;
    >         }
    >
    >         spin_unlock_irqrestore(&htab->lock, flags);
    > +
    > +       if (l) {
    > +               spin_lock_irqsave(&elem_freelist_lock, flags);
    > +               list_add(&l->list, &elem_freelist);
    > +               spin_unlock_irqrestore(&elem_freelist_lock, flags);
    > +       }
    >         return ret;
    > }
    >
    > @@ -359,9 +372,31 @@ static struct bpf_map_type_list htab_type __read_mostly = {
    >         .type = BPF_MAP_TYPE_HASH,
    > };
    >
    > +static int free_thread(void *arg)
    > +{
    > +       unsigned long flags;
    > +       struct htab_elem *l;
    > +
    > +       while (!kthread_should_stop()) {
    > +               spin_lock_irqsave(&elem_freelist_lock, flags);
    > +               while (!list_empty(&elem_freelist)) {
    > +                       l = list_entry(elem_freelist.next,
    > +                                              struct htab_elem, list);
    > +                       list_del(&l->list);
    > +                       kfree(l);
    > +               }
    > +               spin_unlock_irqrestore(&elem_freelist_lock, flags);

    Wow! This is burning up CPU, isn't it?

    If you just need to delay the kfree, why not use irq_work for that job?
    (A rough sketch of that approach follows the quoted patch below.)

    -- Steve

    > +       }
    > +
    > +       return 0;
    > +}
    > +
    > static int __init register_htab_map(void)
    > {
    >         bpf_register_map_type(&htab_type);
    > +
    > +       kthread_run(free_thread, NULL, "free_thread");
    > +
    >         return 0;
    > }
    > late_initcall(register_htab_map);
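
    A minimal sketch of the irq_work route suggested above, assuming the
    element really only needs its kfree() deferred (no RCU grace period
    required) and that struct htab_elem gains a struct llist_node member,
    here called free_node. It is an illustration of the idea with made-up
    names, not a patch from this thread:

    #include <linux/irq_work.h>
    #include <linux/llist.h>
    #include <linux/slab.h>

    /* lockless list of elements waiting to be freed */
    static LLIST_HEAD(elem_free_llist);

    static void elem_free_irq_work(struct irq_work *work)
    {
            struct llist_node *head = llist_del_all(&elem_free_llist);
            struct htab_elem *l, *tmp;

            /* kfree() is safe from the hard-irq context irq_work runs in */
            llist_for_each_entry_safe(l, tmp, head, free_node)
                    kfree(l);
    }

    static DEFINE_IRQ_WORK(elem_free_work, elem_free_irq_work);

    /* called from htab_map_delete_elem() instead of kfree_rcu(l, rcu) */
    static void elem_defer_free(struct htab_elem *l)
    {
            llist_add(&l->free_node, &elem_free_llist);
            irq_work_queue(&elem_free_work);
    }

    llist_add() and irq_work_queue() are NMI-safe, so the delete path never
    spins on a lock, and the work is raised only when there is actually
    something to free, instead of a kthread polling the list.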


