From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Subject: [PATCH 8/9] slab: destroy a slab without holding any alien cache lock
Date: Fri, 14 Feb 2014
I haven't heard that this alien cache lock is contended, but reducing
the chance of contention is generally better. With this change, we can
also simplify the complex lockdep annotation in the slab code; that
simplification is implemented in the following patch.

    Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
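
A minimal userspace sketch of the pattern this patch applies, for
reference: collect the victims on a caller-provided list while the lock
is held, then do the actual destruction only after the lock is dropped.
The names below (struct victim, drain_locked(), destroy_all()) are
invented for illustration; this is not kernel code.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct victim {
		struct victim *next;
		int id;
	};

	static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct victim *cache_head;

	/* Runs with cache_lock held: only unlink, never free. */
	static void drain_locked(struct victim **list)
	{
		*list = cache_head;
		cache_head = NULL;
	}

	/* Runs with no locks held: the expensive teardown happens here. */
	static void destroy_all(struct victim *list)
	{
		while (list) {
			struct victim *next = list->next;

			printf("destroying victim %d\n", list->id);
			free(list);
			list = next;
		}
	}

	int main(void)
	{
		struct victim *list = NULL;
		int i;

		for (i = 0; i < 3; i++) {
			struct victim *v = malloc(sizeof(*v));

			v->id = i;
			v->next = cache_head;
			cache_head = v;
		}

		pthread_mutex_lock(&cache_lock);
		drain_locked(&list);	/* cf. __drain_alien_cache() filling the list */
		pthread_mutex_unlock(&cache_lock);
		destroy_all(list);	/* cf. slabs_destroy() after unlock */

		return 0;
	}

This keeps the free path, which may itself be slow or take other locks,
out of the alien cache lock's critical section, which is what allows the
lockdep annotation to be simplified later in the series.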

diff --git a/mm/slab.c b/mm/slab.c
index ec1df4c..9c9d4d4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1000,9 +1000,9 @@ static void free_alien_cache(struct alien_cache **alc_ptr)
 }
 
 static void __drain_alien_cache(struct kmem_cache *cachep,
-				struct array_cache *ac, int node)
+				struct array_cache *ac, int node,
+				struct list_head *list)
 {
-	LIST_HEAD(list);
 	struct kmem_cache_node *n = cachep->node[node];
 
 	if (ac->avail) {
@@ -1015,10 +1015,9 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 		if (n->shared)
 			transfer_objects(n->shared, ac, ac->limit);
 
-		free_block(cachep, ac->entry, ac->avail, node, &list);
+		free_block(cachep, ac->entry, ac->avail, node, list);
 		ac->avail = 0;
 		spin_unlock(&n->list_lock);
-		slabs_destroy(cachep, &list);
 	}
 }
 
@@ -1036,8 +1035,11 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
 		if (alc) {
 			ac = &alc->ac;
 			if (ac->avail && spin_trylock_irq(&alc->lock)) {
-				__drain_alien_cache(cachep, ac, node);
+				LIST_HEAD(list);
+
+				__drain_alien_cache(cachep, ac, node, &list);
 				spin_unlock_irq(&alc->lock);
+				slabs_destroy(cachep, &list);
 			}
 		}
 	}
@@ -1054,10 +1056,13 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	for_each_online_node(i) {
 		alc = alien[i];
 		if (alc) {
+			LIST_HEAD(list);
+
 			ac = &alc->ac;
 			spin_lock_irqsave(&alc->lock, flags);
-			__drain_alien_cache(cachep, ac, i);
+			__drain_alien_cache(cachep, ac, i, &list);
 			spin_unlock_irqrestore(&alc->lock, flags);
+			slabs_destroy(cachep, &list);
 		}
 	}
 }
@@ -1088,10 +1093,11 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 		spin_lock(&alien->lock);
 		if (unlikely(ac->avail == ac->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
-			__drain_alien_cache(cachep, ac, nodeid);
+			__drain_alien_cache(cachep, ac, nodeid, &list);
 		}
 		ac_put_obj(cachep, ac, objp);
 		spin_unlock(&alien->lock);
+		slabs_destroy(cachep, &list);
 	} else {
 		spin_lock(&(cachep->node[nodeid])->list_lock);
 		free_block(cachep, &objp, 1, nodeid, &list);
-- 
1.7.9.5

