Subject: [PATCH v2 3/5] slab: Use slab_list instead of lru
    Currently we use the page->lru list for maintaining lists of slabs.  We
    have a list_head in the page structure (slab_list) that can be used for
    this purpose. Doing so makes the code cleaner since we are not
    overloading the lru list.

    The slab_list is part of a union within the page struct (included here
    stripped down):

union {
	struct {	/* Page cache and anonymous pages */
		struct list_head lru;
		...
	};
	struct {
		dma_addr_t dma_addr;
	};
	struct {	/* slab, slob and slub */
		union {
			struct list_head slab_list;
			struct {	/* Partial pages */
				struct page *next;
				int pages;	/* Nr of pages left */
				int pobjects;	/* Approximate count */
			};
		};
...

Here we see that slab_list and lru occupy the same bits of the union. We
can verify that this change is safe by examining the object file produced
from slab.c before and after this patch is applied.
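
To see why this substitution cannot change the generated code: the list
helpers (list_add(), list_del(), list_for_each_entry() and friends) only
ever work through the embedded list_head, and slab_list resolves to the
same offset as lru because both sit in the same slot of the union above.
The stand-alone sketch below is a toy model for illustration only --
struct toy_page and its trimmed-down field set are made up here, not
kernel code -- and demonstrates the aliasing with a compile-time
assertion:

#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Toy stand-in for the relevant corner of struct page. */
struct toy_page {
	unsigned long flags;
	union {
		struct {		/* Page cache and anonymous pages */
			struct list_head lru;
		};
		struct {		/* slab, slob and slub */
			struct list_head slab_list;
		};
	};
};

/* Compile-time check: both names refer to the same storage. */
_Static_assert(offsetof(struct toy_page, lru) ==
	       offsetof(struct toy_page, slab_list),
	       "lru and slab_list must alias the same bits");

int main(void)
{
	printf("offsetof(lru)       = %zu\n", offsetof(struct toy_page, lru));
	printf("offsetof(slab_list) = %zu\n", offsetof(struct toy_page, slab_list));
	return 0;
}

Because list_entry()/container_of() arithmetic uses only that member
offset, renaming the member at each call site produces byte-identical
object code, which is what the comparison described in the steps below
confirms.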

    Steps taken to verify:

1. check out the current tip of Linus' tree

    commit a667cb7a94d4 ("Merge branch 'akpm' (patches from Andrew)")

    2. configure and build (selecting SLAB allocator)

    CONFIG_SLAB=y
    CONFIG_SLAB_FREELIST_RANDOM=y
    CONFIG_DEBUG_SLAB=y
    CONFIG_DEBUG_SLAB_LEAK=y
    CONFIG_HAVE_DEBUG_KMEMLEAK=y

3. disassemble the object file: `objdump -dr mm/slab.o > before.s`
    4. apply patch
    5. build
6. disassemble the object file: `objdump -dr mm/slab.o > after.s`
    7. diff before.s after.s

    Use slab_list list_head instead of the lru list_head for maintaining
    lists of slabs.

    Reviewed-by: Roman Gushchin <guro@fb.com>
    Signed-off-by: Tobin C. Harding <tobin@kernel.org>
    ---
    mm/slab.c | 49 +++++++++++++++++++++++++------------------------
    1 file changed, 25 insertions(+), 24 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 28652e4218e0..09cc64ef9613 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1710,8 +1710,8 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
 {
 	struct page *page, *n;
 
-	list_for_each_entry_safe(page, n, list, lru) {
-		list_del(&page->lru);
+	list_for_each_entry_safe(page, n, list, slab_list) {
+		list_del(&page->slab_list);
 		slab_destroy(cachep, page);
 	}
 }
@@ -2265,8 +2265,8 @@ static int drain_freelist(struct kmem_cache *cache,
 			goto out;
 		}
 
-		page = list_entry(p, struct page, lru);
-		list_del(&page->lru);
+		page = list_entry(p, struct page, slab_list);
+		list_del(&page->slab_list);
 		n->free_slabs--;
 		n->total_slabs--;
 		/*
@@ -2726,13 +2726,13 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
 	if (!page)
 		return;
 
-	INIT_LIST_HEAD(&page->lru);
+	INIT_LIST_HEAD(&page->slab_list);
 	n = get_node(cachep, page_to_nid(page));
 
 	spin_lock(&n->list_lock);
 	n->total_slabs++;
 	if (!page->active) {
-		list_add_tail(&page->lru, &(n->slabs_free));
+		list_add_tail(&page->slab_list, &n->slabs_free);
 		n->free_slabs++;
 	} else
 		fixup_slab_list(cachep, n, page, &list);
@@ -2841,9 +2841,9 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
 				void **list)
 {
 	/* move slabp to correct slabp list: */
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	if (page->active == cachep->num) {
-		list_add(&page->lru, &n->slabs_full);
+		list_add(&page->slab_list, &n->slabs_full);
 		if (OBJFREELIST_SLAB(cachep)) {
 #if DEBUG
 			/* Poisoning will be done without holding the lock */
@@ -2857,7 +2857,7 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
 			page->freelist = NULL;
 		}
 	} else
-		list_add(&page->lru, &n->slabs_partial);
+		list_add(&page->slab_list, &n->slabs_partial);
 }
 
 /* Try to find non-pfmemalloc slab if needed */
@@ -2880,20 +2880,20 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
 	}
 
 	/* Move pfmemalloc slab to the end of list to speed up next search */
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	if (!page->active) {
-		list_add_tail(&page->lru, &n->slabs_free);
+		list_add_tail(&page->slab_list, &n->slabs_free);
 		n->free_slabs++;
 	} else
-		list_add_tail(&page->lru, &n->slabs_partial);
+		list_add_tail(&page->slab_list, &n->slabs_partial);
 
-	list_for_each_entry(page, &n->slabs_partial, lru) {
+	list_for_each_entry(page, &n->slabs_partial, slab_list) {
 		if (!PageSlabPfmemalloc(page))
 			return page;
 	}
 
 	n->free_touched = 1;
-	list_for_each_entry(page, &n->slabs_free, lru) {
+	list_for_each_entry(page, &n->slabs_free, slab_list) {
 		if (!PageSlabPfmemalloc(page)) {
 			n->free_slabs--;
 			return page;
@@ -2908,11 +2908,12 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
 	struct page *page;
 
 	assert_spin_locked(&n->list_lock);
-	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
+	page = list_first_entry_or_null(&n->slabs_partial, struct page,
+					slab_list);
 	if (!page) {
 		n->free_touched = 1;
 		page = list_first_entry_or_null(&n->slabs_free, struct page,
-						lru);
+						slab_list);
 		if (page)
 			n->free_slabs--;
 	}
@@ -3413,29 +3414,29 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 		objp = objpp[i];
 
 		page = virt_to_head_page(objp);
-		list_del(&page->lru);
+		list_del(&page->slab_list);
 		check_spinlock_acquired_node(cachep, node);
 		slab_put_obj(cachep, page, objp);
 		STATS_DEC_ACTIVE(cachep);
 
 		/* fixup slab chains */
 		if (page->active == 0) {
-			list_add(&page->lru, &n->slabs_free);
+			list_add(&page->slab_list, &n->slabs_free);
 			n->free_slabs++;
 		} else {
 			/* Unconditionally move a slab to the end of the
 			 * partial list on free - maximum time for the
 			 * other objects to be freed, too.
 			 */
-			list_add_tail(&page->lru, &n->slabs_partial);
+			list_add_tail(&page->slab_list, &n->slabs_partial);
 		}
 	}
 
 	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
 		n->free_objects -= cachep->num;
 
-		page = list_last_entry(&n->slabs_free, struct page, lru);
-		list_move(&page->lru, list);
+		page = list_last_entry(&n->slabs_free, struct page, slab_list);
+		list_move(&page->slab_list, list);
 		n->free_slabs--;
 		n->total_slabs--;
 	}
@@ -3473,7 +3474,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 		int i = 0;
 		struct page *page;
 
-		list_for_each_entry(page, &n->slabs_free, lru) {
+		list_for_each_entry(page, &n->slabs_free, slab_list) {
 			BUG_ON(page->active);
 
 			i++;
@@ -4336,9 +4337,9 @@ static int leaks_show(struct seq_file *m, void *p)
 		check_irq_on();
 		spin_lock_irq(&n->list_lock);
 
-		list_for_each_entry(page, &n->slabs_full, lru)
+		list_for_each_entry(page, &n->slabs_full, slab_list)
 			handle_slab(x, cachep, page);
-		list_for_each_entry(page, &n->slabs_partial, lru)
+		list_for_each_entry(page, &n->slabs_partial, slab_list)
 			handle_slab(x, cachep, page);
 		spin_unlock_irq(&n->list_lock);
 	}
    --
    2.21.0