From: Vlastimil Babka <vbabka@suse.cz>
Subject: [RFC v2 33/34] mm, slub: use migrate_disable() on PREEMPT_RT
Date: 2021-06-09
    We currently use preempt_disable() (directly or via get_cpu_ptr()) to stabilize
    the pointer to kmem_cache_cpu. On PREEMPT_RT this would be incompatible with
    the list_lock spinlock. We can use migrate_disable() instead, but that
    increases overhead on !PREEMPT_RT, as it's an unconditional function call
    where an inline preempt_disable() is sufficient.

    In order to get the best available mechanism on both PREEMPT_RT and
    !PREEMPT_RT, introduce private slub_get_cpu_ptr() and slub_put_cpu_ptr()
    wrappers and use them.
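
    (Not part of the patch: for illustration only, a minimal self-contained
    user-space sketch of the same wrapper pattern follows. All demo_* names and
    DEMO_PREEMPT_RT are hypothetical stand-ins for the kernel primitives; the
    point is only the cheap-inline vs. function-call split.)

    /*
     * Illustrative user-space sketch (not kernel code): pick a cheap inline
     * "pin" on one config and an unconditional function call on the other,
     * mirroring the slub_get_cpu_ptr()/slub_put_cpu_ptr() wrappers.
     * All demo_* names and DEMO_PREEMPT_RT are hypothetical stand-ins.
     */
    #include <stdio.h>

    struct demo_cpu_data {
            int counter;
    };

    static struct demo_cpu_data demo_data;

    /* stand-in for preempt_disable()/preempt_enable(): cheap, inlineable */
    static inline void demo_inline_pin(void)   { }
    static inline void demo_inline_unpin(void) { }

    /* stand-in for migrate_disable()/migrate_enable(): real function call */
    static void demo_call_pin(void)   { }
    static void demo_call_unpin(void) { }

    #ifndef DEMO_PREEMPT_RT
    #define demo_get_cpu_ptr(var)   ({ demo_inline_pin(); &(var); })
    #define demo_put_cpu_ptr(var)   do { (void)(var); demo_inline_unpin(); } while (0)
    #else
    #define demo_get_cpu_ptr(var)   ({ demo_call_pin(); &(var); })
    #define demo_put_cpu_ptr(var)   do { (void)(var); demo_call_unpin(); } while (0)
    #endif

    int main(void)
    {
            struct demo_cpu_data *d = demo_get_cpu_ptr(demo_data);

            d->counter++;                   /* pointer stays valid between get and put */
            demo_put_cpu_ptr(demo_data);

            printf("counter = %d\n", demo_data.counter);
            return 0;
    }

    (The same trade-off drives the wrappers in the patch: the !PREEMPT_RT fast
    path keeps its inline preempt_disable(), while PREEMPT_RT gets
    migrate_disable(), which remains compatible with the list_lock spinlock.)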

    Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
    ---
    mm/slub.c | 41 +++++++++++++++++++++++++++++++----------
    1 file changed, 31 insertions(+), 10 deletions(-)

    diff --git a/mm/slub.c b/mm/slub.c
    index 12e966f07f19..caa206213e72 100644
    --- a/mm/slub.c
    +++ b/mm/slub.c
    @@ -115,6 +115,26 @@
    * the fast path and disables lockless freelists.
    */

    +/*
    + * We could simply use migrate_disable()/enable() but as long as it's a
    + * function call even on !PREEMPT_RT, use inline preempt_disable() there.
    + */
    +#ifndef CONFIG_PREEMPT_RT
    +#define slub_get_cpu_ptr(var) get_cpu_ptr(var)
    +#define slub_put_cpu_ptr(var) put_cpu_ptr(var)
    +#else
    +#define slub_get_cpu_ptr(var) \
    +({ \
    + migrate_disable(); \
    + this_cpu_ptr(var); \
    +})
    +#define slub_put_cpu_ptr(var) \
    +do { \
    + (void)(var); \
    + migrate_enable(); \
    +} while (0)
    +#endif
    +
    #ifdef CONFIG_SLUB_DEBUG
    #ifdef CONFIG_SLUB_DEBUG_ON
    DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
    @@ -2419,7 +2439,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
    int pages;
    int pobjects;

    - preempt_disable();
    + slub_get_cpu_ptr(s->cpu_slab);
    do {
    pages = 0;
    pobjects = 0;
    @@ -2450,7 +2470,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)

    } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
    != oldpage);
    - preempt_enable();
    + slub_put_cpu_ptr(s->cpu_slab);
    #endif /* CONFIG_SLUB_CPU_PARTIAL */
    }

    @@ -2759,7 +2779,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
    if (unlikely(!pfmemalloc_match(page, gfpflags)))
    goto deactivate_slab;

    - /* must check again c->page in case IRQ handler changed it */
    + /* must check again c->page in case we got preempted and it changed */
    local_irq_save(flags);
    if (unlikely(page != c->page)) {
    local_irq_restore(flags);
    @@ -2818,7 +2838,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
    }
    if (unlikely(!slub_percpu_partial(c))) {
    local_irq_restore(flags);
    - goto new_objects; /* stolen by an IRQ handler */
    + /* we were preempted and partial list got empty */
    + goto new_objects;
    }

    page = c->page = slub_percpu_partial(c);
    @@ -2834,9 +2855,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
    if (freelist)
    goto check_new_page;

    - put_cpu_ptr(s->cpu_slab);
    + slub_put_cpu_ptr(s->cpu_slab);
    page = new_slab(s, gfpflags, node);
    - c = get_cpu_ptr(s->cpu_slab);
    + c = slub_get_cpu_ptr(s->cpu_slab);

    if (unlikely(!page)) {
    slab_out_of_memory(s, gfpflags, node);
    @@ -2919,12 +2940,12 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
    * cpu before disabling preemption. Need to reload cpu area
    * pointer.
    */
    - c = get_cpu_ptr(s->cpu_slab);
    + c = slub_get_cpu_ptr(s->cpu_slab);
    #endif

    p = ___slab_alloc(s, gfpflags, node, addr, c);
    #ifdef CONFIG_PREEMPTION
    - put_cpu_ptr(s->cpu_slab);
    + slub_put_cpu_ptr(s->cpu_slab);
    #endif
    return p;
    }
    @@ -3445,7 +3466,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
    * IRQs, which protects against PREEMPT and interrupts
    * handlers invoking normal fastpath.
    */
    - c = get_cpu_ptr(s->cpu_slab);
    + c = slub_get_cpu_ptr(s->cpu_slab);
    local_irq_disable();

    for (i = 0; i < size; i++) {
    @@ -3491,7 +3512,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
    }
    c->tid = next_tid(c->tid);
    local_irq_enable();
    - put_cpu_ptr(s->cpu_slab);
    + slub_put_cpu_ptr(s->cpu_slab);

    /*
    * memcg and kmem_cache debug support and memory initialization.
    --
    2.31.1
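
    (Also for illustration only: the put/get pair around new_slab() in the
    ___slab_alloc() hunk exists because the allocation may sleep and the task
    may migrate, so the kmem_cache_cpu pointer must be reloaded after
    re-pinning. A user-space sketch of that pattern, again with hypothetical
    demo_* stand-ins:)

    /*
     * User-space sketch of the reload pattern in the ___slab_alloc() hunk:
     * drop the pin before a call that may sleep/migrate, then re-pin and
     * RELOAD the per-CPU pointer instead of reusing the old one.
     * All demo_* names are hypothetical stand-ins, not kernel symbols.
     */
    #include <stdio.h>

    struct demo_cpu_slab {
            void *page;
    };

    static struct demo_cpu_slab demo_slab;

    static void demo_pin(void)   { }        /* stand-in for slub_get_cpu_ptr() pinning */
    static void demo_unpin(void) { }        /* stand-in for slub_put_cpu_ptr() */

    #define demo_get_cpu_ptr(var)   ({ demo_pin(); &(var); })
    #define demo_put_cpu_ptr(var)   do { (void)(var); demo_unpin(); } while (0)

    /* stand-in for new_slab(): may block, so it must not run while pinned */
    static void *demo_new_page(void)
    {
            static char page[64];
            return page;
    }

    int main(void)
    {
            struct demo_cpu_slab *c = demo_get_cpu_ptr(demo_slab);
            void *page;

            demo_put_cpu_ptr(demo_slab);     /* unpin before the blocking call */
            page = demo_new_page();          /* may sleep/migrate in the kernel */
            c = demo_get_cpu_ptr(demo_slab); /* re-pin and reload the pointer */

            c->page = page;                  /* safe: c refers to the current CPU again */
            demo_put_cpu_ptr(demo_slab);

            printf("installed page %p\n", demo_slab.page);
            return 0;
    }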