From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Subject: [PATCH v4 08/17] mm/slab_common: kmalloc_node: pass large requests to page allocator
Date: 2022-08-17
Now that kmalloc_large_node() is in common code, pass large requests
to the page allocator from kmalloc_node(), using kmalloc_large_node().
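
For illustration, a kmalloc_node() call with a compile-time-constant
size above KMALLOC_MAX_CACHE_SIZE now dispatches to the page allocator
at the call site. A minimal sketch; the caller below is hypothetical
and not part of this patch:

#include <linux/slab.h>

/*
 * Hypothetical example: 64KB exceeds KMALLOC_MAX_CACHE_SIZE on typical
 * SLUB configs (two pages), so the inlined kmalloc_node() fast path
 * compiles down to a direct kmalloc_large_node() call instead of
 * bouncing through __kmalloc_node().
 */
static void *alloc_big_buffer(int node)
{
	return kmalloc_node(64 * 1024, GFP_KERNEL, node);
}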

One problem is that there is currently no tracepoint in
kmalloc_large_node(). Instead of simply putting a tracepoint in it,
use kmalloc_large_node{,_notrace} depending on the caller, so that a
useful caller address is reported for both the inlined kmalloc_node()
and __kmalloc_node_track_caller() when large objects are allocated.
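
The resulting call graph, summarizing the hunks below (mm/slub.c is
the implementation shown in this patch):

/*
 * kmalloc_node(<large constant size>, ...)   inlined fast path
 *   -> kmalloc_large_node()                  traced wrapper; records
 *                                            _RET_IP_, i.e. the inlined
 *                                            call site in the consumer
 *
 * __kmalloc_node() / __kmalloc_node_track_caller()
 *   -> __do_kmalloc_node(..., caller)
 *     -> kmalloc_large_node_notrace()        no tracepoint of its own;
 *                                            __do_kmalloc_node() emits
 *                                            trace_kmalloc_node() with
 *                                            its caller argument
 */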

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
---
 include/linux/slab.h | 26 +++++++++++++++++++-------
 mm/slab.h            |  2 ++
 mm/slab_common.c     | 11 ++++++++++-
 mm/slub.c            |  2 +-
 4 files changed, 32 insertions(+), 9 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 082499306098..fd2e129fc813 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -571,23 +571,35 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
         return __kmalloc(size, flags);
 }
 
+#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-#ifndef CONFIG_SLOB
-        if (__builtin_constant_p(size) &&
-                size <= KMALLOC_MAX_CACHE_SIZE) {
-                unsigned int i = kmalloc_index(size);
+        if (__builtin_constant_p(size)) {
+                unsigned int index;
 
-                if (!i)
+                if (size > KMALLOC_MAX_CACHE_SIZE)
+                        return kmalloc_large_node(size, flags, node);
+
+                index = kmalloc_index(size);
+
+                if (!index)
                         return ZERO_SIZE_PTR;
 
                 return kmem_cache_alloc_node_trace(
-                                kmalloc_caches[kmalloc_type(flags)][i],
+                                kmalloc_caches[kmalloc_type(flags)][index],
                                 flags, node, size);
         }
-#endif
         return __kmalloc_node(size, flags, node);
 }
+#else
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+        if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
+                return kmalloc_large_node(size, flags, node);
+
+        return __kmalloc_node(size, flags, node);
+}
+#endif
 
 /**
  * kmalloc_array - allocate memory for an array.
diff --git a/mm/slab.h b/mm/slab.h
index 4ec82bec15ec..40322bcf07be 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -273,6 +273,8 @@ void create_kmalloc_caches(slab_flags_t);
 
 /* Find the kmalloc slab corresponding for a certain size */
 struct kmem_cache *kmalloc_slab(size_t, gfp_t);
+
+void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node);
 #endif
 
 gfp_t kmalloc_fix_flags(gfp_t flags);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 1b9101f9cb21..7a0942d54424 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -928,7 +928,7 @@ void *kmalloc_large(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(kmalloc_large);
 
-void *kmalloc_large_node(size_t size, gfp_t flags, int node)
+void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
 {
         struct page *page;
         void *ptr = NULL;
@@ -948,6 +948,15 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 
         return ptr;
 }
+
+void *kmalloc_large_node(size_t size, gfp_t flags, int node)
+{
+        void *ret = kmalloc_large_node_notrace(size, flags, node);
+
+        trace_kmalloc_node(_RET_IP_, ret, NULL, size,
+                           PAGE_SIZE << get_order(size), flags, node);
+        return ret;
+}
 EXPORT_SYMBOL(kmalloc_large_node);
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
diff --git a/mm/slub.c b/mm/slub.c
index 5e7819ade2c4..165fe87af204 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4401,7 +4401,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
         void *ret;
 
         if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
-                ret = kmalloc_large_node(size, flags, node);
+                ret = kmalloc_large_node_notrace(size, flags, node);
 
                 trace_kmalloc_node(caller, ret, NULL,
                                    size, PAGE_SIZE << get_order(size),
--
2.32.0