Subject: [PATCH v4 09/17] mm/slab_common: cleanup kmalloc_large()

Now that kmalloc_large() and kmalloc_large_node() do mostly the same job,
make kmalloc_large() a wrapper of kmalloc_large_node_notrace().

While at it, add the missing flag-fixing code to
kmalloc_large_node_notrace().
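
After this change both entry points funnel into the common untraced
helper; roughly sketched:

	kmalloc_large(size, flags)
	    -> kmalloc_large_node_notrace(size, flags, NUMA_NO_NODE)
	kmalloc_large_node(size, flags, node)
	    -> kmalloc_large_node_notrace(size, flags, node)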

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
---
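
Note: the flag fix reuses kmalloc_fix_flags(), the existing helper named
in the first hunk's context below. For reference, it masks off the
offending bits and warns; a paraphrased sketch, not part of this patch:

	gfp_t kmalloc_fix_flags(gfp_t flags)
	{
		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

		/* Strip the invalid bits so the allocation can still proceed. */
		flags &= ~GFP_SLAB_BUG_MASK;
		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
				invalid_mask, &invalid_mask, flags, &flags);
		dump_stack();

		return flags;
	}
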
 mm/slab_common.c | 35 +++++++++++++----------------------
 1 file changed, 13 insertions(+), 22 deletions(-)

diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7a0942d54424..51ccd0545816 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -905,28 +905,6 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
  * directly to the page allocator. We use __GFP_COMP, because we will need to
  * know the allocation order to free the pages properly in kfree.
  */
-void *kmalloc_large(size_t size, gfp_t flags)
-{
-	void *ret = NULL;
-	struct page *page;
-	unsigned int order = get_order(size);
-
-	if (unlikely(flags & GFP_SLAB_BUG_MASK))
-		flags = kmalloc_fix_flags(flags);
-
-	page = alloc_pages(flags | __GFP_COMP, order);
-	if (likely(page)) {
-		ret = page_address(page);
-		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
-				      PAGE_SIZE << order);
-	}
-	ret = kasan_kmalloc_large(ret, size, flags);
-	/* As ret might get tagged, call kmemleak hook after KASAN. */
-	kmemleak_alloc(ret, size, 1, flags);
-	trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
-	return ret;
-}
-EXPORT_SYMBOL(kmalloc_large);
 
 void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
 {
@@ -934,6 +912,9 @@ void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
 	void *ptr = NULL;
 	unsigned int order = get_order(size);
 
+	if (unlikely(flags & GFP_SLAB_BUG_MASK))
+		flags = kmalloc_fix_flags(flags);
+
 	flags |= __GFP_COMP;
 	page = alloc_pages_node(node, flags, order);
 	if (page) {
@@ -949,6 +930,16 @@ void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
 	return ptr;
 }
 
+void *kmalloc_large(size_t size, gfp_t flags)
+{
+	void *ret = kmalloc_large_node_notrace(size, flags, NUMA_NO_NODE);
+
+	trace_kmalloc(_RET_IP_, ret, NULL, size,
+		      PAGE_SIZE << get_order(size), flags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_large);
+
 void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	void *ret = kmalloc_large_node_notrace(size, flags, node);
-- 
2.32.0