Subject: [PATCH v3 04/19] mm: slub: implement SLUB version of obj_to_index()
This commit implements the SLUB version of the obj_to_index() function,
which will be required to calculate the offset of an obj_cgroup in the
obj_cgroups vector to store/obtain the objcg ownership data.

To make it faster, let's repeat SLAB's trick introduced by
commit 6a2d7a955d8d ("[PATCH] SLAB: use a multiply instead of a
divide in obj_to_index()") and avoid an expensive division.

Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
---
 include/linux/slub_def.h | 9 +++++++++
 mm/slub.c                | 1 +
 2 files changed, 10 insertions(+)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d2153789bd9f..200ea292f250 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -8,6 +8,7 @@
  * (C) 2007 SGI, Christoph Lameter
  */
 #include <linux/kobject.h>
+#include <linux/reciprocal_div.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -86,6 +87,7 @@ struct kmem_cache {
 	unsigned long min_partial;
 	unsigned int size;	/* The size of an object including metadata */
 	unsigned int object_size;/* The size of an object without metadata */
+	struct reciprocal_value reciprocal_size;
 	unsigned int offset;	/* Free pointer offset */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	/* Number of per cpu partial objects to keep around */
@@ -182,4 +184,11 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
 	return result;
 }
 
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+					const struct page *page, void *obj)
+{
+	return reciprocal_divide(kasan_reset_tag(obj) - page_address(page),
+				 cache->reciprocal_size);
+}
+
 #endif /* _LINUX_SLUB_DEF_H */
diff --git a/mm/slub.c b/mm/slub.c
index 03071ae5ff07..8d16babe1829 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3660,6 +3660,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	 */
 	size = ALIGN(size, s->align);
 	s->size = size;
+	s->reciprocal_size = reciprocal_value(size);
 	if (forced_order >= 0)
 		order = forced_order;
 	else
-- 
2.25.3