Subject: [RFC PATCH 3/4] SLAB: minor code cleanup
Minor code cleanup for the SLAB allocator: remove the page_get_cache() and
page_get_slab() helpers by folding them into virt_to_cache() and
virt_to_slab(), and convert the remaining page_get_slab() callers in the
debug-check paths to use virt_to_slab() directly.

Signed-off-by: Jiang Liu <liuj97@gmail.com>
---
 mm/slab.c | 26 ++++++--------------------
 1 files changed, 6 insertions(+), 20 deletions(-)
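
Not part of the patch, but for reviewers skimming the diff: below is a
minimal, self-contained userspace sketch of the pattern this cleanup leaves
behind. The struct page, kmem_cache and slab definitions are simplified
stand-ins rather than the kernel types, and the lookups take a struct page
directly instead of going through virt_to_head_page(). The point is that the
page's two list-head slots double as back-pointers to the owning cache and
slab, and virt_to_cache()/virt_to_slab() now read them in place rather than
through the removed page_get_cache()/page_get_slab() helpers. Reusing
page->lru this way works because a page owned by the slab allocator is not
on an LRU list at the same time.

/*
 * Illustrative userspace sketch (not kernel code): models how SLAB reuses
 * the page's two list-head slots as back-pointers to the owning cache and
 * slab, and how the lookups read them directly after this cleanup.
 */
#include <assert.h>
#include <stdio.h>

struct kmem_cache { const char *name; };
struct slab { void *s_mem; };

/* Stand-in for struct page: lru.next/lru.prev reused as back-pointers. */
struct page {
	int is_slab;
	struct { void *next; void *prev; } lru;
};

static void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->lru.next = cache;
}

static void page_set_slab(struct page *page, struct slab *slab)
{
	page->lru.prev = slab;
}

/* After the cleanup: read the back-pointers directly, no extra helpers. */
static struct kmem_cache *virt_to_cache(struct page *page)
{
	assert(page->is_slab);	/* models BUG_ON(!PageSlab(page)) */
	return (struct kmem_cache *)page->lru.next;
}

static struct slab *virt_to_slab(struct page *page)
{
	assert(page->is_slab);
	return (struct slab *)page->lru.prev;
}

int main(void)
{
	struct kmem_cache cache = { .name = "demo-cache" };
	struct slab slab = { .s_mem = NULL };
	struct page page = { .is_slab = 1 };

	page_set_cache(&page, &cache);
	page_set_slab(&page, &slab);

	printf("cache: %s, slab: %p\n",
	       virt_to_cache(&page)->name, (void *)virt_to_slab(&page));
	return 0;
}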

diff --git a/mm/slab.c b/mm/slab.c
index e901a36..cd163d1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -499,34 +499,23 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
 	page->lru.next = (struct list_head *)cache;
 }
 
-static inline struct kmem_cache *page_get_cache(struct page *page)
-{
-	page = compound_head(page);
-	BUG_ON(!PageSlab(page));
-	return (struct kmem_cache *)page->lru.next;
-}
-
 static inline void page_set_slab(struct page *page, struct slab *slab)
 {
 	page->lru.prev = (struct list_head *)slab;
 }
 
-static inline struct slab *page_get_slab(struct page *page)
-{
-	BUG_ON(!PageSlab(page));
-	return (struct slab *)page->lru.prev;
-}
-
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct page *page = virt_to_head_page(obj);
-	return page_get_cache(page);
+	BUG_ON(!PageSlab(page));
+	return (struct kmem_cache *)page->lru.next;
 }
 
 static inline struct slab *virt_to_slab(const void *obj)
 {
 	struct page *page = virt_to_head_page(obj);
-	return page_get_slab(page);
+	BUG_ON(!PageSlab(page));
+	return (struct slab *)page->lru.prev;
 }
 
 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
@@ -3047,7 +3036,6 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 				   void *caller)
 {
-	struct page *page;
 	unsigned int objnr;
 	struct slab *slabp;
 
@@ -3055,9 +3043,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 
 	objp -= obj_offset(cachep);
 	kfree_debugcheck(objp);
-	page = virt_to_head_page(objp);
-
-	slabp = page_get_slab(page);
+	slabp = virt_to_slab(objp);
 
 	if (cachep->flags & SLAB_RED_ZONE) {
 		verify_redzone_free(cachep, objp);
@@ -3261,7 +3247,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		struct slab *slabp;
 		unsigned objnr;
 
-		slabp = page_get_slab(virt_to_head_page(objp));
+		slabp = virt_to_slab(objp);
 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
 		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
 	}
-- 
1.7.1


