    Subject: [PATCH v3 3/8] slab: Clean up function prototypes
    Based on feedback from Joe Perches and Linus Torvalds, regularize the
    slab function prototypes before making attribute changes.
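    For illustration only (not part of the change itself), the clean-up
    reduces to the pattern below, condensed from the hunks that follow;
    the parameter names shown are the ones this patch introduces, so the
    later attribute annotations in the series land on uniform
    declarations:

        /* before: unnamed parameters, some declarations overlong */
        void kmem_cache_free(struct kmem_cache *, void *);
        int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

        /* after: named parameters, wrapped within the line-length limit */
        void kmem_cache_free(struct kmem_cache *s, void *objp);
        int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);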

    Cc: Christoph Lameter <cl@linux.com>
    Cc: Pekka Enberg <penberg@kernel.org>
    Cc: David Rientjes <rientjes@google.com>
    Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
    Cc: Andrew Morton <akpm@linux-foundation.org>
    Cc: Vlastimil Babka <vbabka@suse.cz>
    Cc: linux-mm@kvack.org
    Signed-off-by: Kees Cook <keescook@chromium.org>
    ---
    include/linux/slab.h | 68 ++++++++++++++++++++++----------------------
    1 file changed, 34 insertions(+), 34 deletions(-)

    diff --git a/include/linux/slab.h b/include/linux/slab.h
    index 083f3ce550bc..d9f14125d7a2 100644
    --- a/include/linux/slab.h
    +++ b/include/linux/slab.h
    @@ -152,8 +152,8 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
    slab_flags_t flags,
    unsigned int useroffset, unsigned int usersize,
    void (*ctor)(void *));
    -void kmem_cache_destroy(struct kmem_cache *);
    -int kmem_cache_shrink(struct kmem_cache *);
    +void kmem_cache_destroy(struct kmem_cache *s);
    +int kmem_cache_shrink(struct kmem_cache *s);

    /*
    * Please use this macro to create slab caches. Simply specify the
    @@ -181,11 +181,11 @@ int kmem_cache_shrink(struct kmem_cache *);
    /*
    * Common kmalloc functions provided by all allocators
    */
    -void * __must_check krealloc(const void *, size_t, gfp_t);
    -void kfree(const void *);
    -void kfree_sensitive(const void *);
    -size_t __ksize(const void *);
    -size_t ksize(const void *);
    +void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags);
    +void kfree(const void *objp);
    +void kfree_sensitive(const void *objp);
    +size_t __ksize(const void *objp);
    +size_t ksize(const void *objp);
    #ifdef CONFIG_PRINTK
    bool kmem_valid_obj(void *object);
    void kmem_dump_obj(void *object);
    @@ -426,8 +426,8 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
    #endif /* !CONFIG_SLOB */

    void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
    -void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
    -void kmem_cache_free(struct kmem_cache *, void *);
    +void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
    +void kmem_cache_free(struct kmem_cache *s, void *objp);

    /*
    * Bulk allocation and freeing operations. These are accelerated in an
    @@ -436,8 +436,8 @@ void kmem_cache_free(struct kmem_cache *, void *);
    *
    * Note that interrupts must be enabled when calling these functions.
    */
    -void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
    -int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
    +void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
    +int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);

    /*
    * Caller must not use kfree_bulk() on memory not originally allocated
    @@ -450,7 +450,8 @@ static __always_inline void kfree_bulk(size_t size, void **p)

    #ifdef CONFIG_NUMA
    void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
    -void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
    +void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
    + __malloc;
    #else
    static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
    {
    @@ -464,25 +465,24 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
    #endif

    #ifdef CONFIG_TRACING
    -extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
    +extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
    + __assume_slab_alignment __malloc;

    #ifdef CONFIG_NUMA
    -extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
    - gfp_t gfpflags,
    - int node, size_t size) __assume_slab_alignment __malloc;
    +extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
    + int node, size_t size) __assume_slab_alignment __malloc;
    #else
    -static __always_inline void *
    -kmem_cache_alloc_node_trace(struct kmem_cache *s,
    - gfp_t gfpflags,
    - int node, size_t size)
    +static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
    + gfp_t gfpflags, int node,
    + size_t size)
    {
    return kmem_cache_alloc_trace(s, gfpflags, size);
    }
    #endif /* CONFIG_NUMA */

    #else /* CONFIG_TRACING */
    -static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
    - gfp_t flags, size_t size)
    +static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags,
    + size_t size)
    {
    void *ret = kmem_cache_alloc(s, flags);

    @@ -490,10 +490,8 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
    return ret;
    }

    -static __always_inline void *
    -kmem_cache_alloc_node_trace(struct kmem_cache *s,
    - gfp_t gfpflags,
    - int node, size_t size)
    +static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
    + int node, size_t size)
    {
    void *ret = kmem_cache_alloc_node(s, gfpflags, node);

    @@ -502,13 +500,14 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
    }
    #endif /* CONFIG_TRACING */

    -extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
    +extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
    + __malloc;

    #ifdef CONFIG_TRACING
    -extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
    +extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
    + __assume_page_alignment __malloc;
    #else
    -static __always_inline void *
    -kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
    +static __always_inline void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
    {
    return kmalloc_order(size, flags, order);
    }
    @@ -638,8 +637,8 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
    * @new_size: new size of a single member of the array
    * @flags: the type of memory to allocate (see kmalloc)
    */
    -static __must_check inline void *
    -krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
    +static inline void * __must_check krealloc_array(void *p, size_t new_n, size_t new_size,
    + gfp_t flags)
    {
    size_t bytes;

    @@ -668,7 +667,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
    * allocator where we care about the real place the memory allocation
    * request comes from.
    */
    -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
    +extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
    #define kmalloc_track_caller(size, flags) \
    __kmalloc_track_caller(size, flags, _RET_IP_)

    @@ -691,7 +690,8 @@ static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)


    #ifdef CONFIG_NUMA
    -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
    +extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
    + unsigned long caller);
    #define kmalloc_node_track_caller(size, flags, node) \
    __kmalloc_node_track_caller(size, flags, node, \
    _RET_IP_)
    --
    2.30.2