From: Andrey Konovalov <andreyknvl@google.com>
Subject: [PATCH mm v2 12/33] kasan: introduce kasan_init_cache_meta
Date: 18 Jul 2022

Add a kasan_init_cache_meta() helper that initializes metadata-related
cache parameters and use this helper in the common KASAN code.

Put the implementation of this new helper into generic.c, as only the
Generic mode uses per-object metadata.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
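As a sanity check on the adaptive redzone policy this patch moves into
generic.c, here is a standalone user-space sketch: the threshold table is
copied verbatim from optimal_redzone(), while the main() harness and the
sample sizes are made up for illustration.

#include <assert.h>

/* Threshold table copied from optimal_redzone() in this patch. */
static unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

int main(void)
{
	/* Small objects get the minimal 16-byte redzone. */
	assert(optimal_redzone(32) == 16);
	/* 128 > 128 - 32, so the next bracket (<= 512 - 64) applies. */
	assert(optimal_redzone(128) == 64);
	/* 4096 > 4096 - 128, so it falls into the (1 << 14) bracket. */
	assert(optimal_redzone(4096) == 256);
	/* Anything above (1 << 16) - 1024 gets the 2048-byte cap. */
	assert(optimal_redzone(1 << 20) == 2048);
	return 0;
}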
 mm/kasan/common.c  | 80 ++--------------------------------------------
 mm/kasan/generic.c | 79 +++++++++++++++++++++++++++++++++++++++++++++
 mm/kasan/kasan.h   |  2 ++
 3 files changed, 83 insertions(+), 78 deletions(-)

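One more illustrative sketch, this time of the placement rule for free meta:
it has to go into the redzone whenever the object's contents must outlive the
free, or the object is too small to hold the meta. The predicate below mirrors
the checks in kasan_init_cache_meta() in the patch; the stub types and the
needs_free_meta_in_redzone() name are invented for this example, and the
SLAB_TYPESAFE_BY_RCU value is copied from include/linux/slab.h.

#include <stdbool.h>
#include <stddef.h>

/* Stand-in for the kernel's slab flag, with the upstream value. */
#define SLAB_TYPESAFE_BY_RCU 0x00080000U

/* Stub types carrying only what the check reads; sizes are stand-ins. */
struct kasan_free_meta { void *quarantine_link[2]; };

struct kmem_cache {
	unsigned int flags;
	unsigned int object_size;
	void (*ctor)(void *);
};

/*
 * Free meta cannot be stored inside the object when its contents must
 * survive the free (SLAB_TYPESAFE_BY_RCU, constructor) or when the
 * object is too small to hold the meta; in those cases it goes into
 * the redzone, as kasan_init_cache_meta() decides in the patch below.
 */
static bool needs_free_meta_in_redzone(const struct kmem_cache *cache)
{
	return (cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	       cache->object_size < sizeof(struct kasan_free_meta);
}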
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index d2ec4e6af675..83a04834746f 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -117,28 +117,9 @@ void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
 			     KASAN_PAGE_FREE, init);
 }
 
-/*
- * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
- * For larger allocations larger redzones are used.
- */
-static inline unsigned int optimal_redzone(unsigned int object_size)
-{
-	return
-		object_size <= 64        - 16   ? 16 :
-		object_size <= 128       - 32   ? 32 :
-		object_size <= 512       - 64   ? 64 :
-		object_size <= 4096      - 128  ? 128 :
-		object_size <= (1 << 14) - 256  ? 256 :
-		object_size <= (1 << 15) - 512  ? 512 :
-		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
-}
-
 void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			  slab_flags_t *flags)
 {
-	unsigned int ok_size;
-	unsigned int optimal_size;
-
 	/*
 	 * SLAB_KASAN is used to mark caches as ones that are sanitized by
 	 * KASAN. Currently this flag is used in two places:
@@ -148,65 +129,8 @@ void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 	 */
 	*flags |= SLAB_KASAN;
 
-	if (!kasan_requires_meta())
-		return;
-
-	ok_size = *size;
-
-	/* Add alloc meta into redzone. */
-	cache->kasan_info.alloc_meta_offset = *size;
-	*size += sizeof(struct kasan_alloc_meta);
-
-	/*
-	 * If alloc meta doesn't fit, don't add it.
-	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
-	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
-	 * larger sizes.
-	 */
-	if (*size > KMALLOC_MAX_SIZE) {
-		cache->kasan_info.alloc_meta_offset = 0;
-		*size = ok_size;
-		/* Continue, since free meta might still fit. */
-	}
-
-	/* Only the generic mode uses free meta or flexible redzones. */
-	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
-		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
-		return;
-	}
-
-	/*
-	 * Add free meta into redzone when it's not possible to store
-	 * it in the object. This is the case when:
-	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
-	 *    be touched after it was freed, or
-	 * 2. Object has a constructor, which means it's expected to
-	 *    retain its content until the next allocation, or
-	 * 3. Object is too small.
-	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
-	 */
-	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
-	    cache->object_size < sizeof(struct kasan_free_meta)) {
-		ok_size = *size;
-
-		cache->kasan_info.free_meta_offset = *size;
-		*size += sizeof(struct kasan_free_meta);
-
-		/* If free meta doesn't fit, don't add it. */
-		if (*size > KMALLOC_MAX_SIZE) {
-			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
-			*size = ok_size;
-		}
-	}
-
-	/* Calculate size with optimal redzone. */
-	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
-	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
-	if (optimal_size > KMALLOC_MAX_SIZE)
-		optimal_size = KMALLOC_MAX_SIZE;
-	/* Use optimal size if the size with added metas is not large enough. */
-	if (*size < optimal_size)
-		*size = optimal_size;
+	if (kasan_requires_meta())
+		kasan_init_cache_meta(cache, size);
 }
 
 void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index fa654cb96a0d..73aea784040a 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -328,6 +328,85 @@ DEFINE_ASAN_SET_SHADOW(f3);
 DEFINE_ASAN_SET_SHADOW(f5);
 DEFINE_ASAN_SET_SHADOW(f8);
 
+/*
+ * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
+ * For larger allocations larger redzones are used.
+ */
+static inline unsigned int optimal_redzone(unsigned int object_size)
+{
+	return
+		object_size <= 64        - 16   ? 16 :
+		object_size <= 128       - 32   ? 32 :
+		object_size <= 512       - 64   ? 64 :
+		object_size <= 4096      - 128  ? 128 :
+		object_size <= (1 << 14) - 256  ? 256 :
+		object_size <= (1 << 15) - 512  ? 512 :
+		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
+}
+
+void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size)
+{
+	unsigned int ok_size;
+	unsigned int optimal_size;
+
+	ok_size = *size;
+
+	/* Add alloc meta into redzone. */
+	cache->kasan_info.alloc_meta_offset = *size;
+	*size += sizeof(struct kasan_alloc_meta);
+
+	/*
+	 * If alloc meta doesn't fit, don't add it.
+	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
+	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
+	 * larger sizes.
+	 */
+	if (*size > KMALLOC_MAX_SIZE) {
+		cache->kasan_info.alloc_meta_offset = 0;
+		*size = ok_size;
+		/* Continue, since free meta might still fit. */
+	}
+
+	/* Only the generic mode uses free meta or flexible redzones. */
+	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
+		return;
+	}
+
+	/*
+	 * Add free meta into redzone when it's not possible to store
+	 * it in the object. This is the case when:
+	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
+	 *    be touched after it was freed, or
+	 * 2. Object has a constructor, which means it's expected to
+	 *    retain its content until the next allocation, or
+	 * 3. Object is too small.
+	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
+	 */
+	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
+	    cache->object_size < sizeof(struct kasan_free_meta)) {
+		ok_size = *size;
+
+		cache->kasan_info.free_meta_offset = *size;
+		*size += sizeof(struct kasan_free_meta);
+
+		/* If free meta doesn't fit, don't add it. */
+		if (*size > KMALLOC_MAX_SIZE) {
+			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
+			*size = ok_size;
+		}
+	}
+
+	/* Calculate size with optimal redzone. */
+	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
+	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
+	if (optimal_size > KMALLOC_MAX_SIZE)
+		optimal_size = KMALLOC_MAX_SIZE;
+	/* Use optimal size if the size with added metas is not large enough. */
+	if (*size < optimal_size)
+		*size = optimal_size;
+}
+
 struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
 					      const void *object)
 {
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 1736abd661b6..6da35370ba37 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -297,12 +297,14 @@ struct page *kasan_addr_to_page(const void *addr);
 struct slab *kasan_addr_to_slab(const void *addr);
 
 #ifdef CONFIG_KASAN_GENERIC
+void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size);
 void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
 struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
 					      const void *object);
 struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 					     const void *object);
 #else
+static inline void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size) { }
 static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
 #endif
 
-- 
2.25.1