From: Andrey Konovalov <andreyknvl@google.com>
Subject: [PATCH v13 01/25] kasan, mm: change hooks signatures
Date: Thu, 6 Dec 2018
Tag-based KASAN changes the value of the top byte of pointers returned
from the kernel allocation functions (such as kmalloc). This patch updates
the KASAN hook signatures and their usage in the SLAB and SLUB code to
reflect that: hooks that may retag an object now return the pointer, and
their callers use the returned value instead of the original one.

Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
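Note: for readers unfamiliar with tag-based KASAN, the sketch below shows
why the hooks must now return the pointer. A tag is stored in the top byte
of each 64-bit pointer, so a hook such as kasan_kmalloc() may hand back a
different bit pattern than it was given. This is illustrative only: the
set_tag() helper and the TAG_* constants are assumptions made up for this
note, not code from this patch, and it assumes a 64-bit platform.

/* Illustrative sketch, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT 56
#define TAG_MASK  ((uintptr_t)0xff << TAG_SHIFT)

/* Embed an 8-bit tag in the (otherwise unused) top byte of a pointer. */
static void *set_tag(void *ptr, uint8_t tag)
{
	return (void *)(((uintptr_t)ptr & ~TAG_MASK) |
			((uintptr_t)tag << TAG_SHIFT));
}

int main(void)
{
	int object;
	void *ret = &object;

	/*
	 * What a tagging hook conceptually does: the returned pointer
	 * differs from the argument in its top byte. A caller that
	 * discards the return value keeps a stale, untagged pointer,
	 * which is why the patch changes call sites to the form
	 *
	 *	ret = kasan_kmalloc(s, ret, size, flags);
	 */
	ret = set_tag(ret, 0xab);
	printf("tagged pointer: %p\n", ret);
	return 0;
}

The tagged pointer is printed but never dereferenced: in userspace the
resulting non-canonical address would fault, whereas on arm64 the kernel
relies on Top Byte Ignore so tagged pointers remain dereferenceable.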
include/linux/kasan.h | 43 +++++++++++++++++++++++++++++--------------
include/linux/slab.h | 4 ++--
mm/kasan/kasan.c | 30 ++++++++++++++++++------------
mm/slab.c | 12 ++++++------
mm/slab.h | 2 +-
mm/slab_common.c | 4 ++--
mm/slub.c | 15 +++++++--------
7 files changed, 65 insertions(+), 45 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 46aae129917c..52c86a568a4e 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -51,16 +51,16 @@ void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
void kasan_poison_object_data(struct kmem_cache *cache, void *object);
-void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
+void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object);

-void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
+void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
void kasan_kfree_large(void *ptr, unsigned long ip);
void kasan_poison_kfree(void *ptr, unsigned long ip);
-void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
+void *kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
gfp_t flags);
-void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
+void *kasan_krealloc(const void *object, size_t new_size, gfp_t flags);

-void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
+void *kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);

struct kasan_cache {
@@ -105,19 +105,34 @@ static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
void *object) {}
-static inline void kasan_init_slab_obj(struct kmem_cache *cache,
- const void *object) {}
+static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
+ const void *object)
+{
+ return (void *)object;
+}

-static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
+static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags)
+{
+ return ptr;
+}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
-static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
- size_t size, gfp_t flags) {}
-static inline void kasan_krealloc(const void *object, size_t new_size,
- gfp_t flags) {}
+static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
+ size_t size, gfp_t flags)
+{
+ return (void *)object;
+}
+static inline void *kasan_krealloc(const void *object, size_t new_size,
+ gfp_t flags)
+{
+ return (void *)object;
+}

-static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
- gfp_t flags) {}
+static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags)
+{
+ return object;
+}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
unsigned long ip)
{
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 918f374e7156..351ac48dabc4 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -444,7 +444,7 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
{
void *ret = kmem_cache_alloc(s, flags);

- kasan_kmalloc(s, ret, size, flags);
+ ret = kasan_kmalloc(s, ret, size, flags);
return ret;
}

@@ -455,7 +455,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
{
void *ret = kmem_cache_alloc_node(s, gfpflags, node);

- kasan_kmalloc(s, ret, size, gfpflags);
+ ret = kasan_kmalloc(s, ret, size, gfpflags);
return ret;
}
#endif /* CONFIG_TRACING */
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index c3bd5209da38..55deff17a4d9 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -474,20 +474,22 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
return (void *)object + cache->kasan_info.free_meta_offset;
}

-void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
+void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
struct kasan_alloc_meta *alloc_info;

if (!(cache->flags & SLAB_KASAN))
- return;
+ return (void *)object;

alloc_info = get_alloc_info(cache, object);
__memset(alloc_info, 0, sizeof(*alloc_info));
+
+ return (void *)object;
}

-void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
+void *kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
- kasan_kmalloc(cache, object, cache->object_size, flags);
+ return kasan_kmalloc(cache, object, cache->object_size, flags);
}

static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
@@ -528,7 +530,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
return __kasan_slab_free(cache, object, ip, true);
}

-void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
+void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
gfp_t flags)
{
unsigned long redzone_start;
@@ -538,7 +540,7 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
quarantine_reduce();

if (unlikely(object == NULL))
- return;
+ return NULL;

redzone_start = round_up((unsigned long)(object + size),
KASAN_SHADOW_SCALE_SIZE);
@@ -551,10 +553,12 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,

if (cache->flags & SLAB_KASAN)
set_track(&get_alloc_info(cache, object)->alloc_track, flags);
+
+ return (void *)object;
}
EXPORT_SYMBOL(kasan_kmalloc);

-void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
+void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
struct page *page;
unsigned long redzone_start;
@@ -564,7 +568,7 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
quarantine_reduce();

if (unlikely(ptr == NULL))
- return;
+ return NULL;

page = virt_to_page(ptr);
redzone_start = round_up((unsigned long)(ptr + size),
@@ -574,21 +578,23 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
kasan_unpoison_shadow(ptr, size);
kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
KASAN_PAGE_REDZONE);
+
+ return (void *)ptr;
}

-void kasan_krealloc(const void *object, size_t size, gfp_t flags)
+void *kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
struct page *page;

if (unlikely(object == ZERO_SIZE_PTR))
- return;
+ return ZERO_SIZE_PTR;

page = virt_to_head_page(object);

if (unlikely(!PageSlab(page)))
- kasan_kmalloc_large(object, size, flags);
+ return kasan_kmalloc_large(object, size, flags);
else
- kasan_kmalloc(page->slab_cache, object, size, flags);
+ return kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
diff --git a/mm/slab.c b/mm/slab.c
index 2a5654bb3b3f..26f60a22e5e0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3551,7 +3551,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
void *ret = slab_alloc(cachep, flags, _RET_IP_);

- kasan_slab_alloc(cachep, ret, flags);
+ ret = kasan_slab_alloc(cachep, ret, flags);
trace_kmem_cache_alloc(_RET_IP_, ret,
cachep->object_size, cachep->size, flags);

@@ -3617,7 +3617,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)

ret = slab_alloc(cachep, flags, _RET_IP_);

- kasan_kmalloc(cachep, ret, size, flags);
+ ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc(_RET_IP_, ret,
size, cachep->size, flags);
return ret;
@@ -3641,7 +3641,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

- kasan_slab_alloc(cachep, ret, flags);
+ ret = kasan_slab_alloc(cachep, ret, flags);
trace_kmem_cache_alloc_node(_RET_IP_, ret,
cachep->object_size, cachep->size,
flags, nodeid);
@@ -3660,7 +3660,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,

ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

- kasan_kmalloc(cachep, ret, size, flags);
+ ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc_node(_RET_IP_, ret,
size, cachep->size,
flags, nodeid);
@@ -3681,7 +3681,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
- kasan_kmalloc(cachep, ret, size, flags);
+ ret = kasan_kmalloc(cachep, ret, size, flags);

return ret;
}
@@ -3719,7 +3719,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
return cachep;
ret = slab_alloc(cachep, flags, caller);

- kasan_kmalloc(cachep, ret, size, flags);
+ ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc(caller, ret,
size, cachep->size, flags);

diff --git a/mm/slab.h b/mm/slab.h
index 58c6c1c2a78e..4190c24ef0e9 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -441,7 +441,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,

kmemleak_alloc_recursive(object, s->object_size, 1,
s->flags, flags);
- kasan_slab_alloc(s, object, flags);
+ p[i] = kasan_slab_alloc(s, object, flags);
}

if (memcg_kmem_enabled())
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7eb8dc136c1c..5f3504e26d4c 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1204,7 +1204,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
page = alloc_pages(flags, order);
ret = page ? page_address(page) : NULL;
kmemleak_alloc(ret, size, 1, flags);
- kasan_kmalloc_large(ret, size, flags);
+ ret = kasan_kmalloc_large(ret, size, flags);
return ret;
}
EXPORT_SYMBOL(kmalloc_order);
@@ -1482,7 +1482,7 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
ks = ksize(p);

if (ks >= new_size) {
- kasan_krealloc((void *)p, new_size, flags);
+ p = kasan_krealloc((void *)p, new_size, flags);
return (void *)p;
}

diff --git a/mm/slub.c b/mm/slub.c
index e3629cd7aff1..fdd4a86aa882 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1372,10 +1372,10 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
*/
-static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
kmemleak_alloc(ptr, size, 1, flags);
- kasan_kmalloc_large(ptr, size, flags);
+ return kasan_kmalloc_large(ptr, size, flags);
}

static __always_inline void kfree_hook(void *x)
@@ -2768,7 +2768,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
void *ret = slab_alloc(s, gfpflags, _RET_IP_);
trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
- kasan_kmalloc(s, ret, size, gfpflags);
+ ret = kasan_kmalloc(s, ret, size, gfpflags);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -2796,7 +2796,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
trace_kmalloc_node(_RET_IP_, ret,
size, s->size, gfpflags, node);

- kasan_kmalloc(s, ret, size, gfpflags);
+ ret = kasan_kmalloc(s, ret, size, gfpflags);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -3784,7 +3784,7 @@ void *__kmalloc(size_t size, gfp_t flags)

trace_kmalloc(_RET_IP_, ret, size, s->size, flags);

- kasan_kmalloc(s, ret, size, flags);
+ ret = kasan_kmalloc(s, ret, size, flags);

return ret;
}
@@ -3801,8 +3801,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
if (page)
ptr = page_address(page);

- kmalloc_large_node_hook(ptr, size, flags);
- return ptr;
+ return kmalloc_large_node_hook(ptr, size, flags);
}

void *__kmalloc_node(size_t size, gfp_t flags, int node)
@@ -3829,7 +3828,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)

trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);

- kasan_kmalloc(s, ret, size, flags);
+ ret = kasan_kmalloc(s, ret, size, flags);

return ret;
}
--
2.20.0.rc1.387.gf8505762e3-goog