 
    From: Andrey Ryabinin <a.ryabinin@samsung.com>
    Subject: [RFC/PATCH v2 03/10] mm: page_alloc: add kasan hooks on alloc and free paths
    Date: 2014-09-10
    Add kernel address sanitizer hooks to mark the addresses of newly
    allocated pages as accessible in the corresponding shadow region,
    and to mark freed pages as inaccessible.

    Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
    ---
     include/linux/kasan.h |  6 ++++++
     mm/compaction.c       |  2 ++
     mm/kasan/kasan.c      | 14 ++++++++++++++
     mm/kasan/kasan.h      |  1 +
     mm/kasan/report.c     |  7 +++++++
     mm/page_alloc.c       |  3 +++
     6 files changed, 33 insertions(+)
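
    For readers new to kasan: each shadow byte tracks an 8-byte granule of
    kernel memory, so poisoning or unpoisoning a page means filling its
    slice of the shadow region with a marker value such as KASAN_FREE_PAGE.
    The helper below is a sketch of that address mapping, assuming the
    kasan_mem_to_shadow() helper and constants introduced earlier in this
    series (not part of this patch):

        /*
         * Sketch, assuming the shadow layout from earlier patches in this
         * series: every 8 bytes of kernel memory map to one shadow byte.
         * A shadow byte of 0 means fully accessible; KASAN_FREE_PAGE
         * means the backing page has been freed.
         */
        static inline unsigned long kasan_mem_to_shadow(unsigned long addr)
        {
        	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
        }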

    diff --git a/include/linux/kasan.h b/include/linux/kasan.h
    index f957ee9..c5ae971 100644
    --- a/include/linux/kasan.h
    +++ b/include/linux/kasan.h
    @@ -31,6 +31,9 @@ static inline void kasan_disable_local(void)
    void kasan_unpoison_shadow(const void *address, size_t size);
    void kasan_map_shadow(void);

    +void kasan_alloc_pages(struct page *page, unsigned int order);
    +void kasan_free_pages(struct page *page, unsigned int order);
    +
    #else /* CONFIG_KASAN */

    static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
    @@ -40,6 +43,9 @@ static inline void kasan_disable_local(void) {}

    static inline void kasan_map_shadow(void) {}

    +static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
    +static inline void kasan_free_pages(struct page *page, unsigned int order) {}
    +
    #endif /* CONFIG_KASAN */

    #endif /* LINUX_KASAN_H */
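
    The empty inline stubs in the !CONFIG_KASAN branch follow the usual
    kernel pattern: callers invoke the hooks unconditionally and the
    compiler discards the calls when KASAN is disabled. A hypothetical
    caller, for illustration only (not part of this patch):

        /* Hypothetical caller: no #ifdef CONFIG_KASAN is needed, because
         * the header supplies empty inline stubs when KASAN is off. */
        static void my_free_path(struct page *page, unsigned int order)
        {
        	kasan_free_pages(page, order);	/* no-op without CONFIG_KASAN */
        	/* ... remainder of the free path ... */
        }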
    diff --git a/mm/compaction.c b/mm/compaction.c
    index 7d9d92e..a8c5d6d 100644
    --- a/mm/compaction.c
    +++ b/mm/compaction.c
    @@ -16,6 +16,7 @@
    #include <linux/sysfs.h>
    #include <linux/balloon_compaction.h>
    #include <linux/page-isolation.h>
    +#include <linux/kasan.h>
    #include "internal.h"

    #ifdef CONFIG_COMPACTION
    @@ -59,6 +60,7 @@ static void map_pages(struct list_head *list)
    	list_for_each_entry(page, list, lru) {
    		arch_alloc_page(page, 0);
    		kernel_map_pages(page, 1, 1);
    +		kasan_alloc_pages(page, 0);
    	}
    }

    diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
    index 65f8145..ed4e925 100644
    --- a/mm/kasan/kasan.c
    +++ b/mm/kasan/kasan.c
    @@ -109,6 +109,20 @@ static __always_inline void check_memory_region(unsigned long addr,
    	kasan_report_error(&info);
    }

    +void kasan_alloc_pages(struct page *page, unsigned int order)
    +{
    +	if (likely(!PageHighMem(page)))
    +		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
    +}
    +
    +void kasan_free_pages(struct page *page, unsigned int order)
    +{
    +	if (likely(!PageHighMem(page)))
    +		kasan_poison_shadow(page_address(page),
    +				PAGE_SIZE << order,
    +				KASAN_FREE_PAGE);
    +}
    +
    void __asan_load1(unsigned long addr)
    {
    	check_memory_region(addr, 1, false);
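
    Both hooks skip highmem pages, whose addresses are not covered by the
    linear-map shadow. Since PAGE_SIZE << order is always a multiple of the
    8-byte shadow granule, whole shadow bytes are written and no
    partial-byte encoding is involved. For reference, a sketch of what
    kasan_poison_shadow() does (the function itself is added earlier in
    this series; this rendering is an assumption based on that patch):

        /* Sketch, assumed from earlier in this series: fill the shadow
         * bytes covering [address, address + size) with a poison value. */
        static void kasan_poison_shadow(const void *address, size_t size, u8 value)
        {
        	unsigned long shadow_start = kasan_mem_to_shadow((unsigned long)address);
        	unsigned long shadow_end = kasan_mem_to_shadow((unsigned long)address + size);

        	memset((void *)shadow_start, value, shadow_end - shadow_start);
        }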
    diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
    index 2ea2ed7..227e9c6 100644
    --- a/mm/kasan/kasan.h
    +++ b/mm/kasan/kasan.h
    @@ -6,6 +6,7 @@
    #define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
    #define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)

    +#define KASAN_FREE_PAGE 0xFF /* page was freed */
    #define KASAN_SHADOW_GAP 0xF9 /* address belongs to shadow memory */

    struct access_info {
    diff --git a/mm/kasan/report.c b/mm/kasan/report.c
    index 3bfc8b6..94d79e7 100644
    --- a/mm/kasan/report.c
    +++ b/mm/kasan/report.c
    @@ -57,6 +57,9 @@ static void print_error_description(struct access_info *info)
    	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
    		bug_type = "out of bounds access";
    		break;
    +	case KASAN_FREE_PAGE:
    +		bug_type = "use after free";
    +		break;
    	case KASAN_SHADOW_GAP:
    		bug_type = "wild memory access";
    		break;
    @@ -78,6 +81,10 @@ static void print_address_description(struct access_info *info)
    	page = virt_to_head_page((void *)info->access_addr);

    	switch (shadow_val) {
    +	case KASAN_FREE_PAGE:
    +		dump_page(page, "kasan error");
    +		dump_stack();
    +		break;
    	case KASAN_SHADOW_GAP:
    		pr_err("No metainfo is available for this access.\n");
    		dump_stack();
    diff --git a/mm/page_alloc.c b/mm/page_alloc.c
    index 3935c9a..63c55c9 100644
    --- a/mm/page_alloc.c
    +++ b/mm/page_alloc.c
    @@ -61,6 +61,7 @@
    #include <linux/page-debug-flags.h>
    #include <linux/hugetlb.h>
    #include <linux/sched/rt.h>
    +#include <linux/kasan.h>

    #include <asm/sections.h>
    #include <asm/tlbflush.h>
    @@ -753,6 +754,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)

    	trace_mm_page_free(page, order);
    	kmemcheck_free_shadow(page, order);
    +	kasan_free_pages(page, order);

    	if (PageAnon(page))
    		page->mapping = NULL;
    @@ -932,6 +934,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)

    	arch_alloc_page(page, order);
    	kernel_map_pages(page, 1 << order, 1);
    +	kasan_alloc_pages(page, order);

    	if (gfp_flags & __GFP_ZERO)
    		prep_zero_page(page, order, gfp_flags);
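
    With both hooks in place, page allocator memory gets use-after-free
    coverage end to end: prep_new_page() unpoisons, free_pages_prepare()
    poisons. A hypothetical trigger, for illustration only (not part of
    this patch):

        /* Hypothetical demo: after __free_pages(), the shadow bytes for
         * the page hold KASAN_FREE_PAGE, so the final read should be
         * reported as a use after free. */
        static noinline void kasan_uaf_demo(void)
        {
        	struct page *page = alloc_pages(GFP_KERNEL, 0);
        	char *mem;

        	if (!page)
        		return;
        	mem = page_address(page);	/* unpoisoned by kasan_alloc_pages() */
        	__free_pages(page, 0);		/* poisoned by kasan_free_pages() */
        	((volatile char *)mem)[0];	/* bad read: "use after free" */
        }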
    --
    1.8.5.5

