    From: Sami Tolvanen <samitolvanen@google.com>
    Date: Fri, 18 Oct 2019
    Subject: [PATCH 07/18] scs: add accounting
    Add accounting for the memory allocated for shadow call stacks.
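
    The accounting is implemented as a new zone_stat_item,
    NR_KERNEL_SCS_BYTES: scs_prepare() increments it by SCS_SIZE and
    scs_release() decrements it, and the value is reported through
    /proc/meminfo, the per-node meminfo files, /proc/vmstat, and
    show_free_areas(). Illustratively, with the series applied (the
    value below is made up for the example):

        $ grep ShadowCallStack /proc/meminfo
        ShadowCallStack:     168 kB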

    Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
    ---
    drivers/base/node.c    |  6 ++++++
    fs/proc/meminfo.c      |  4 ++++
    include/linux/mmzone.h |  3 +++
    kernel/scs.c           | 20 ++++++++++++++++++++
    mm/page_alloc.c        |  6 ++++++
    mm/vmstat.c            |  3 +++
    6 files changed, 42 insertions(+)

    diff --git a/drivers/base/node.c b/drivers/base/node.c
    index 296546ffed6c..111e58ec231e 100644
    --- a/drivers/base/node.c
    +++ b/drivers/base/node.c
    @@ -415,6 +415,9 @@ static ssize_t node_read_meminfo(struct device *dev,
    "Node %d AnonPages: %8lu kB\n"
    "Node %d Shmem: %8lu kB\n"
    "Node %d KernelStack: %8lu kB\n"
    +#ifdef CONFIG_SHADOW_CALL_STACK
    + "Node %d ShadowCallStack:%8lu kB\n"
    +#endif
    "Node %d PageTables: %8lu kB\n"
    "Node %d NFS_Unstable: %8lu kB\n"
    "Node %d Bounce: %8lu kB\n"
    @@ -438,6 +441,9 @@ static ssize_t node_read_meminfo(struct device *dev,
    nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
    nid, K(i.sharedram),
    nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
    +#ifdef CONFIG_SHADOW_CALL_STACK
    + nid, sum_zone_node_page_state(nid, NR_KERNEL_SCS_BYTES) / 1024,
    +#endif
    nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
    nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
    nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
    diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
    index ac9247371871..df352e4bab90 100644
    --- a/fs/proc/meminfo.c
    +++ b/fs/proc/meminfo.c
    @@ -103,6 +103,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
    show_val_kb(m, "SUnreclaim: ", sunreclaim);
    seq_printf(m, "KernelStack: %8lu kB\n",
    global_zone_page_state(NR_KERNEL_STACK_KB));
    +#ifdef CONFIG_SHADOW_CALL_STACK
    + seq_printf(m, "ShadowCallStack:%8lu kB\n",
    + global_zone_page_state(NR_KERNEL_SCS_BYTES) / 1024);
    +#endif
    show_val_kb(m, "PageTables: ",
    global_zone_page_state(NR_PAGETABLE));
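
    Note that, as with NR_KERNEL_STACK_KB above it, the zone counter
    holds a raw value (bytes in this case) rather than pages, which is
    why the callers divide by 1024 themselves. For reference, the
    accessor used above is roughly the following (a simplified sketch of
    the include/linux/vmstat.h helper from this era):

        static inline unsigned long
        global_zone_page_state(enum zone_stat_item item)
        {
                long x = atomic_long_read(&vm_zone_stat[item]);
        #ifdef CONFIG_SMP
                /* per-cpu drift can leave the sum transiently negative */
                if (x < 0)
                        x = 0;
        #endif
                return x;
        }

    Storing bytes in it is fine as long as every writer uses the same
    unit.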

    diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
    index bda20282746b..fcb8c1708f9e 100644
    --- a/include/linux/mmzone.h
    +++ b/include/linux/mmzone.h
    @@ -200,6 +200,9 @@ enum zone_stat_item {
    NR_MLOCK, /* mlock()ed pages found and moved off LRU */
    NR_PAGETABLE, /* used for pagetables */
    NR_KERNEL_STACK_KB, /* measured in KiB */
    +#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
    + NR_KERNEL_SCS_BYTES, /* measured in bytes */
    +#endif
    /* Second 128 byte cacheline */
    NR_BOUNCE,
    #if IS_ENABLED(CONFIG_ZSMALLOC)
    diff --git a/kernel/scs.c b/kernel/scs.c
    index 47324e8d313b..0e3cba49ea1a 100644
    --- a/kernel/scs.c
    +++ b/kernel/scs.c
    @@ -7,9 +7,11 @@

    #include <linux/cpuhotplug.h>
    #include <linux/mm.h>
    +#include <linux/mmzone.h>
    #include <linux/slab.h>
    #include <linux/scs.h>
    #include <linux/vmalloc.h>
    +#include <linux/vmstat.h>
    #include <asm/scs.h>

    #define SCS_END_MAGIC 0xaf0194819b1635f6UL
    @@ -59,6 +61,11 @@ static void scs_free(void *s)
    vfree_atomic(s);
    }

    +static struct page *__scs_page(struct task_struct *tsk)
    +{
    + return vmalloc_to_page(__scs_base(tsk));
    +}
    +
    static int scs_cleanup(unsigned int cpu)
    {
    int i;
    @@ -92,6 +99,11 @@ static inline void scs_free(void *s)
    kmem_cache_free(scs_cache, s);
    }

    +static struct page *__scs_page(struct task_struct *tsk)
    +{
    + return virt_to_page(__scs_base(tsk));
    +}
    +
    void __init scs_init(void)
    {
    scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, SCS_SIZE,
    @@ -128,6 +140,12 @@ void scs_set_init_magic(struct task_struct *tsk)
    scs_load(tsk);
    }

    +static void scs_account(struct task_struct *tsk, int account)
    +{
    + mod_zone_page_state(page_zone(__scs_page(tsk)), NR_KERNEL_SCS_BYTES,
    + account * SCS_SIZE);
    +}
    +
    int scs_prepare(struct task_struct *tsk, int node)
    {
    void *s;
    @@ -138,6 +156,7 @@ int scs_prepare(struct task_struct *tsk, int node)

    task_set_scs(tsk, s);
    scs_set_magic(tsk);
    + scs_account(tsk, 1);

    return 0;
    }
    @@ -157,6 +176,7 @@ void scs_release(struct task_struct *tsk)

    WARN_ON(scs_corrupted(tsk));

    + scs_account(tsk, -1);
    scs_task_init(tsk);
    scs_free(s);
    }
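
    Two things are worth noting in the kernel/scs.c changes. The two
    __scs_page() flavors exist because vmap-allocated shadow stacks must
    be resolved with vmalloc_to_page(), while slab-backed ones use
    virt_to_page(). And scs_account(tsk, 1) in scs_prepare() is strictly
    paired with scs_account(tsk, -1) in scs_release(), so the counter
    returns to zero as tasks exit. Since the granularity is SCS_SIZE
    bytes rather than whole pages, the arithmetic stays exact for
    sub-page stacks: hypothetically, with a 1 kB SCS_SIZE, 2000 live
    tasks account for 2000 * 1024 = 2048000 bytes, reported as 2000 kB.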
    diff --git a/mm/page_alloc.c b/mm/page_alloc.c
    index ecc3dbad606b..fe17d69d98a7 100644
    --- a/mm/page_alloc.c
    +++ b/mm/page_alloc.c
    @@ -5361,6 +5361,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
    " managed:%lukB"
    " mlocked:%lukB"
    " kernel_stack:%lukB"
    +#ifdef CONFIG_SHADOW_CALL_STACK
    + " shadow_call_stack:%lukB"
    +#endif
    " pagetables:%lukB"
    " bounce:%lukB"
    " free_pcp:%lukB"
    @@ -5382,6 +5385,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
    K(zone_managed_pages(zone)),
    K(zone_page_state(zone, NR_MLOCK)),
    zone_page_state(zone, NR_KERNEL_STACK_KB),
    +#ifdef CONFIG_SHADOW_CALL_STACK
    + zone_page_state(zone, NR_KERNEL_SCS_BYTES) / 1024,
    +#endif
    K(zone_page_state(zone, NR_PAGETABLE)),
    K(zone_page_state(zone, NR_BOUNCE)),
    K(free_pcp),
    diff --git a/mm/vmstat.c b/mm/vmstat.c
    index 6afc892a148a..9fe4afe670fe 100644
    --- a/mm/vmstat.c
    +++ b/mm/vmstat.c
    @@ -1118,6 +1118,9 @@ const char * const vmstat_text[] = {
    "nr_mlock",
    "nr_page_table_pages",
    "nr_kernel_stack",
    +#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
    + "nr_shadow_call_stack_bytes",
    +#endif
    "nr_bounce",
    #if IS_ENABLED(CONFIG_ZSMALLOC)
    "nr_zspages",
    --
    2.23.0.866.gb869b98d4c-goog