Subject: Re: [PATCH v3 06/17] scs: add accounting
On Thu, Oct 31, 2019 at 09:46:26AM -0700, samitolvanen@google.com wrote:
> This change adds accounting for the memory allocated for shadow stacks.
>
> Signed-off-by: Sami Tolvanen <samitolvanen@google.com>

A nice bit of stats to have.
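
For illustration, with this applied the counter surfaces in /proc/meminfo
(and the per-node meminfo) next to KernelStack, roughly like this -- the
value below is made up, not from a real system:

  $ grep ShadowCallStack /proc/meminfo
  ShadowCallStack:    1024 kB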

Reviewed-by: Kees Cook <keescook@chromium.org>

-Kees

> ---
> drivers/base/node.c | 6 ++++++
> fs/proc/meminfo.c | 4 ++++
> include/linux/mmzone.h | 3 +++
> kernel/scs.c | 19 +++++++++++++++++++
> mm/page_alloc.c | 6 ++++++
> mm/vmstat.c | 3 +++
> 6 files changed, 41 insertions(+)
>
> diff --git a/drivers/base/node.c b/drivers/base/node.c
> index 296546ffed6c..111e58ec231e 100644
> --- a/drivers/base/node.c
> +++ b/drivers/base/node.c
> @@ -415,6 +415,9 @@ static ssize_t node_read_meminfo(struct device *dev,
> "Node %d AnonPages: %8lu kB\n"
> "Node %d Shmem: %8lu kB\n"
> "Node %d KernelStack: %8lu kB\n"
> +#ifdef CONFIG_SHADOW_CALL_STACK
> + "Node %d ShadowCallStack:%8lu kB\n"
> +#endif
> "Node %d PageTables: %8lu kB\n"
> "Node %d NFS_Unstable: %8lu kB\n"
> "Node %d Bounce: %8lu kB\n"
> @@ -438,6 +441,9 @@ static ssize_t node_read_meminfo(struct device *dev,
> nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
> nid, K(i.sharedram),
> nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
> +#ifdef CONFIG_SHADOW_CALL_STACK
> + nid, sum_zone_node_page_state(nid, NR_KERNEL_SCS_BYTES) / 1024,
> +#endif
> nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
> nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
> nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
> diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
> index 8c1f1bb1a5ce..49768005a79e 100644
> --- a/fs/proc/meminfo.c
> +++ b/fs/proc/meminfo.c
> @@ -103,6 +103,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
> show_val_kb(m, "SUnreclaim: ", sunreclaim);
> seq_printf(m, "KernelStack: %8lu kB\n",
> global_zone_page_state(NR_KERNEL_STACK_KB));
> +#ifdef CONFIG_SHADOW_CALL_STACK
> + seq_printf(m, "ShadowCallStack:%8lu kB\n",
> + global_zone_page_state(NR_KERNEL_SCS_BYTES) / 1024);
> +#endif
> show_val_kb(m, "PageTables: ",
> global_zone_page_state(NR_PAGETABLE));
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index bda20282746b..fcb8c1708f9e 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -200,6 +200,9 @@ enum zone_stat_item {
> NR_MLOCK, /* mlock()ed pages found and moved off LRU */
> NR_PAGETABLE, /* used for pagetables */
> NR_KERNEL_STACK_KB, /* measured in KiB */
> +#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
> + NR_KERNEL_SCS_BYTES, /* measured in bytes */
> +#endif
> /* Second 128 byte cacheline */
> NR_BOUNCE,
> #if IS_ENABLED(CONFIG_ZSMALLOC)
> diff --git a/kernel/scs.c b/kernel/scs.c
> index 7c1a40020754..7780fc4e29ac 100644
> --- a/kernel/scs.c
> +++ b/kernel/scs.c
> @@ -11,6 +11,7 @@
> #include <linux/scs.h>
> #include <linux/slab.h>
> #include <linux/vmalloc.h>
> +#include <linux/vmstat.h>
> #include <asm/scs.h>
>
> static inline void *__scs_base(struct task_struct *tsk)
> @@ -74,6 +75,11 @@ static void scs_free(void *s)
> vfree_atomic(s);
> }
>
> +static struct page *__scs_page(struct task_struct *tsk)
> +{
> + return vmalloc_to_page(__scs_base(tsk));
> +}
> +
> static int scs_cleanup(unsigned int cpu)
> {
> int i;
> @@ -107,6 +113,11 @@ static inline void scs_free(void *s)
> kmem_cache_free(scs_cache, s);
> }
>
> +static struct page *__scs_page(struct task_struct *tsk)
> +{
> + return virt_to_page(__scs_base(tsk));
> +}
> +
> void __init scs_init(void)
> {
> scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, SCS_SIZE,
> @@ -135,6 +146,12 @@ void scs_task_reset(struct task_struct *tsk)
> task_set_scs(tsk, __scs_base(tsk));
> }
>
> +static void scs_account(struct task_struct *tsk, int account)
> +{
> + mod_zone_page_state(page_zone(__scs_page(tsk)), NR_KERNEL_SCS_BYTES,
> + account * SCS_SIZE);
> +}
> +
> int scs_prepare(struct task_struct *tsk, int node)
> {
> void *s;
> @@ -145,6 +162,7 @@ int scs_prepare(struct task_struct *tsk, int node)
>
> task_set_scs(tsk, s);
> scs_set_magic(tsk);
> + scs_account(tsk, 1);
>
> return 0;
> }
> @@ -164,6 +182,7 @@ void scs_release(struct task_struct *tsk)
>
> WARN_ON(scs_corrupted(tsk));
>
> + scs_account(tsk, -1);
> task_set_scs(tsk, NULL);
> scs_free(s);
> }
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index ecc3dbad606b..fe17d69d98a7 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -5361,6 +5361,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
> " managed:%lukB"
> " mlocked:%lukB"
> " kernel_stack:%lukB"
> +#ifdef CONFIG_SHADOW_CALL_STACK
> + " shadow_call_stack:%lukB"
> +#endif
> " pagetables:%lukB"
> " bounce:%lukB"
> " free_pcp:%lukB"
> @@ -5382,6 +5385,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
> K(zone_managed_pages(zone)),
> K(zone_page_state(zone, NR_MLOCK)),
> zone_page_state(zone, NR_KERNEL_STACK_KB),
> +#ifdef CONFIG_SHADOW_CALL_STACK
> + zone_page_state(zone, NR_KERNEL_SCS_BYTES) / 1024,
> +#endif
> K(zone_page_state(zone, NR_PAGETABLE)),
> K(zone_page_state(zone, NR_BOUNCE)),
> K(free_pcp),
> diff --git a/mm/vmstat.c b/mm/vmstat.c
> index 6afc892a148a..9fe4afe670fe 100644
> --- a/mm/vmstat.c
> +++ b/mm/vmstat.c
> @@ -1118,6 +1118,9 @@ const char * const vmstat_text[] = {
> "nr_mlock",
> "nr_page_table_pages",
> "nr_kernel_stack",
> +#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
> + "nr_shadow_call_stack_bytes",
> +#endif
> "nr_bounce",
> #if IS_ENABLED(CONFIG_ZSMALLOC)
> "nr_zspages",
> --
> 2.24.0.rc0.303.g954a862665-goog
>

--
Kees Cook
