Subject: [PATCH 14/23] fork: define usercopy region in thread_stack, task_struct, mm_struct slab caches
From: David Windsor <dave@nullcore.net>

In support of usercopy hardening, this patch defines a region in each
of the thread_stack, task_struct, and mm_struct slab caches in which
userspace copy operations are allowed. Since only a single whitelisted
region per cache is supported, the patch also moves the whitelisted
task_struct fields (blocked and saved_sigmask) next to each other so
that one window can cover both.
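
As an aside (not part of the patch), the whitelist window covering two
adjacent fields can be derived with offsetof()/sizeof(), the same
arithmetic the fork.c hunk below uses; demo_task and sigset_demo_t are
hypothetical stand-ins here, not kernel types:

  #include <stddef.h>
  #include <stdio.h>

  typedef unsigned long sigset_demo_t;    /* stand-in for sigset_t */

  struct demo_task {
          int state;
          /* grouped so a single whitelist window can cover both */
          struct {
                  sigset_demo_t blocked;
                  sigset_demo_t saved_sigmask;
          };
          int exit_code;
  };

  int main(void)
  {
          size_t useroffset = offsetof(struct demo_task, blocked);
          size_t usersize = offsetof(struct demo_task, saved_sigmask) -
                            offsetof(struct demo_task, blocked) +
                            sizeof(sigset_demo_t);

          printf("useroffset=%zu usersize=%zu\n", useroffset, usersize);
          return 0;
  }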

This region is known as the slab cache's usercopy region. Slab
caches can now check that each copy operation involving cache-managed
memory falls entirely within the slab's usercopy region.
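
As a further sketch (illustrative only, not the kernel implementation),
the check amounts to requiring that a copy of len bytes at offset
within an object stays inside the cache's [useroffset, useroffset +
usersize) window:

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  struct demo_cache {
          size_t useroffset;    /* start of usercopy region in each object */
          size_t usersize;      /* length of the usercopy region */
  };

  static bool copy_allowed(const struct demo_cache *c, size_t offset,
                           size_t len)
  {
          return offset >= c->useroffset &&
                 len <= c->usersize &&
                 offset - c->useroffset <= c->usersize - len;
  }

  int main(void)
  {
          /* hypothetical cache whitelisting bytes [64, 192) of each object */
          struct demo_cache c = { .useroffset = 64, .usersize = 128 };

          printf("%d\n", copy_allowed(&c, 64, 128)); /* 1: exactly the region */
          printf("%d\n", copy_allowed(&c, 32, 16));  /* 0: starts before it */
          printf("%d\n", copy_allowed(&c, 160, 64)); /* 0: runs past its end */
          return 0;
  }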

This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY
whitelisting code in the last public patch of grsecurity/PaX based on my
understanding of the code. Changes or omissions from the original code are
mine and don't reflect the original grsecurity/PaX code.

Signed-off-by: David Windsor <dave@nullcore.net>
[kees: adjust commit log]
Signed-off-by: Kees Cook <keescook@chromium.org>
---
 include/linux/sched.h | 15 ++++++++++++---
 kernel/fork.c         | 18 +++++++++++++-----
 2 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2b69fc650201..345db7983af1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -745,10 +745,19 @@ struct task_struct {
 	/* Signal handlers: */
 	struct signal_struct		*signal;
 	struct sighand_struct		*sighand;
-	sigset_t			blocked;
 	sigset_t			real_blocked;
-	/* Restored if set_restore_sigmask() was used: */
-	sigset_t			saved_sigmask;
+
+	/*
+	 * Usercopy slab whitelisting needs blocked, saved_sigmask
+	 * to be adjacent.
+	 */
+	struct {
+		sigset_t		blocked;
+
+		/* Restored if set_restore_sigmask() was used */
+		sigset_t		saved_sigmask;
+	};
+
 	struct sigpending		pending;
 	unsigned long			sas_ss_sp;
 	size_t				sas_ss_size;
diff --git a/kernel/fork.c b/kernel/fork.c
index e53770d2bf95..172df19baeb5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -282,8 +282,9 @@ static void free_thread_stack(struct task_struct *tsk)
 
 void thread_stack_cache_init(void)
 {
-	thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
-					      THREAD_SIZE, 0, NULL);
+	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
+						THREAD_SIZE, THREAD_SIZE, 0, 0,
+						THREAD_SIZE, NULL);
 	BUG_ON(thread_stack_cache == NULL);
 }
 # endif
@@ -467,9 +468,14 @@ void __init fork_init(void)
 	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
 
 	/* create a slab on which task_structs can be allocated */
-	task_struct_cachep = kmem_cache_create("task_struct",
+	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
 			arch_task_struct_size, align,
-			SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
+			SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+			offsetof(struct task_struct, blocked),
+			offsetof(struct task_struct, saved_sigmask) -
+				offsetof(struct task_struct, blocked) +
+				sizeof(init_task.saved_sigmask),
+			NULL);
 #endif
 
 	/* do the arch specific task caches init */
@@ -2208,9 +2214,11 @@ void __init proc_caches_init(void)
 	 * maximum number of CPU's we can ever have. The cpumask_allocation
 	 * is at the end of the structure, exactly for that reason.
 	 */
-	mm_cachep = kmem_cache_create("mm_struct",
+	mm_cachep = kmem_cache_create_usercopy("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+			offsetof(struct mm_struct, saved_auxv),
+			sizeof_field(struct mm_struct, saved_auxv),
 			NULL);
 	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
 	mmap_init();
--
2.7.4