Subject: Re: [PATCH] fs/proc/kcore.c: Omit kernel text area for hardened usercopy feature

On Fri, Sep 02, 2016 at 08:17:13AM -0700, Andi Kleen wrote:
> On Fri, Sep 02, 2016 at 02:25:45PM +0200, Jiri Olsa wrote:
> > One of the bullets for the hardened usercopy feature is:
> > - object must not overlap with kernel text
> >
> > which is what we expose via /proc/kcore. We can hit
> > this check and crash the system very easily just by
> > reading the text area in the kcore file:
> >
> > usercopy: kernel memory exposure attempt detected from ffffffff8179a01f (<kernel text>) (4065 bytes)
> > kernel BUG at mm/usercopy.c:75!
> >
> > Omit the kernel text area from kcore when the
> > hardened usercopy feature is enabled.
>
> That will completely break PT decoding, which relies on looking
> at the kernel text in /proc/kcore.
>
> Need a different fix here, perhaps some special copy function
> that is not hardened.
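
For reference, the BUG quoted above fires in the hardened usercopy code,
which rejects any copy whose source range overlaps the kernel text
section. A minimal user-space model of that range check (a sketch,
assuming the 4.8-era mm/usercopy.c logic; textlow/texthigh are stand-in
values, not real _stext/_etext addresses):

/*
 * User-space model of the hardened usercopy kernel-text check (a sketch,
 * assuming the 4.8-era mm/usercopy.c logic).  textlow/texthigh stand in
 * for the kernel's _stext/_etext; they are NOT real symbol values.
 */
#include <stdio.h>
#include <stdlib.h>

static int overlaps(unsigned long ptr, unsigned long n,
		    unsigned long low, unsigned long high)
{
	/* does [ptr, ptr + n) intersect [low, high)? */
	return ptr < high && ptr + n > low;
}

int main(void)
{
	unsigned long textlow  = 0xffffffff81000000UL;	/* stand-in _stext */
	unsigned long texthigh = 0xffffffff81800000UL;	/* stand-in _etext */
	/* source address and length taken from the report quoted above */
	unsigned long from = 0xffffffff8179a01fUL;
	unsigned long n = 4065;

	if (overlaps(from, n, textlow, texthigh)) {
		fprintf(stderr, "usercopy: kernel memory exposure attempt "
			"detected from %#lx (<kernel text>) (%lu bytes)\n",
			from, n);
		abort();	/* the kernel BUG()s at this point */
	}
	return 0;
}

Run as-is, this prints the same style of message as the quoted report
and then aborts, mirroring the BUG() that kills the copy path in the
kernel.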

how about something like this?

jirka


---
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index c3f291195294..43f5404f0e61 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -726,7 +726,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 }
 
 static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
+copy_to_user_check(void __user *to, const void *from,
+		   unsigned long n, bool check)
 {
 	int sz = __compiletime_object_size(from);
 
@@ -735,7 +736,8 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	might_fault();
 
 	if (likely(sz < 0 || sz >= n)) {
-		check_object_size(from, n, true);
+		if (check)
+			check_object_size(from, n, true);
 		n = _copy_to_user(to, from, n);
 	} else if (!__builtin_constant_p(n))
 		copy_user_overflow(sz, n);
@@ -745,6 +747,19 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return n;
 }
 
+static inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	return copy_to_user_check(to, from, n, true);
+}
+
+static inline unsigned long __must_check
+copy_to_user_nocheck(void __user *to, const void *from, unsigned long n)
+{
+	return copy_to_user_check(to, from, n, false);
+}
+
+
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 673059a109fe..e80e4a146b7d 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -50,7 +50,7 @@ __must_check unsigned long
 copy_in_user(void __user *to, const void __user *from, unsigned len);
 
 static __always_inline __must_check
-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
+int __copy_from_user_nofaultcheck(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
@@ -112,7 +112,7 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	might_fault();
 	kasan_check_write(dst, size);
-	return __copy_from_user_nocheck(dst, src, size);
+	return __copy_from_user_nofaultcheck(dst, src, size);
 }
 
 static __always_inline __must_check
@@ -248,7 +248,7 @@ static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
 	kasan_check_write(dst, size);
-	return __copy_from_user_nocheck(dst, src, size);
+	return __copy_from_user_nofaultcheck(dst, src, size);
 }
 
 static __must_check __always_inline int
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index a939f5ed7f89..c7a22a8a157e 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -516,7 +516,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 			if (kern_addr_valid(start)) {
 				unsigned long n;
 
-				n = copy_to_user(buffer, (char *)start, tsz);
+				n = copy_to_user_nocheck(buffer, (char *)start, tsz);
 				/*
 				 * We cannot distinguish between fault on source
 				 * and fault on destination. When this happens
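
For completeness, here is a user-space reproducer sketch for the
original crash: it looks up _stext in /proc/kallsyms, finds the PT_LOAD
segment of /proc/kcore that covers it, and reads kernel text from there,
which lands in read_kcore() and, without the patch, trips the hardened
usercopy BUG. It assumes x86-64 and root (so kallsyms shows real
addresses); with the patch applied the read goes through
copy_to_user_nocheck() and should succeed.

/*
 * Reproducer sketch: read a few bytes of kernel text via /proc/kcore.
 * Assumes x86-64 and root (so /proc/kallsyms shows real addresses).
 * On a hardened-usercopy kernel without the patch above, the pread()
 * of kernel text trips the BUG in mm/usercopy.c.
 */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Look up the address of _stext in /proc/kallsyms. */
static unsigned long stext_addr(void)
{
	char line[256], sym[256];
	unsigned long addr = 0;
	FILE *f = fopen("/proc/kallsyms", "r");

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%lx %*c %255s", &addr, sym) == 2 &&
		    !strcmp(sym, "_stext"))
			break;
		addr = 0;
	}
	fclose(f);
	return addr;
}

int main(void)
{
	unsigned long stext = stext_addr();
	int fd = open("/proc/kcore", O_RDONLY);
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	char buf[64];
	int i;

	if (!stext || fd < 0)
		return 1;
	if (read(fd, &eh, sizeof(eh)) != sizeof(eh))
		return 1;
	/* Walk the program headers for the PT_LOAD segment holding _stext. */
	for (i = 0; i < eh.e_phnum; i++) {
		if (pread(fd, &ph, sizeof(ph),
			  eh.e_phoff + i * eh.e_phentsize) != sizeof(ph))
			return 1;
		if (ph.p_type != PT_LOAD ||
		    stext < ph.p_vaddr || stext >= ph.p_vaddr + ph.p_memsz)
			continue;
		/* Served by read_kcore() -> copy_to_user() on kernel text. */
		if (pread(fd, buf, sizeof(buf),
			  ph.p_offset + (stext - ph.p_vaddr)) == sizeof(buf))
			printf("read %zu bytes of kernel text at %#lx\n",
			       sizeof(buf), stext);
		return 0;
	}
	return 1;
}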