From: Kees Cook <>
Date: Tue, 6 Sep 2016 13:56:40 -0400
Subject: Re: [PATCH] fs/proc/kcore.c: Omit kernel text area for hardened usercopy feature
On Mon, Sep 5, 2016 at 4:47 AM, Jiri Olsa <jolsa@redhat.com> wrote:
> On Fri, Sep 02, 2016 at 08:17:13AM -0700, Andi Kleen wrote:
>> On Fri, Sep 02, 2016 at 02:25:45PM +0200, Jiri Olsa wrote:
>> > One of the bullets for hardened usercopy feature is:
>> >   - object must not overlap with kernel text
>> >
>> > which is what we expose via /proc/kcore. We can hit
>> > this check and crash the system very easily just by
>> > reading the text area in kcore file:
>> >
>> >   usercopy: kernel memory exposure attempt detected from ffffffff8179a01f (<kernel text>) (4065 bytes)
>> >   kernel BUG at mm/usercopy.c:75!
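To make "just by reading the text area" concrete, a minimal userspace sketch along the following lines would trigger the check. This is a hypothetical reproducer, not code from the thread: it walks the ELF64 program headers of /proc/kcore and reads each PT_LOAD segment; the segment overlapping the kernel text trips the BUG above. It needs root and assumes a 64-bit kernel.

	/*
	 * Hypothetical reproducer sketch (not from the thread).
	 * /proc/kcore is an ELF core file: file offset p_offset of each
	 * PT_LOAD segment maps to kernel virtual address p_vaddr, so a
	 * plain pread() of the text segment forces the kernel to
	 * copy_to_user() from its own text.
	 */
	#include <elf.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		Elf64_Ehdr eh;
		char buf[4096];
		int fd = open("/proc/kcore", O_RDONLY);

		if (fd < 0 || pread(fd, &eh, sizeof(eh), 0) != sizeof(eh))
			return 1;

		for (int i = 0; i < eh.e_phnum; i++) {
			Elf64_Phdr ph;
			off_t off = eh.e_phoff + (off_t)i * eh.e_phentsize;

			if (pread(fd, &ph, sizeof(ph), off) != sizeof(ph))
				continue;
			if (ph.p_type != PT_LOAD)
				continue;
			/* The read itself is what fires the check. */
			if (pread(fd, buf, sizeof(buf), ph.p_offset) > 0)
				printf("read %zu bytes at vaddr 0x%llx\n",
				       sizeof(buf),
				       (unsigned long long)ph.p_vaddr);
		}
		close(fd);
		return 0;
	}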
It looks like it was this one in kcore.c:
		} else {
			if (kern_addr_valid(start)) {
				unsigned long n;

				n = copy_to_user(buffer, (char *)start, tsz);
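For reference, the check that fires here is check_object_size(), which with CONFIG_HARDENED_USERCOPY lands in mm/usercopy.c and rejects any source range overlapping the kernel text. In rough, paraphrased form (a sketch, not the verbatim 4.8 source):

	/*
	 * Paraphrased sketch of the 4.8-era mm/usercopy.c logic;
	 * _stext/_etext come from <asm/sections.h>. A non-NULL return
	 * is reported with the "<kernel text>" string seen in the oops
	 * above and ends in BUG().
	 */
	static inline bool overlaps(const void *ptr, unsigned long n,
				    unsigned long low, unsigned long high)
	{
		unsigned long check_low = (unsigned long)ptr;
		unsigned long check_high = check_low + n;

		/* No overlap if entirely above or entirely below. */
		if (check_low >= high || check_high < low)
			return false;
		return true;
	}

	static inline const char *check_kernel_text_object(const void *ptr,
							   unsigned long n)
	{
		unsigned long textlow = (unsigned long)_stext;
		unsigned long texthigh = (unsigned long)_etext;

		if (overlaps(ptr, n, textlow, texthigh))
			return "<kernel text>";
		return NULL;
	}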
>> > Omitting kernel text area from kcore when there's
>> > hardened usercopy feature is enabled.
>>
>> That will completely break PT decoding, which relies on looking
>> at the kernel text in /proc/kcore.
>>
>> Need a different fix here, perhaps some special copy function
>> that is not hardened.
>
> how about something like this
>
> jirka
>
>
> ---
> diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
> index c3f291195294..43f5404f0e61 100644
> --- a/arch/x86/include/asm/uaccess.h
> +++ b/arch/x86/include/asm/uaccess.h
> @@ -726,7 +726,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
>  }
>
>  static inline unsigned long __must_check
> -copy_to_user(void __user *to, const void *from, unsigned long n)
> +copy_to_user_check(void __user *to, const void *from,
> +		   unsigned long n, bool check)
>  {
>  	int sz = __compiletime_object_size(from);
>
> @@ -735,7 +736,8 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
>  	might_fault();
>
>  	if (likely(sz < 0 || sz >= n)) {
> -		check_object_size(from, n, true);
> +		if (check)
> +			check_object_size(from, n, true);
>  		n = _copy_to_user(to, from, n);
>  	} else if (!__builtin_constant_p(n))
>  		copy_user_overflow(sz, n);
> @@ -745,6 +747,19 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
>  	return n;
>  }
>
> +static inline unsigned long __must_check
> +copy_to_user(void __user *to, const void *from, unsigned long n)
> +{
> +	return copy_to_user_check(to, from, n, true);
> +}
> +
> +static inline unsigned long __must_check
> +copy_to_user_nocheck(void __user *to, const void *from, unsigned long n)
> +{
> +	return copy_to_user_check(to, from, n, false);
> +}
> +
> +
>  /*
>   * We rely on the nested NMI work to allow atomic faults from the NMI path; the
>   * nested NMI paths are careful to preserve CR2.
> diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
> index 673059a109fe..e80e4a146b7d 100644
> --- a/arch/x86/include/asm/uaccess_64.h
> +++ b/arch/x86/include/asm/uaccess_64.h
> @@ -50,7 +50,7 @@ __must_check unsigned long
>  copy_in_user(void __user *to, const void __user *from, unsigned len);
>
>  static __always_inline __must_check
> -int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
> +int __copy_from_user_nofaultcheck(void *dst, const void __user *src, unsigned size)
>  {
>  	int ret = 0;
>
> @@ -112,7 +112,7 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
>  {
>  	might_fault();
>  	kasan_check_write(dst, size);
> -	return __copy_from_user_nocheck(dst, src, size);
> +	return __copy_from_user_nofaultcheck(dst, src, size);
>  }
>
>  static __always_inline __must_check
> @@ -248,7 +248,7 @@ static __must_check __always_inline int
>  __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
>  {
>  	kasan_check_write(dst, size);
> -	return __copy_from_user_nocheck(dst, src, size);
> +	return __copy_from_user_nofaultcheck(dst, src, size);
>  }
>
>  static __must_check __always_inline int
> diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
> index a939f5ed7f89..c7a22a8a157e 100644
> --- a/fs/proc/kcore.c
> +++ b/fs/proc/kcore.c
> @@ -516,7 +516,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
>  		if (kern_addr_valid(start)) {
>  			unsigned long n;
>
> -			n = copy_to_user(buffer, (char *)start, tsz);
> +			n = copy_to_user_nocheck(buffer, (char *)start, tsz);
>  			/*
>  			 * We cannot distinguish between fault on source
>  			 * and fault on destination. When this happens
This patch is x86-specific (but ARCH_PROC_KCORE_TEXT is on multiple architectures), which I don't think we want. Instead, let's get the usercopy helper code centralized (Al Viro is looking at this already), and then we can design arch-agnostic methods to handle this.
In the meantime, how about continuing to use a bounce buffer, as is already done in the is_vmalloc_or_module_addr() case immediately above?
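Concretely, that suggestion would look something like this in read_kcore(). This is an illustrative sketch, not a tested patch: copying through a heap buffer means copy_to_user() never sees a kernel-text source pointer, so the hardened check has nothing to object to.

	} else {
		if (kern_addr_valid(start)) {
			char *bounce;

			/* Sketch only: bounce through the heap so that
			 * copy_to_user() never sees a kernel-text source
			 * pointer, exactly as the vmalloc/module branch
			 * above already does with its kzalloc'd elf_buf. */
			bounce = kzalloc(tsz, GFP_KERNEL);
			if (!bounce)
				return -ENOMEM;
			memcpy(bounce, (char *)start, tsz);
			if (copy_to_user(buffer, bounce, tsz)) {
				kfree(bounce);
				return -EFAULT;
			}
			kfree(bounce);
		}

The extra memcpy() per chunk is cheap for a non-hot path like read_kcore(), and it keeps the hardened check fully intact for every other copy_to_user() caller.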
-Kees
--
Kees Cook
Nexus Security