Date: 2015-03-25
From: Hector Marco-Gisbert
Subject: Re: [PATCH] mm/x86: AMD Bulldozer ASLR fix


On 24/03/15 at 20:15, Borislav Petkov wrote:
> On Tue, Mar 24, 2015 at 07:00:48PM +0100, Hector Marco-Gisbert wrote:
>> diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
>> index 15c5df9..a693d54 100644
>> --- a/arch/x86/kernel/cpu/amd.c
>> +++ b/arch/x86/kernel/cpu/amd.c
>> @@ -5,6 +5,7 @@
>>
>> #include <linux/io.h>
>> #include <linux/sched.h>
>> +#include <linux/random.h>
>> #include <asm/processor.h>
>> #include <asm/apic.h>
>> #include <asm/cpu.h>
>> @@ -18,6 +19,8 @@
>>
>> #include "cpu.h"
>>
>> +unsigned long rnd_bulldozer_bits = 0;
>> +
>> static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
>> {
>> u32 gprs[8] = { 0 };
>> @@ -488,6 +491,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
>>
>> va_align.mask = (upperbit - 1) & PAGE_MASK;
>> va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
>> + /* A random value per boot for bits 12,13 and 14 */
>> + rnd_bulldozer_bits = get_random_int() & va_align.mask;
>
> Hmm, this should be done differently:
>
> va_align should have a ->bits member which gets ORed into the hole
> made by va_align.mask...
>

Yes, it looks better using va_align.
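
Something like this, I guess (rough sketch only, assuming struct va_alignment
in arch/x86/include/asm/elf.h grows a new "bits" member):

    struct va_alignment {
            int             flags;
            unsigned long   mask;
            unsigned long   bits;   /* per-boot random value for bits 12-14 */
    } ____cacheline_aligned;

    /* bsp_init_amd(): fill the hole left by the mask once per boot */
    va_align.bits = get_random_int() & va_align.mask;

    /* align_vdso_addr(): OR the random bits back in after aligning */
    addr = (addr + va_align.mask) & ~va_align.mask;
    return addr | va_align.bits;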

>> }
>> }
>>
>> diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
>> index 30277e2..5b8ad01 100644
>> --- a/arch/x86/kernel/sys_x86_64.c
>> +++ b/arch/x86/kernel/sys_x86_64.c
>> @@ -18,6 +18,7 @@
>>
>> #include <asm/ia32.h>
>> #include <asm/syscalls.h>
>> +#include <asm/amd_15h.h>
>>
>> /*
>> * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
>> @@ -34,10 +35,16 @@ static unsigned long get_align_mask(void)
>> return va_align.mask;
>> }
>>
>> +static unsigned long get_bulldozer_bits(void){
>> +
>> + return rnd_bulldozer_bits & get_align_mask();
>> +}
>> +
>> unsigned long align_vdso_addr(unsigned long addr)
>> {
>> unsigned long align_mask = get_align_mask();
>> - return (addr + align_mask) & ~align_mask;
>> + addr = (addr + align_mask) & ~align_mask;
>> + return addr | get_bulldozer_bits();
>> }
>>
>> static int __init control_va_addr_alignment(char *str)
>> @@ -137,7 +144,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
>> info.high_limit = end;
>> info.align_mask = filp ? get_align_mask() : 0;
>
> info.align_bits = filp ? get_align_bits() : 0;
>

I see your point. The drawback of adding a new field (align_bits) to the info
struct is that all architectures would need to initialize info.align_bits. In
addition, the generic functions unmapped_area()/unmapped_area_topdown() in
mm/mmap.c would need to be modified to set bits [12..14] using the new
field.
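
Just to illustrate how invasive that is (the align_bits field is hypothetical
here, not a proposal), the generic side would need something along these
lines, plus an initialization of the new field at every call site:

    struct vm_unmapped_area_info {
            unsigned long flags;
            unsigned long length;
            unsigned long low_limit;
            unsigned long high_limit;
            unsigned long align_mask;
            unsigned long align_offset;
            unsigned long align_bits;       /* new, must be set by all callers */
    };

    /* unmapped_area(), after the existing alignment adjustment: */
    gap_start += (info->align_offset - gap_start) & info->align_mask;
    gap_start |= info->align_bits & info->align_mask;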

A possible alternative that does not add a new field is to use the
"align_offset" variable: by adding the "get_align_bits()" value to
"align_offset", bits [12..14] are randomized per boot.

Patch coming.
Hector.

>> info.align_offset = pgoff << PAGE_SHIFT;
>> - return vm_unmapped_area(&info);
>> + addr = vm_unmapped_area(&info);
>> + if (!(addr & ~PAGE_MASK))
>> + return filp ? (addr|get_bulldozer_bits()) : addr;
>> + return addr;
>> }
>>
>> unsigned long
>> @@ -178,7 +188,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
>> info.align_offset = pgoff << PAGE_SHIFT;
>> addr = vm_unmapped_area(&info);
>> if (!(addr & ~PAGE_MASK))
>> - return addr;
>> + return filp ? (addr|get_bulldozer_bits()) : addr;
>
> Ditto.
>

