    Subject: [tip:x86/pti] x86/retpoline/entry: Convert entry assembler indirect jumps
    Commit-ID:  f3433c1010c6af61c9897f0f0447f81b991feac1
    Gitweb: https://git.kernel.org/tip/f3433c1010c6af61c9897f0f0447f81b991feac1
    Author: David Woodhouse <dwmw@amazon.co.uk>
    AuthorDate: Tue, 9 Jan 2018 14:43:11 +0000
    Committer: Thomas Gleixner <tglx@linutronix.de>
    CommitDate: Tue, 9 Jan 2018 16:17:52 +0100

    x86/retpoline/entry: Convert entry assembler indirect jumps

    Convert indirect jumps in core 32/64-bit entry assembler code to use
    non-speculative sequences when CONFIG_RETPOLINE is enabled.

    Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return
    address after the 'call' instruction must be *precisely* at the
    .Lentry_SYSCALL_64_after_fastpath_call label for stub_ptregs_64 to work,
    and the use of alternatives will mess that up unless we play horrid
    games to prepend NOPs and make the variants the same length. It's not
    worth it; in the case where the retpoline is patched out via ALTERNATIVE,
    the first instruction at __x86_indirect_thunk_rax is going to be a bare
    jmp *%rax anyway.

    Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Acked-by: Ingo Molnar <mingo@kernel.org>
    Acked-by: Arjan van de Ven <arjan@linux.intel.com>
    Cc: gnomes@lxorguk.ukuu.org.uk
    Cc: Rik van Riel <riel@redhat.com>
    Cc: Andi Kleen <ak@linux.intel.com>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Jiri Kosina <jikos@kernel.org>
    Cc: Andy Lutomirski <luto@amacapital.net>
    Cc: Dave Hansen <dave.hansen@intel.com>
    Cc: Kees Cook <keescook@google.com>
    Cc: Tim Chen <tim.c.chen@linux.intel.com>
    Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
    Cc: Paul Turner <pjt@google.com>
    Link: https://lkml.kernel.org/r/1515508997-6154-6-git-send-email-dwmw@amazon.co.uk

    ---
    arch/x86/entry/entry_32.S | 5 +++--
    arch/x86/entry/entry_64.S | 12 +++++++++---
    2 files changed, 12 insertions(+), 5 deletions(-)
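
    A note for readers new to retpolines: the __x86_indirect_thunk_rax thunk
    that the fastpath hunk below calls directly is provided by an earlier patch
    in this series (the per-register thunks in arch/x86/lib/retpoline.S) and,
    via alternatives, ends up in one of two shapes. The sketch below is
    illustrative only; the label names are made up, and details such as the
    exact speculation-trap loop vary between kernel versions.

        /* Retpoline patched out: the thunk body is just a plain indirect jump. */
        __x86_indirect_thunk_rax:
                jmp     *%rax

        /* Retpoline active: the indirect branch goes via a "return trampoline". */
        __x86_indirect_thunk_rax:
                call    .Ldo_jump               /* push a return address that is never used */
        .Lspeculation_trap:
                pause                           /* speculative execution of the ret below is  */
                lfence                          /* captured here and spins harmlessly         */
                jmp     .Lspeculation_trap
        .Ldo_jump:
                mov     %rax, (%rsp)            /* overwrite the return address with the real target */
                ret                             /* the "return" is the actual jump to *%rax           */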

    diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
    index ace8f32..a1f28a5 100644
    --- a/arch/x86/entry/entry_32.S
    +++ b/arch/x86/entry/entry_32.S
    @@ -44,6 +44,7 @@
     #include <asm/asm.h>
     #include <asm/smap.h>
     #include <asm/frame.h>
    +#include <asm/nospec-branch.h>

     	.section .entry.text, "ax"

    @@ -290,7 +291,7 @@ ENTRY(ret_from_fork)

     	/* kernel thread */
     1:	movl	%edi, %eax
    -	call	*%ebx
    +	CALL_NOSPEC %ebx
     	/*
     	 * A kernel thread is allowed to return here after successfully
     	 * calling do_execve(). Exit to userspace to complete the execve()
    @@ -919,7 +920,7 @@ common_exception:
     	movl	%ecx, %es
     	TRACE_IRQS_OFF
     	movl	%esp, %eax			# pt_regs pointer
    -	call	*%edi
    +	CALL_NOSPEC %edi
     	jmp	ret_from_exception
     END(common_exception)

    diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
    index ed31d00..59874bc 100644
    --- a/arch/x86/entry/entry_64.S
    +++ b/arch/x86/entry/entry_64.S
    @@ -37,6 +37,7 @@
     #include <asm/pgtable_types.h>
     #include <asm/export.h>
     #include <asm/frame.h>
    +#include <asm/nospec-branch.h>
     #include <linux/err.h>

     #include "calling.h"
    @@ -187,7 +188,7 @@ ENTRY(entry_SYSCALL_64_trampoline)
     	 */
     	pushq	%rdi
     	movq	$entry_SYSCALL_64_stage2, %rdi
    -	jmp	*%rdi
    +	JMP_NOSPEC %rdi
     END(entry_SYSCALL_64_trampoline)

     	.popsection
    @@ -266,7 +267,12 @@ entry_SYSCALL_64_fastpath:
     	 * It might end up jumping to the slow path. If it jumps, RAX
     	 * and all argument registers are clobbered.
     	 */
    +#ifdef CONFIG_RETPOLINE
    +	movq	sys_call_table(, %rax, 8), %rax
    +	call	__x86_indirect_thunk_rax
    +#else
     	call	*sys_call_table(, %rax, 8)
    +#endif
     .Lentry_SYSCALL_64_after_fastpath_call:

     	movq	%rax, RAX(%rsp)
    @@ -438,7 +444,7 @@ ENTRY(stub_ptregs_64)
     	jmp	entry_SYSCALL64_slow_path

     1:
    -	jmp	*%rax				/* Called from C */
    +	JMP_NOSPEC %rax			/* Called from C */
     END(stub_ptregs_64)

     .macro ptregs_stub func
    @@ -517,7 +523,7 @@ ENTRY(ret_from_fork)
     1:
     	/* kernel thread */
     	movq	%r12, %rdi
    -	call	*%rbx
    +	CALL_NOSPEC %rbx
     	/*
     	 * A kernel thread is allowed to return here after successfully
     	 * calling do_execve(). Exit to userspace to complete the execve()
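
    For completeness, the CALL_NOSPEC/JMP_NOSPEC macros used above come from
    the <asm/nospec-branch.h> header introduced earlier in the series. Roughly,
    and only as a simplified sketch (the real macros use ALTERNATIVE_2, carry
    objtool annotations and also provide an lfence-based variant for AMD), the
    jump flavour is an alternatives-based wrapper like this:

        .macro JMP_NOSPEC reg:req
        #ifdef CONFIG_RETPOLINE
                /* Patched at boot: plain indirect jump vs. the retpoline thunk sequence */
                ALTERNATIVE __stringify(jmp *\reg), \
                            __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE
        #else
                jmp     *\reg
        #endif
        .endm

    CALL_NOSPEC is built the same way around the call-flavoured retpoline
    sequence, which is why the ALTERNATIVE-padding problem described in the
    changelog only matters where the return address must sit at an exact label.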