    Subject: [PATCH 3.16 050/410] x86/speculation: Use IBRS if available before calling into firmware
    3.16.57-rc1 review patch.  If anyone has any objections, please let me know.

    ------------------

    From: David Woodhouse <dwmw@amazon.co.uk>

    commit dd84441a797150dcc49298ec95c459a8891d8bb1 upstream.

    Retpoline means the kernel is safe because it has no indirect branches.
    But firmware isn't, so use IBRS for firmware calls if it's available.

    Block preemption while IBRS is set, although in practice the call sites
    already had to be doing that.
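    A minimal sketch of the intended call-site pattern (do_firmware_service()
    is a hypothetical placeholder; the real call sites are the apm.h and
    efi.h hunks below):

    static void example_call_into_firmware(void)
    {
            /*
             * Sets SPEC_CTRL_IBRS in MSR_IA32_SPEC_CTRL (only when
             * X86_FEATURE_USE_IBRS_FW is set) and disables preemption.
             */
            firmware_restrict_branch_speculation_start();

            do_firmware_service();  /* hypothetical firmware entry point */

            /* Clears IBRS again and re-enables preemption. */
            firmware_restrict_branch_speculation_end();
    }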

    Ignore hpwdt.c for now. It's taking spinlocks and calling into firmware
    code, from an NMI handler. I don't want to touch that with a bargepole.

    Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
    Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: arjan.van.de.ven@intel.com
    Cc: bp@alien8.de
    Cc: dave.hansen@intel.com
    Cc: jmattson@google.com
    Cc: karahmed@amazon.de
    Cc: kvm@vger.kernel.org
    Cc: pbonzini@redhat.com
    Cc: rkrcmar@redhat.com
    Link: http://lkml.kernel.org/r/1519037457-7643-2-git-send-email-dwmw@amazon.co.uk
    Signed-off-by: Ingo Molnar <mingo@kernel.org>
    [bwh: Backported to 3.16:
    - x86 defines {,__}efi_call_virt() itself; update those definitions
    - Renumber the feature bit
    - Adjust context]
    Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
    ---
    arch/x86/include/asm/apm.h | 6 +++++
    arch/x86/include/asm/cpufeature.h | 1 +
    arch/x86/include/asm/efi.h | 8 ++++++
    arch/x86/include/asm/nospec-branch.h | 39 +++++++++++++++++++++-------
    arch/x86/kernel/cpu/bugs.c | 12 ++++++++-
    5 files changed, 56 insertions(+), 10 deletions(-)

    --- a/arch/x86/include/asm/apm.h
    +++ b/arch/x86/include/asm/apm.h
    @@ -6,6 +6,8 @@
    #ifndef _ASM_X86_MACH_DEFAULT_APM_H
    #define _ASM_X86_MACH_DEFAULT_APM_H

    +#include <asm/nospec-branch.h>
    +
    #ifdef APM_ZERO_SEGS
    # define APM_DO_ZERO_SEGS \
    "pushl %%ds\n\t" \
    @@ -31,6 +33,7 @@ static inline void apm_bios_call_asm(u32
    * N.B. We do NOT need a cld after the BIOS call
    * because we always save and restore the flags.
    */
    + firmware_restrict_branch_speculation_start();
    __asm__ __volatile__(APM_DO_ZERO_SEGS
    "pushl %%edi\n\t"
    "pushl %%ebp\n\t"
    @@ -43,6 +46,7 @@ static inline void apm_bios_call_asm(u32
    "=S" (*esi)
    : "a" (func), "b" (ebx_in), "c" (ecx_in)
    : "memory", "cc");
    + firmware_restrict_branch_speculation_end();
    }

    static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
    @@ -55,6 +59,7 @@ static inline u8 apm_bios_call_simple_as
    * N.B. We do NOT need a cld after the BIOS call
    * because we always save and restore the flags.
    */
    + firmware_restrict_branch_speculation_start();
    __asm__ __volatile__(APM_DO_ZERO_SEGS
    "pushl %%edi\n\t"
    "pushl %%ebp\n\t"
    @@ -67,6 +72,7 @@ static inline u8 apm_bios_call_simple_as
    "=S" (si)
    : "a" (func), "b" (ebx_in), "c" (ecx_in)
    : "memory", "cc");
    + firmware_restrict_branch_speculation_end();
    return error;
    }

    --- a/arch/x86/include/asm/cpufeature.h
    +++ b/arch/x86/include/asm/cpufeature.h
    @@ -190,6 +190,7 @@
    #define X86_FEATURE_RSB_CTXSW (7*32+11) /* "" Fill RSB on context switches */

    #define X86_FEATURE_USE_IBPB (7*32+12) /* "" Indirect Branch Prediction Barrier enabled */
    +#define X86_FEATURE_USE_IBRS_FW (7*32+13) /* "" Use IBRS during runtime firmware calls */

    #define X86_FEATURE_RETPOLINE (7*32+29) /* "" Generic Retpoline mitigation for Spectre variant 2 */
    #define X86_FEATURE_RETPOLINE_AMD (7*32+30) /* "" AMD Retpoline mitigation for Spectre variant 2 */
    --- a/arch/x86/include/asm/efi.h
    +++ b/arch/x86/include/asm/efi.h
    @@ -2,6 +2,8 @@
    #define _ASM_X86_EFI_H

    #include <asm/i387.h>
    +#include <asm/nospec-branch.h>
    +
    /*
    * We map the EFI regions needed for runtime services non-contiguously,
    * with preserved alignment on virtual addresses starting from -4G down
    @@ -37,8 +39,10 @@ extern unsigned long asmlinkage efi_call
    ({ \
    efi_status_t __s; \
    kernel_fpu_begin(); \
    + firmware_restrict_branch_speculation_start(); \
    __s = ((efi_##f##_t __attribute__((regparm(0)))*) \
    efi.systab->runtime->f)(args); \
    + firmware_restrict_branch_speculation_end(); \
    kernel_fpu_end(); \
    __s; \
    })
    @@ -47,8 +51,10 @@ extern unsigned long asmlinkage efi_call
    #define __efi_call_virt(f, args...) \
    ({ \
    kernel_fpu_begin(); \
    + firmware_restrict_branch_speculation_start(); \
    ((efi_##f##_t __attribute__((regparm(0)))*) \
    efi.systab->runtime->f)(args); \
    + firmware_restrict_branch_speculation_end(); \
    kernel_fpu_end(); \
    })

    @@ -69,7 +75,9 @@ extern u64 asmlinkage efi_call(void *fp,
    efi_sync_low_kernel_mappings(); \
    preempt_disable(); \
    __kernel_fpu_begin(); \
    + firmware_restrict_branch_speculation_start(); \
    __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__); \
    + firmware_restrict_branch_speculation_end(); \
    __kernel_fpu_end(); \
    preempt_enable(); \
    __s; \
    --- a/arch/x86/include/asm/nospec-branch.h
    +++ b/arch/x86/include/asm/nospec-branch.h
    @@ -194,17 +194,38 @@ static inline void vmexit_fill_RSB(void)
    #endif
    }

    +#define alternative_msr_write(_msr, _val, _feature) \
    + asm volatile(ALTERNATIVE("", \
    + "movl %[msr], %%ecx\n\t" \
    + "movl %[val], %%eax\n\t" \
    + "movl $0, %%edx\n\t" \
    + "wrmsr", \
    + _feature) \
    + : : [msr] "i" (_msr), [val] "i" (_val) \
    + : "eax", "ecx", "edx", "memory")
    +
    static inline void indirect_branch_prediction_barrier(void)
    {
    - asm volatile(ALTERNATIVE("",
    - "movl %[msr], %%ecx\n\t"
    - "movl %[val], %%eax\n\t"
    - "movl $0, %%edx\n\t"
    - "wrmsr",
    - X86_FEATURE_USE_IBPB)
    - : : [msr] "i" (MSR_IA32_PRED_CMD),
    - [val] "i" (PRED_CMD_IBPB)
    - : "eax", "ecx", "edx", "memory");
    + alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
    + X86_FEATURE_USE_IBPB);
    +}
    +
    +/*
    + * With retpoline, we must use IBRS to restrict branch prediction
    + * before calling into firmware.
    + */
    +static inline void firmware_restrict_branch_speculation_start(void)
    +{
    + preempt_disable();
    + alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS,
    + X86_FEATURE_USE_IBRS_FW);
    +}
    +
    +static inline void firmware_restrict_branch_speculation_end(void)
    +{
    + alternative_msr_write(MSR_IA32_SPEC_CTRL, 0,
    + X86_FEATURE_USE_IBRS_FW);
    + preempt_enable();
    }

    #endif /* __ASSEMBLY__ */
    --- a/arch/x86/kernel/cpu/bugs.c
    +++ b/arch/x86/kernel/cpu/bugs.c
    @@ -364,6 +364,15 @@ retpoline_auto:
    setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
    pr_info("Enabling Indirect Branch Prediction Barrier\n");
    }
    +
    + /*
    + * Retpoline means the kernel is safe because it has no indirect
    + * branches. But firmware isn't, so use IBRS to protect that.
    + */
    + if (boot_cpu_has(X86_FEATURE_IBRS)) {
    + setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
    + pr_info("Enabling Restricted Speculation for firmware calls\n");
    + }
    }

    #undef pr_fmt
    @@ -393,8 +402,9 @@ ssize_t cpu_show_spectre_v2(struct devic
    if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
    return sprintf(buf, "Not affected\n");

    - return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
    + return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
    boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
    + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
    spectre_v2_module_string());
    }
    #endif