    Subject: Re: [PATCH 1/3] KVM: x86: use native MSR ops for SPEC_CTRL
    On Wed, Feb 21, 2018 at 10:41:35PM +0100, Paolo Bonzini wrote:
    > Having a paravirt indirect call in the IBRS restore path is not a
    > good idea, since we are trying to protect from speculative execution
    > of bogus indirect branch targets. It is also slower, so use
    > native_wrmsrl on the vmentry path too.
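
    (For readers following along: the contrast being drawn above is roughly
    the one sketched below. This is a simplified illustration, not the actual
    kernel code -- the real definitions live in arch/x86/include/asm/msr.h
    and asm/paravirt.h, and the pv_ops_sketch table is invented for the
    example.)

    #include <stdint.h>

    /*
     * native_wrmsrl() boils down to the WRMSR instruction itself, so there
     * is no indirect branch for the CPU to (mis)speculate through.
     */
    static inline void native_wrmsrl_sketch(uint32_t msr, uint64_t val)
    {
            asm volatile("wrmsr"
                         : /* no outputs */
                         : "c" (msr), "a" ((uint32_t)val),
                           "d" ((uint32_t)(val >> 32))
                         : "memory");
    }

    /*
     * wrmsrl() on a CONFIG_PARAVIRT kernel dispatches through a pv_ops
     * function pointer, i.e. an indirect call -- exactly the kind of branch
     * IBRS/retpolines are meant to keep bogus speculation away from.
     * (Illustrative ops table only, not the real pv_ops layout.)
     */
    struct pv_msr_ops_sketch {
            void (*write_msr)(uint32_t msr, uint32_t lo, uint32_t hi);
    };

    static struct pv_msr_ops_sketch pv_ops_sketch;

    static inline void wrmsrl_sketch(uint32_t msr, uint64_t val)
    {
            /* indirect call through the ops table */
            pv_ops_sketch.write_msr(msr, (uint32_t)val, (uint32_t)(val >> 32));
    }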

    But it gets replaced during patching. As in, once the machine boots,
    the assembly changes from:

    callq *0xfffflbah

    to
    wrmsr

    ? I don't think you need this patch.
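
    (Rough model of the boot-time patching referred to above; the real thing
    is done by the alternatives/paravirt patching code in
    arch/x86/kernel/alternative.c and arch/x86/kernel/paravirt.c. The struct
    and helper names below are made up for illustration -- the point is just
    that a paravirt call site can be overwritten in place with the native
    instruction once the final pv op is known.)

    #include <stdint.h>
    #include <string.h>

    /* A patch site starts life as an indirect call through pv_ops. */
    struct patch_site_sketch {
            uint8_t *insn;      /* address of the call site */
            uint8_t  len;       /* bytes available at the site */
    };

    static const uint8_t wrmsr_insn[] = { 0x0f, 0x30 };    /* wrmsr */

    static void patch_site_to_native_sketch(struct patch_site_sketch *site)
    {
            /* Overwrite the indirect call with the native instruction... */
            memcpy(site->insn, wrmsr_insn, sizeof(wrmsr_insn));
            /* ...and pad the rest of the slot with NOPs (0x90). */
            memset(site->insn + sizeof(wrmsr_insn), 0x90,
                   site->len - sizeof(wrmsr_insn));
    }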

    >
    > Fixes: d28b387fb74da95d69d2615732f50cceb38e9a4d
    > Cc: x86@kernel.org
    > Cc: Radim Krčmář <rkrcmar@redhat.com>
    > Cc: KarimAllah Ahmed <karahmed@amazon.de>
    > Cc: David Woodhouse <dwmw@amazon.co.uk>
    > Cc: Jim Mattson <jmattson@google.com>
    > Cc: Thomas Gleixner <tglx@linutronix.de>
    > Cc: Ingo Molnar <mingo@kernel.org>
    > Cc: stable@vger.kernel.org
    > Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
    > ---
    > arch/x86/kvm/svm.c | 7 ++++---
    > arch/x86/kvm/vmx.c | 7 ++++---
    > 2 files changed, 8 insertions(+), 6 deletions(-)
    >
    > diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
    > index b3e488a74828..1598beeda11c 100644
    > --- a/arch/x86/kvm/svm.c
    > +++ b/arch/x86/kvm/svm.c
    > @@ -49,6 +49,7 @@
    > #include <asm/debugreg.h>
    > #include <asm/kvm_para.h>
    > #include <asm/irq_remapping.h>
    > +#include <asm/microcode.h>
    > #include <asm/nospec-branch.h>
    >
    > #include <asm/virtext.h>
    > @@ -5355,7 +5356,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
    > * being speculatively taken.
    > */
    > if (svm->spec_ctrl)
    > - wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
    > + native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
    >
    > asm volatile (
    > "push %%" _ASM_BP "; \n\t"
    > @@ -5465,10 +5466,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
    > * save it.
    > */
    > if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
    > - rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
    > + svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
    >
    > if (svm->spec_ctrl)
    > - wrmsrl(MSR_IA32_SPEC_CTRL, 0);
    > + native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
    >
    > /* Eliminate branch target predictions from guest mode */
    > vmexit_fill_RSB();
    > diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
    > index 67b028d8e726..5caeb8dc5bda 100644
    > --- a/arch/x86/kvm/vmx.c
    > +++ b/arch/x86/kvm/vmx.c
    > @@ -51,6 +51,7 @@
    > #include <asm/apic.h>
    > #include <asm/irq_remapping.h>
    > #include <asm/mmu_context.h>
    > +#include <asm/microcode.h>
    > #include <asm/nospec-branch.h>
    >
    > #include "trace.h"
    > @@ -9453,7 +9454,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
    > * being speculatively taken.
    > */
    > if (vmx->spec_ctrl)
    > - wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
    > + native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
    >
    > vmx->__launched = vmx->loaded_vmcs->launched;
    > asm(
    > @@ -9589,10 +9590,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
    > * save it.
    > */
    > if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
    > - rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
    > + vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
    >
    > if (vmx->spec_ctrl)
    > - wrmsrl(MSR_IA32_SPEC_CTRL, 0);
    > + native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
    >
    > /* Eliminate branch target predictions from guest mode */
    > vmexit_fill_RSB();
    > --
    > 1.8.3.1
    >
    >
