From: KarimAllah Ahmed <karahmed@amazon.de>
Subject: [PATCH v6 5/5] KVM: SVM: Allow direct access to MSR_IA32_SPEC_CTRL
Date: Thu, 1 Feb 2018
    [ Based on a patch from Paolo Bonzini <pbonzini@redhat.com> ]

    ... basically doing exactly what we do for VMX:

    - Passthrough SPEC_CTRL to guests (if enabled in guest CPUID)
    - Save and restore SPEC_CTRL around VMExit and VMEntry only if the
      guest actually used it.

    Cc: Asit Mallick <asit.k.mallick@intel.com>
    Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
    Cc: Dave Hansen <dave.hansen@intel.com>
    Cc: Andi Kleen <ak@linux.intel.com>
    Cc: Andrea Arcangeli <aarcange@redhat.com>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Tim Chen <tim.c.chen@linux.intel.com>
    Cc: Thomas Gleixner <tglx@linutronix.de>
    Cc: Dan Williams <dan.j.williams@intel.com>
    Cc: Jun Nakajima <jun.nakajima@intel.com>
    Cc: Paolo Bonzini <pbonzini@redhat.com>
    Cc: David Woodhouse <dwmw@amazon.co.uk>
    Cc: Greg KH <gregkh@linuxfoundation.org>
    Cc: Andy Lutomirski <luto@kernel.org>
    Cc: Ashok Raj <ashok.raj@intel.com>
    Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de>
    Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
    ---
    v5:
    - Add SPEC_CTRL to direct_access_msrs.
    ---
    arch/x86/kvm/svm.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
    1 file changed, 59 insertions(+)
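
Before the diff itself, here is a compilable userspace mock of the lazy
intercept scheme, for readers who are new to the pattern. The field names
mirror the patch; stop_intercepting_spec_ctrl() is a hypothetical stub
standing in for set_msr_interception(), so this is a sketch of the idea,
not kernel code:

/* Illustration only: lazy SPEC_CTRL intercept handling (userspace mock). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_IBRS	(1ULL << 0)
#define SPEC_CTRL_STIBP	(1ULL << 1)

struct vcpu_svm_mock {
	uint64_t spec_ctrl;
	bool save_spec_ctrl_on_exit;
};

/* Hypothetical stand-in for set_msr_interception(msrpm, msr, 1, 1). */
static void stop_intercepting_spec_ctrl(void)
{
	printf("SPEC_CTRL passed through; save/restore on every vmexit\n");
}

/* Mirrors the MSR_IA32_SPEC_CTRL case added to svm_set_msr() below. */
static int mock_set_spec_ctrl(struct vcpu_svm_mock *svm, uint64_t data)
{
	/* Reserved bits set: the real code makes the write fault. */
	if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
		return 1;

	svm->spec_ctrl = data;

	/* Only the first non-zero write flips the vCPU to passthrough. */
	if (data && !svm->save_spec_ctrl_on_exit) {
		svm->save_spec_ctrl_on_exit = true;
		stop_intercepting_spec_ctrl();
	}
	return 0;
}

int main(void)
{
	struct vcpu_svm_mock svm = { 0 };

	mock_set_spec_ctrl(&svm, 0);			/* zero write: stays intercepted */
	mock_set_spec_ctrl(&svm, SPEC_CTRL_IBRS);	/* flips to passthrough */
	return 0;
}

The point of the one-way flag is that a guest which never writes
SPEC_CTRL never pays the rdmsr/wrmsr cost on its vmexit path.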

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 254eefb..c6ab343 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -184,6 +184,9 @@ struct vcpu_svm {
 		u64 gs_base;
 	} host;
 
+	u64 spec_ctrl;
+	bool save_spec_ctrl_on_exit;
+
 	u32 *msrpm;
 
 	ulong nmi_iret_rip;
@@ -249,6 +252,7 @@ static const struct svm_direct_access_msrs {
 	{ .index = MSR_CSTAR,				.always = true  },
 	{ .index = MSR_SYSCALL_MASK,			.always = true  },
 #endif
+	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
 	{ .index = MSR_IA32_PRED_CMD,			.always = false },
 	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
 	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
@@ -1584,6 +1588,8 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	u32 dummy;
 	u32 eax = 1;
 
+	svm->spec_ctrl = 0;
+
 	if (!init_event) {
 		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
 					   MSR_IA32_APICBASE_ENABLE;
@@ -3605,6 +3611,13 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_VM_CR:
 		msr_info->data = svm->nested.vm_cr_msr;
 		break;
+	case MSR_IA32_SPEC_CTRL:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
+			return 1;
+
+		msr_info->data = svm->spec_ctrl;
+		break;
 	case MSR_IA32_UCODE_REV:
 		msr_info->data = 0x01000065;
 		break;
@@ -3696,6 +3709,30 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_IA32_TSC:
 		kvm_write_tsc(vcpu, msr);
 		break;
+	case MSR_IA32_SPEC_CTRL:
+		if (!msr->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
+			return 1;
+
+		/* The STIBP bit doesn't fault even if it's not advertised */
+		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+			return 1;
+
+		svm->spec_ctrl = data;
+
+		/*
+		 * When it's written (to non-zero) for the first time, pass
+		 * it through. This means we don't have to take the perf
+		 * hit of saving it on vmexit for the common case of guests
+		 * that don't use it.
+		 */
+		if (data && !svm->save_spec_ctrl_on_exit) {
+			svm->save_spec_ctrl_on_exit = true;
+			if (is_guest_mode(vcpu))
+				break;
+			set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
+		}
+		break;
 	case MSR_IA32_PRED_CMD:
 		if (!msr->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_IBPB))
@@ -4964,6 +5001,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	local_irq_enable();
 
+	/*
+	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
+	 * is no need to worry about the conditional branch over the wrmsr
+	 * being speculatively taken.
+	 */
+	if (svm->spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
 		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
@@ -5056,6 +5102,19 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 		);
 
+	/*
+	 * We do not use IBRS in the kernel. If this vCPU has used the
+	 * SPEC_CTRL MSR it may have left it on; save the value and
+	 * turn it off. This is much more efficient than blindly adding
+	 * it to the atomic save/restore list. Especially as the former
+	 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
+	 */
+	if (svm->save_spec_ctrl_on_exit)
+		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+
+	if (svm->spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
 
    --
    2.7.4
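
For completeness, the vmentry/vmexit ordering added to svm_vcpu_run() can
be mocked the same way. host_msr_spec_ctrl and run_guest() are hypothetical
stand-ins for the hardware MSR and the VMRUN asm block; the point is the
asymmetry: the guest value is restored only if it is non-zero, but read
back only when save_spec_ctrl_on_exit says the guest has ever written the
MSR:

/* Illustration only: SPEC_CTRL handling around vmentry/vmexit. */
#include <stdbool.h>
#include <stdint.h>

static uint64_t host_msr_spec_ctrl;	/* stand-in for the hardware MSR */

struct vcpu_svm_mock {
	uint64_t spec_ctrl;
	bool save_spec_ctrl_on_exit;
};

/* Stand-in for the VMRUN asm block: pretend the guest set IBRS. */
static void run_guest(void)
{
	host_msr_spec_ctrl = 1;	/* SPEC_CTRL_IBRS */
}

static void mock_vcpu_run(struct vcpu_svm_mock *svm)
{
	/* Before entry: restore the guest value only if it is non-zero. */
	if (svm->spec_ctrl)
		host_msr_spec_ctrl = svm->spec_ctrl;

	run_guest();

	/* After exit: read back only for vCPUs that ever wrote the MSR. */
	if (svm->save_spec_ctrl_on_exit)
		svm->spec_ctrl = host_msr_spec_ctrl;

	/* The host runs with SPEC_CTRL clear; the kernel does not use IBRS. */
	if (svm->spec_ctrl)
		host_msr_spec_ctrl = 0;
}

int main(void)
{
	struct vcpu_svm_mock svm = { .save_spec_ctrl_on_exit = true };

	mock_vcpu_run(&svm);
	return 0;	/* svm.spec_ctrl now holds the guest's last value */
}

Clearing the MSR unconditionally would also work, but the guard keeps the
common case (a guest that never touched SPEC_CTRL) free of any wrmsr.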