Subject: [PATCH 4.9 85/92] KVM/SVM: Allow direct access to MSR_IA32_SPEC_CTRL
    4.9-stable review patch.  If anyone has any objections, please let me know.

    ------------------

    From: KarimAllah Ahmed <karahmed@amazon.de>


    (cherry picked from commit b2ac58f90540e39324e7a29a7ad471407ae0bf48)

    [ Based on a patch from Paolo Bonzini <pbonzini@redhat.com> ]

    ... basically doing exactly what we do for VMX:

- Passthrough SPEC_CTRL to guests (if enabled in guest CPUID)
- Save and restore SPEC_CTRL around VMExit and VMEntry only if the guest
  actually used it.
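
[ Editor's note: the "only if the guest actually used it" part leans on
the SVM MSR permission map, in which each MSR owns two consecutive
bits, an even read-intercept bit and an odd write-intercept bit, sixteen
MSRs per 32-bit word. A minimal standalone sketch of that lookup
follows; it is illustrative only, not the kernel code. The word offset
is taken as a precomputed parameter here, whereas the kernel derives it
with svm_msrpm_offset(). ]

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Illustrative only: locate the write-intercept bit for an MSR in
     * an SVM-style permission map.  Two bits per MSR, the write bit is
     * the odd one of the pair, 16 MSRs per 32-bit word.  'offset' is
     * assumed to be precomputed.
     */
    static bool write_intercepted(const uint32_t *msrpm, uint32_t offset,
                                  uint32_t msr)
    {
            uint32_t bit_write = 2 * (msr & 0x0f) + 1; /* odd bit = write */

            return (msrpm[offset] >> bit_write) & 1;
    }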

    Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de>
    Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
    Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
    Cc: Andrea Arcangeli <aarcange@redhat.com>
    Cc: Andi Kleen <ak@linux.intel.com>
    Cc: Jun Nakajima <jun.nakajima@intel.com>
    Cc: kvm@vger.kernel.org
    Cc: Dave Hansen <dave.hansen@intel.com>
    Cc: Tim Chen <tim.c.chen@linux.intel.com>
    Cc: Andy Lutomirski <luto@kernel.org>
    Cc: Asit Mallick <asit.k.mallick@intel.com>
    Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
    Cc: Greg KH <gregkh@linuxfoundation.org>
    Cc: Paolo Bonzini <pbonzini@redhat.com>
    Cc: Dan Williams <dan.j.williams@intel.com>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Ashok Raj <ashok.raj@intel.com>
    Link: https://lkml.kernel.org/r/1517669783-20732-1-git-send-email-karahmed@amazon.de
    Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
    ---
    arch/x86/kvm/svm.c | 88 +++++++++++++++++++++++++++++++++++++++++++++++++++++
    1 file changed, 88 insertions(+)

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -183,6 +183,8 @@ struct vcpu_svm {
 		u64 gs_base;
 	} host;
 
+	u64 spec_ctrl;
+
 	u32 *msrpm;
 
 	ulong nmi_iret_rip;
@@ -248,6 +250,7 @@ static const struct svm_direct_access_ms
 	{ .index = MSR_CSTAR,				.always = true  },
 	{ .index = MSR_SYSCALL_MASK,			.always = true  },
 #endif
+	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
 	{ .index = MSR_IA32_PRED_CMD,			.always = false },
 	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
 	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
@@ -863,6 +866,25 @@ static bool valid_msr_intercept(u32 inde
 	return false;
 }
 
+static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
+{
+	u8 bit_write;
+	unsigned long tmp;
+	u32 offset;
+	u32 *msrpm;
+
+	msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
+				      to_svm(vcpu)->msrpm;
+
+	offset    = svm_msrpm_offset(msr);
+	bit_write = 2 * (msr & 0x0f) + 1;
+	tmp       = msrpm[offset];
+
+	BUG_ON(offset == MSR_INVALID);
+
+	return !!test_bit(bit_write, &tmp);
+}
+
 static void set_msr_interception(u32 *msrpm, unsigned msr,
 				 int read, int write)
 {
@@ -1537,6 +1559,8 @@ static void svm_vcpu_reset(struct kvm_vc
 	u32 dummy;
 	u32 eax = 1;
 
+	svm->spec_ctrl = 0;
+
 	if (!init_event) {
 		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
 					   MSR_IA32_APICBASE_ENABLE;
@@ -3520,6 +3544,13 @@ static int svm_get_msr(struct kvm_vcpu *
 	case MSR_VM_CR:
 		msr_info->data = svm->nested.vm_cr_msr;
 		break;
+	case MSR_IA32_SPEC_CTRL:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has_ibrs(vcpu))
+			return 1;
+
+		msr_info->data = svm->spec_ctrl;
+		break;
 	case MSR_IA32_UCODE_REV:
 		msr_info->data = 0x01000065;
 		break;
@@ -3611,6 +3642,33 @@ static int svm_set_msr(struct kvm_vcpu *
 	case MSR_IA32_TSC:
 		kvm_write_tsc(vcpu, msr);
 		break;
+	case MSR_IA32_SPEC_CTRL:
+		if (!msr->host_initiated &&
+		    !guest_cpuid_has_ibrs(vcpu))
+			return 1;
+
+		/* The STIBP bit doesn't fault even if it's not advertised */
+		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+			return 1;
+
+		svm->spec_ctrl = data;
+
+		if (!data)
+			break;
+
+		/*
+		 * For non-nested:
+		 * When it's written (to non-zero) for the first time, pass
+		 * it through.
+		 *
+		 * For nested:
+		 * The handling of the MSR bitmap for L2 guests is done in
+		 * nested_svm_vmrun_msrpm.
+		 * We update the L1 MSR bit as well since it will end up
+		 * touching the MSR anyway now.
+		 */
+		set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
+		break;
 	case MSR_IA32_PRED_CMD:
 		if (!msr->host_initiated &&
 		    !guest_cpuid_has_ibpb(vcpu))
@@ -4854,6 +4912,15 @@ static void svm_vcpu_run(struct kvm_vcpu
 
 	local_irq_enable();
 
+	/*
+	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
+	 * is no need to worry about the conditional branch over the wrmsr
+	 * being speculatively taken.
+	 */
+	if (svm->spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
 		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
@@ -4946,6 +5013,27 @@ static void svm_vcpu_run(struct kvm_vcpu
 #endif
 		);
 
+	/*
+	 * We do not use IBRS in the kernel. If this vCPU has used the
+	 * SPEC_CTRL MSR it may have left it on; save the value and
+	 * turn it off. This is much more efficient than blindly adding
+	 * it to the atomic save/restore list. Especially as the former
+	 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
+	 *
+	 * For non-nested case:
+	 * If the L01 MSR bitmap does not intercept the MSR, then we need to
+	 * save it.
+	 *
+	 * For nested case:
+	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
+	 * save it.
+	 */
+	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+
+	if (svm->spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
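
[ Editor's note: taken together, the hunks above give the run loop the
following shape around VMRUN. This is a condensed restatement of the
patched svm_vcpu_run(), not compilable on its own; vmrun() stands in
for the real inline-assembly block, everything else mirrors the diff. ]

    /* Condensed sketch of the SPEC_CTRL handling around VMRUN. */
    static void run_guest(struct kvm_vcpu *vcpu, struct vcpu_svm *svm)
    {
            /* Entry: install the guest value only when it is non-zero. */
            if (svm->spec_ctrl)
                    wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);

            vmrun();        /* guest runs with its own SPEC_CTRL */

            /*
             * Exit: if the MSR is passed through, the guest may have
             * rewritten it, so read back the live value ...
             */
            if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
                    rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);

            /* ... and clear any residue; the host runs without IBRS. */
            if (svm->spec_ctrl)
                    wrmsrl(MSR_IA32_SPEC_CTRL, 0);
    }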


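[ Editor's note: with the passthrough in place, a guest whose CPUID
advertises IBRS can touch the MSR directly. A hypothetical smoke test
from inside the guest, not part of the patch, using the standard msr(4)
interface: it assumes root privileges and a loaded 'msr' module.
MSR_IA32_SPEC_CTRL is architecturally 0x48, with IBRS at bit 0. ]

    /* Hypothetical guest-side probe: set IBRS, then read it back. */
    #include <fcntl.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_IA32_SPEC_CTRL	0x48
    #define SPEC_CTRL_IBRS		(UINT64_C(1) << 0)

    int main(void)
    {
            uint64_t val = SPEC_CTRL_IBRS;
            int fd = open("/dev/cpu/0/msr", O_RDWR);

            if (fd < 0) {
                    perror("open /dev/cpu/0/msr");
                    return 1;
            }

            /* With passthrough, this write lands in the real MSR. */
            if (pwrite(fd, &val, sizeof(val), MSR_IA32_SPEC_CTRL) != sizeof(val)) {
                    perror("pwrite");
                    return 1;
            }

            if (pread(fd, &val, sizeof(val), MSR_IA32_SPEC_CTRL) != sizeof(val)) {
                    perror("pread");
                    return 1;
            }

            printf("SPEC_CTRL = %#" PRIx64 "\n", val);
            close(fd);
            return 0;
    }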