Subject: [RFC PATCH 34/35] KVM: SVM: Provide an updated VMRUN invocation for SEV-ES guests
From: Tom Lendacky <thomas.lendacky@amd.com>

The guest vCPU register state of an SEV-ES guest will be restored on VMRUN
and saved on VMEXIT. Therefore, there is no need to restore the guest
registers directly and through VMLOAD before VMRUN and no need to save the
guest registers directly and through VMSAVE on VMEXIT.

Update the svm_vcpu_run() function to skip saving and restoring the guest
register state, and add an alternative function in vmenter.S for running
an SEV-ES guest.

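For orientation, the resulting run-path dispatch in svm_vcpu_enter_exit()
takes the shape below. This is a condensed sketch of the svm.c hunk in this
patch, not a drop-in replacement; the surrounding tracing/lockdep calls are
elided:

	if (sev_es_guest(svm->vcpu.kvm)) {
		/*
		 * Hardware restores all guest register state from the
		 * encrypted VMSA on VMRUN and saves it back on VMEXIT, so
		 * no VMLOAD/VMSAVE and no per-register handoff is needed.
		 */
		__svm_sev_es_vcpu_run(svm->vmcb_pa);
	} else {
		__svm_vcpu_run(svm->vmcb_pa,
			       (unsigned long *)&svm->vcpu.arch.regs);

		/* Host segment bases are only clobbered on this path. */
#ifdef CONFIG_X86_64
		native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
		loadsegment(fs, svm->host.fs);
#ifndef CONFIG_X86_32_LAZY_GS
		loadsegment(gs, svm->host.gs);
#endif
#endif
	}
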
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
arch/x86/kvm/svm/svm.c | 36 +++++++++++++++++----------
arch/x86/kvm/svm/svm.h | 5 ++++
arch/x86/kvm/svm/vmenter.S | 50 ++++++++++++++++++++++++++++++++++++++
3 files changed, 78 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index efefe8ba9759..5e5f67dd293a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3640,16 +3640,20 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
guest_enter_irqoff();
lockdep_hardirqs_on(CALLER_ADDR0);

- __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
+ if (sev_es_guest(svm->vcpu.kvm)) {
+ __svm_sev_es_vcpu_run(svm->vmcb_pa);
+ } else {
+ __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);

#ifdef CONFIG_X86_64
- native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+ native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
- loadsegment(fs, svm->host.fs);
+ loadsegment(fs, svm->host.fs);
#ifndef CONFIG_X86_32_LAZY_GS
- loadsegment(gs, svm->host.gs);
+ loadsegment(gs, svm->host.gs);
#endif
#endif
+ }

/*
* VMEXIT disables interrupts (host state), but tracing and lockdep
@@ -3676,9 +3680,11 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
fastpath_t exit_fastpath;
struct vcpu_svm *svm = to_svm(vcpu);

- svm_rax_write(svm, vcpu->arch.regs[VCPU_REGS_RAX]);
- svm_rsp_write(svm, vcpu->arch.regs[VCPU_REGS_RSP]);
- svm_rip_write(svm, vcpu->arch.regs[VCPU_REGS_RIP]);
+ if (!sev_es_guest(svm->vcpu.kvm)) {
+ svm_rax_write(svm, vcpu->arch.regs[VCPU_REGS_RAX]);
+ svm_rsp_write(svm, vcpu->arch.regs[VCPU_REGS_RSP]);
+ svm_rip_write(svm, vcpu->arch.regs[VCPU_REGS_RIP]);
+ }

/*
* Disable singlestep if we're injecting an interrupt/exception.
@@ -3700,7 +3706,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)

sync_lapic_to_cr8(vcpu);

- svm_cr2_write(svm, vcpu->arch.cr2);
+ if (!sev_es_guest(svm->vcpu.kvm))
+ svm_cr2_write(svm, vcpu->arch.cr2);

/*
* Run with all-zero DR6 unless needed, so that we can get the exact cause
@@ -3748,14 +3755,17 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

- reload_tss(vcpu);
+ if (!sev_es_guest(svm->vcpu.kvm))
+ reload_tss(vcpu);

x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);

- vcpu->arch.cr2 = svm_cr2_read(svm);
- vcpu->arch.regs[VCPU_REGS_RAX] = svm_rax_read(svm);
- vcpu->arch.regs[VCPU_REGS_RSP] = svm_rsp_read(svm);
- vcpu->arch.regs[VCPU_REGS_RIP] = svm_rip_read(svm);
+ if (!sev_es_guest(svm->vcpu.kvm)) {
+ vcpu->arch.cr2 = svm_cr2_read(svm);
+ vcpu->arch.regs[VCPU_REGS_RAX] = svm_rax_read(svm);
+ vcpu->arch.regs[VCPU_REGS_RSP] = svm_rsp_read(svm);
+ vcpu->arch.regs[VCPU_REGS_RIP] = svm_rip_read(svm);
+ }

if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_before_interrupt(&svm->vcpu);
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 0812d70085d7..1405ea3549b8 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -584,6 +584,11 @@ void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
void sev_es_vcpu_put(struct vcpu_svm *svm);

+/* vmenter.S */
+
+void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
+void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
+
/* VMSA Accessor functions */

static inline struct vmcb_save_area *get_vmsa(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 1ec1ac40e328..6feb8c08f45a 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -168,3 +168,53 @@ SYM_FUNC_START(__svm_vcpu_run)
pop %_ASM_BP
ret
SYM_FUNC_END(__svm_vcpu_run)
+
+/**
+ * __svm_sev_es_vcpu_run - Run an SEV-ES vCPU via a transition to SVM guest mode
+ * @vmcb_pa: unsigned long, physical address of the VMCB
+ */
+SYM_FUNC_START(__svm_sev_es_vcpu_run)
+ push %_ASM_BP
+#ifdef CONFIG_X86_64
+ push %r15
+ push %r14
+ push %r13
+ push %r12
+#else
+ push %edi
+ push %esi
+#endif
+ push %_ASM_BX
+
+ /* Enter guest mode */
+ mov %_ASM_ARG1, %_ASM_AX
+ sti
+
+1: vmrun %_ASM_AX
+ jmp 3f
+2: cmpb $0, kvm_rebooting
+ jne 3f
+ ud2
+ _ASM_EXTABLE(1b, 2b)
+
+3: cli
+
+#ifdef CONFIG_RETPOLINE
+ /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+ FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+#endif
+
+ pop %_ASM_BX
+
+#ifdef CONFIG_X86_64
+ pop %r12
+ pop %r13
+ pop %r14
+ pop %r15
+#else
+ pop %esi
+ pop %edi
+#endif
+ pop %_ASM_BP
+ ret
+SYM_FUNC_END(__svm_sev_es_vcpu_run)
--
2.28.0