From: Wanpeng Li <wanpengli@tencent.com>
Subject: [PATCH v3 2/5] KVM: X86: Introduce need_cancel_enter_guest helper

Introduce the need_cancel_enter_guest() helper: several conditions
must be checked before doing CONT_RUN. In addition, the helper also
catches the case where a vmexit occurred while another event was
being delivered to guest software, since vmx_complete_interrupts()
sets the request bit in that case.
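
For illustration only (not part of the patch), here is a minimal
user-space model of the check the helper performs; the four booleans
stand in for the kernel predicates the real helper reads
(vcpu->mode == EXITING_GUEST_MODE, kvm_request_pending(vcpu),
need_resched() and signal_pending(current)):

#include <stdbool.h>
#include <stdio.h>

/* Mocked stand-ins for the kernel-side predicates. */
static bool exiting_guest_mode;	/* vcpu->mode == EXITING_GUEST_MODE */
static bool request_pending;	/* kvm_request_pending(vcpu) */
static bool resched_needed;	/* need_resched() */
static bool signal_pending_now;	/* signal_pending(current) */

/* Same boolean logic as kvm_need_cancel_enter_guest(). */
static bool need_cancel_enter_guest(void)
{
	return exiting_guest_mode || request_pending ||
	       resched_needed || signal_pending_now;
}

int main(void)
{
	/* e.g. vmx_complete_interrupts() re-queued a pending event */
	request_pending = true;
	printf("cancel CONT_RUN: %s\n",
	       need_cancel_enter_guest() ? "yes" : "no");
	return 0;
}

If any one of the four conditions holds, the fastpath must not
re-enter the guest via CONT_RUN and instead falls back to the normal
vcpu_enter_guest() path.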

Tested-by: Haiwei Li <lihaiwei@tencent.com>
Cc: Haiwei Li <lihaiwei@tencent.com>
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
---
 arch/x86/kvm/vmx/vmx.c | 12 +++++++-----
 arch/x86/kvm/x86.c     | 10 ++++++++--
 arch/x86/kvm/x86.h     |  1 +
 3 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f1f6638..5c21027 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6577,7 +6577,7 @@ bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 
 static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
-	enum exit_fastpath_completion exit_fastpath;
+	enum exit_fastpath_completion exit_fastpath = EXIT_FASTPATH_NONE;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long cr3, cr4;
 
@@ -6754,10 +6754,12 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx_recover_nmi_blocking(vmx);
 	vmx_complete_interrupts(vmx);
 
-	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
-	/* static call is better with retpolines */
-	if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
-		goto cont_run;
+	if (!kvm_need_cancel_enter_guest(vcpu)) {
+		exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
+		/* static call is better with retpolines */
+		if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
+			goto cont_run;
+	}
 
 	return exit_fastpath;
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 59958ce..4561104 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1581,6 +1581,13 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
 
+bool kvm_need_cancel_enter_guest(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
+		|| need_resched() || signal_pending(current));
+}
+EXPORT_SYMBOL_GPL(kvm_need_cancel_enter_guest);
+
 /*
  * The fast path for frequent and performance sensitive wrmsr emulation,
  * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces
@@ -8373,8 +8380,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
 		kvm_x86_ops.sync_pir_to_irr(vcpu);
 
-	if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
-	    || need_resched() || signal_pending(current)) {
+	if (kvm_need_cancel_enter_guest(vcpu)) {
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		smp_wmb();
 		local_irq_enable();
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 7b5ed8e..1906e7e 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -364,5 +364,6 @@ static inline bool kvm_dr7_valid(u64 data)
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
+bool kvm_need_cancel_enter_guest(struct kvm_vcpu *vcpu);
 
 #endif
--
2.7.4