Subject: Re: [PATCH v19 096/130] KVM: VMX: Move NMI/exception handler to common helper

    On 2/26/2024 4:26 PM, isaku.yamahata@intel.com wrote:
    > From: Sean Christopherson <sean.j.christopherson@intel.com>
    >
    > TDX handles NMI/exception exits mostly the same as the VMX case. The
    > difference is how to retrieve the exit qualification. To share the code
    > with TDX, move the NMI/exception handlers to a common header, common.h.

    I suggest adding "No functional change intended." to the changelog.

    >
    > Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
    > Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
    > ---
    >  arch/x86/kvm/vmx/common.h | 59 +++++++++++++++++++++++++++++++++
    >  arch/x86/kvm/vmx/vmx.c    | 68 +++++----------------------------------
    >  2 files changed, 67 insertions(+), 60 deletions(-)
    >
    > diff --git a/arch/x86/kvm/vmx/common.h b/arch/x86/kvm/vmx/common.h
    > index 6f21d0d48809..632af7a76d0a 100644
    > --- a/arch/x86/kvm/vmx/common.h
    > +++ b/arch/x86/kvm/vmx/common.h
    > @@ -4,8 +4,67 @@
    >
    >  #include <linux/kvm_host.h>
    >
    > +#include <asm/traps.h>
    > +
    >  #include "posted_intr.h"
    >  #include "mmu.h"
    > +#include "vmcs.h"
    > +#include "x86.h"
    > +
    > +extern unsigned long vmx_host_idt_base;
    > +void vmx_do_interrupt_irqoff(unsigned long entry);
    > +void vmx_do_nmi_irqoff(void);
    > +
    > +static inline void vmx_handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
    > +{
    > +	/*
    > +	 * Save xfd_err to guest_fpu before interrupt is enabled, so the
    > +	 * MSR value is not clobbered by the host activity before the guest
    > +	 * has chance to consume it.
    > +	 *
    > +	 * Do not blindly read xfd_err here, since this exception might
    > +	 * be caused by L1 interception on a platform which doesn't
    > +	 * support xfd at all.
    > +	 *
    > +	 * Do it conditionally upon guest_fpu::xfd. xfd_err matters
    > +	 * only when xfd contains a non-zero value.
    > +	 *
    > +	 * Queuing exception is done in vmx_handle_exit. See comment there.
    > +	 */
    > +	if (vcpu->arch.guest_fpu.fpstate->xfd)
    > +		rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
    > +}
    > +
    > +static inline void vmx_handle_exception_irqoff(struct kvm_vcpu *vcpu,
    > +					       u32 intr_info)
    > +{
    > +	/* if exit due to PF check for async PF */
    > +	if (is_page_fault(intr_info))
    > +		vcpu->arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
    > +	/* if exit due to NM, handle before interrupts are enabled */
    > +	else if (is_nm_fault(intr_info))
    > +		vmx_handle_nm_fault_irqoff(vcpu);
    > +	/* Handle machine checks before interrupts are enabled */
    > +	else if (is_machine_check(intr_info))
    > +		kvm_machine_check();
    > +}
    > +
    > +static inline void vmx_handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
    > +							 u32 intr_info)
    > +{
    > +	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
    > +	gate_desc *desc = (gate_desc *)vmx_host_idt_base + vector;
    > +
    > +	if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
    > +		    "unexpected VM-Exit interrupt info: 0x%x", intr_info))
    > +		return;
    > +
    > +	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
    > +	vmx_do_interrupt_irqoff(gate_offset(desc));
    > +	kvm_after_interrupt(vcpu);
    > +
    > +	vcpu->arch.at_instruction_boundary = true;
    > +}
    >
    >  static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
    > 					     unsigned long exit_qualification)
    > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
    > index 29d891e0795e..f8a00a766c40 100644
    > --- a/arch/x86/kvm/vmx/vmx.c
    > +++ b/arch/x86/kvm/vmx/vmx.c
    > @@ -518,7 +518,7 @@ static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
    > 	vmx->segment_cache.bitmask = 0;
    >  }
    >
    > -static unsigned long host_idt_base;
    > +unsigned long vmx_host_idt_base;
    >
    >  #if IS_ENABLED(CONFIG_HYPERV)
    >  static bool __read_mostly enlightened_vmcs = true;
    > @@ -4273,7 +4273,7 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
    > 	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
    > 	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
    >
    > -	vmcs_writel(HOST_IDTR_BASE, host_idt_base);   /* 22.2.4 */
    > +	vmcs_writel(HOST_IDTR_BASE, vmx_host_idt_base);   /* 22.2.4 */
    >
    > 	vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */
    >
    > @@ -5166,7 +5166,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
    > 	intr_info = vmx_get_intr_info(vcpu);
    >
    > 	/*
    > -	 * Machine checks are handled by handle_exception_irqoff(), or by
    > +	 * Machine checks are handled by vmx_handle_exception_irqoff(), or by
    > 	 * vmx_vcpu_run() if a #MC occurs on VM-Entry.  NMIs are handled by
    > 	 * vmx_vcpu_enter_exit().
    > 	 */
    > @@ -5174,7 +5174,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
    > 		return 1;
    >
    > 	/*
    > -	 * Queue the exception here instead of in handle_nm_fault_irqoff().
    > +	 * Queue the exception here instead of in vmx_handle_nm_fault_irqoff().
    > 	 * This ensures the nested_vmx check is not skipped so vmexit can
    > 	 * be reflected to L1 (when it intercepts #NM) before reaching this
    > 	 * point.
    > @@ -6889,59 +6889,6 @@ void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
    > 	vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
    >  }
    >
    > -void vmx_do_interrupt_irqoff(unsigned long entry);
    > -void vmx_do_nmi_irqoff(void);
    > -
    > -static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
    > -{
    > -	/*
    > -	 * Save xfd_err to guest_fpu before interrupt is enabled, so the
    > -	 * MSR value is not clobbered by the host activity before the guest
    > -	 * has chance to consume it.
    > -	 *
    > -	 * Do not blindly read xfd_err here, since this exception might
    > -	 * be caused by L1 interception on a platform which doesn't
    > -	 * support xfd at all.
    > -	 *
    > -	 * Do it conditionally upon guest_fpu::xfd. xfd_err matters
    > -	 * only when xfd contains a non-zero value.
    > -	 *
    > -	 * Queuing exception is done in vmx_handle_exit. See comment there.
    > -	 */
    > -	if (vcpu->arch.guest_fpu.fpstate->xfd)
    > -		rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
    > -}
    > -
    > -static void handle_exception_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
    > -{
    > -	/* if exit due to PF check for async PF */
    > -	if (is_page_fault(intr_info))
    > -		vcpu->arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
    > -	/* if exit due to NM, handle before interrupts are enabled */
    > -	else if (is_nm_fault(intr_info))
    > -		handle_nm_fault_irqoff(vcpu);
    > -	/* Handle machine checks before interrupts are enabled */
    > -	else if (is_machine_check(intr_info))
    > -		kvm_machine_check();
    > -}
    > -
    > -static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
    > -					     u32 intr_info)
    > -{
    > -	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
    > -	gate_desc *desc = (gate_desc *)host_idt_base + vector;
    > -
    > -	if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
    > -		    "unexpected VM-Exit interrupt info: 0x%x", intr_info))
    > -		return;
    > -
    > -	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
    > -	vmx_do_interrupt_irqoff(gate_offset(desc));
    > -	kvm_after_interrupt(vcpu);
    > -
    > -	vcpu->arch.at_instruction_boundary = true;
    > -}
    > -
    >  void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
    >  {
    > 	struct vcpu_vmx *vmx = to_vmx(vcpu);
    > @@ -6950,9 +6897,10 @@ void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
    > 		return;
    >
    > 	if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
    > -		handle_external_interrupt_irqoff(vcpu, vmx_get_intr_info(vcpu));
    > +		vmx_handle_external_interrupt_irqoff(vcpu,
    > +						     vmx_get_intr_info(vcpu));
    > 	else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
    > -		handle_exception_irqoff(vcpu, vmx_get_intr_info(vcpu));
    > +		vmx_handle_exception_irqoff(vcpu, vmx_get_intr_info(vcpu));
    >  }
    >
    >  /*
    > @@ -8284,7 +8232,7 @@ __init int vmx_hardware_setup(void)
    > 	int r;
    >
    > 	store_idt(&dt);
    > -	host_idt_base = dt.address;
    > +	vmx_host_idt_base = dt.address;
    >
    > 	vmx_setup_user_return_msrs();
    >
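
    For reference, here is a rough sketch of how I'd expect a TDX exit
    path to consume these shared helpers. tdx_handle_exit_irqoff(),
    struct vcpu_tdx, to_tdx() and tdx_get_intr_info() are hypothetical
    names, used only to illustrate the sharing this patch enables:

	/* Hypothetical TDX-side caller; the tdx_* names are illustrative. */
	void tdx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
	{
		struct vcpu_tdx *tdx = to_tdx(vcpu);

		if (tdx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
			vmx_handle_external_interrupt_irqoff(vcpu,
							     tdx_get_intr_info(vcpu));
		else if (tdx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
			vmx_handle_exception_irqoff(vcpu, tdx_get_intr_info(vcpu));
	}

    Since the two callers would differ only in how they fetch the interrupt
    info, keeping the handlers as static inlines in common.h shares the
    logic without duplicating it; only vmx_host_idt_base and the asm stub
    declarations need to be visible outside vmx.c.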

