Subject: Re: [PATCH 2/3] KVM: x86: Avoid guest page table walk when gpa_available is set
2017-08-12 0:52 GMT+08:00 Paolo Bonzini <pbonzini@redhat.com>:
> From: Brijesh Singh <brijesh.singh@amd.com>
>
> When a guest causes a page fault which requires emulation, the
> vcpu->arch.gpa_available flag is set to indicate that cr2 contains a
> valid GPA.
>
> Currently, emulator_read_write_onepage() makes use of the gpa_available
> flag to avoid a guest page table walk for known MMIO regions. Let's not
> limit the gpa_available optimization to just MMIO regions: this patch
> extends the check to skip the page table walk whenever the gpa_available
> flag is set.
>
> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
> [Fix EPT=0 according to Wanpeng Li's fix, plus ensure VMX also uses the
> new code. - Paolo]
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Reviewed-by: Wanpeng Li <wanpeng.li@hotmail.com>
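
As a side note for readers following along outside the kernel tree, below is a
minimal, standalone C sketch of the caching pattern the patch implements. It is
not the KVM code itself, and every name in it (vcpu_state, on_page_fault,
reuse_cached_gpa, and so on) is illustrative only: the faulting address is
cached as a GPA only when the MMU uses two-dimensional paging, the cache is
invalidated at every VM exit, and the emulator reuses it when the in-page
offset of the access matches.

/*
 * Minimal standalone sketch (not the KVM code itself) of the pattern
 * described above.  All names here are illustrative, not kernel APIs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;
typedef uint64_t gva_t;

#define PAGE_MASK (~0xfffULL)   /* low 12 bits are the in-page offset */

struct vcpu_state {
	bool  gpa_available;    /* fault address is usable as a GPA */
	gpa_t gpa_val;          /* GPA cached by the last page fault */
};

/* Run at every VM exit: the cached value is only valid for one fault. */
static void on_vm_exit(struct vcpu_state *v)
{
	v->gpa_available = false;
}

/*
 * Run from the page-fault handler.  With two-dimensional paging
 * (EPT/NPT, a "direct map") the fault address already is a GPA; with
 * shadow paging it is a GVA or nested GPA, so it must not be cached.
 */
static void on_page_fault(struct vcpu_state *v, uint64_t fault_address,
			  bool direct_map)
{
	if (direct_map) {
		v->gpa_available = true;
		v->gpa_val = fault_address;
	}
}

/*
 * In the emulator: reuse the cached GPA instead of walking the guest
 * page tables, provided the in-page offsets of the emulated access and
 * of the cached fault address agree.
 */
static bool reuse_cached_gpa(const struct vcpu_state *v, gva_t addr,
			     gpa_t *gpa)
{
	if (v->gpa_available &&
	    (addr & ~PAGE_MASK) == (v->gpa_val & ~PAGE_MASK)) {
		*gpa = v->gpa_val;
		return true;
	}
	return false;           /* fall back to a guest page-table walk */
}

int main(void)
{
	struct vcpu_state v = { 0 };
	gpa_t gpa = 0;

	/* Fault on a (hypothetical) MMIO address, in-page offset 0x0f0. */
	on_page_fault(&v, 0xfee000f0ULL, true);

	/* Emulated access whose GVA has the same in-page offset. */
	if (reuse_cached_gpa(&v, 0x7fffee0000f0ULL, &gpa))
		printf("reused cached gpa 0x%llx, no page walk\n",
		       (unsigned long long)gpa);

	on_vm_exit(&v);         /* the next exit invalidates the cache */
	return 0;
}

The real patch additionally gates the reuse on emulator_can_use_gpa(ctxt), as
visible in the x86.c hunk below, before trusting the cached value; the sketch
leaves that check out for brevity.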

> ---
>  arch/x86/include/asm/kvm_host.h |  3 ++-
>  arch/x86/kvm/mmu.c              |  9 +++++++++
>  arch/x86/kvm/svm.c              |  3 +--
>  arch/x86/kvm/vmx.c              |  3 ---
>  arch/x86/kvm/x86.c              | 19 ++++++-------------
>  5 files changed, 18 insertions(+), 19 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 9e4862e0e978..6db0ed9cf59e 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -685,8 +685,9 @@ struct kvm_vcpu_arch {
>  	int pending_ioapic_eoi;
>  	int pending_external_vector;
>  
> -	/* GPA available (AMD only) */
> +	/* GPA available */
>  	bool gpa_available;
> +	gpa_t gpa_val;
>  
>  	/* be preempted when it's in kernel-mode(cpl=0) */
>  	bool preempted_in_kernel;
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 5339d83916bf..f5c3f8e7d29f 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -4829,6 +4829,15 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
>  	enum emulation_result er;
>  	bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);
>  
> +	/*
> +	 * With shadow page tables, fault_address contains a GVA
> +	 * or nested GPA.
> +	 */
> +	if (vcpu->arch.mmu.direct_map) {
> +		vcpu->arch.gpa_available = true;
> +		vcpu->arch.gpa_val = cr2;
> +	}
> +
>  	if (unlikely(error_code & PFERR_RSVD_MASK)) {
>  		r = handle_mmio_page_fault(vcpu, cr2, direct);
>  		if (r == RET_MMIO_PF_EMULATE) {
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 1fa9ee5660f4..c5c6b182cddf 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -4235,8 +4235,7 @@ static int handle_exit(struct kvm_vcpu *vcpu)
>  	u32 exit_code = svm->vmcb->control.exit_code;
>  
>  	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
> -
> -	vcpu->arch.gpa_available = (exit_code == SVM_EXIT_NPF);
> +	vcpu->arch.gpa_available = false;
>  
>  	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
>  		vcpu->arch.cr0 = svm->vmcb->save.cr0;
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 45fb0ea78ee8..79efb00dd70d 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -6393,9 +6393,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
>  	error_code |= (exit_qualification & 0x100) != 0 ?
>  			PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
>  
> -	vcpu->arch.gpa_available = true;
>  	vcpu->arch.exit_qualification = exit_qualification;
> -
>  	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
>  }
>
> @@ -6410,7 +6408,6 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
>  		return kvm_skip_emulated_instruction(vcpu);
>  	}
>  
> -	vcpu->arch.gpa_available = true;
>  	ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
>  	if (ret >= 0)
>  		return ret;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index e40a779711a9..bb05b705c295 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -4657,25 +4657,18 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
>  	 */
>  	if (vcpu->arch.gpa_available &&
>  	    emulator_can_use_gpa(ctxt) &&
> -	    vcpu_is_mmio_gpa(vcpu, addr, exception->address, write) &&
> -	    (addr & ~PAGE_MASK) == (exception->address & ~PAGE_MASK)) {
> -		gpa = exception->address;
> -		goto mmio;
> +	    (addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK)) {
> +		gpa = vcpu->arch.gpa_val;
> +		ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
> +	} else {
> +		ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
>  	}
>  
> -	ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
> -
>  	if (ret < 0)
>  		return X86EMUL_PROPAGATE_FAULT;
> -
> -	/* For APIC access vmexit */
> -	if (ret)
> -		goto mmio;
> -
> -	if (ops->read_write_emulate(vcpu, gpa, val, bytes))
> +	if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
>  		return X86EMUL_CONTINUE;
>  
> -mmio:
>  	/*
>  	 * Is this MMIO handled locally?
>  	 */
> --
> 1.8.3.1
>
>
