Subject: [PATCH Part2 v6 30/49] KVM: x86/mmu: Introduce kvm_mmu_map_tdp_page() for use by TDX and SNP
From: Sean Christopherson <sean.j.christopherson@intel.com>

Introduce a helper to directly (pun intended) fault-in a TDP page
without having to go through the full page fault path. This allows
TDX to get the resulting pfn and also allows the RET_PF_* enums to
stay in mmu.c where they belong.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
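
A usage sketch, for illustration only (not part of the diff below): a
TDX/SNP-side caller that wants to pre-fault a guest page and retrieve the
backing pfn might wrap the helper along these lines. The function name
snp_prefault_private_gpa() and the error handling are hypothetical.

static int snp_prefault_private_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	kvm_pfn_t pfn;

	/*
	 * Hypothetical caller: request a write fault and restrict the
	 * mapping to 4K granularity.
	 */
	pfn = kvm_mmu_map_tdp_page(vcpu, gpa, PFERR_WRITE_MASK, PG_LEVEL_4K);

	/* The helper returns an error pfn iff the fault couldn't be fixed. */
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	return 0;
}
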
 arch/x86/kvm/mmu.h     |  3 +++
 arch/x86/kvm/mmu/mmu.c | 53 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 56 insertions(+)

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index e6cae6f22683..c99b15e97a0a 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -204,6 +204,9 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	return vcpu->arch.mmu->page_fault(vcpu, &fault);
 }
 
+kvm_pfn_t kvm_mmu_map_tdp_page(struct kvm_vcpu *vcpu, gpa_t gpa,
+			       u32 error_code, int max_level);
+
 /*
  * Check if a given access (described through the I/D, W/R and U/S bits of a
  * page fault error code pfec) causes a permission fault with the given PTE
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 997318ecebd1..569021af349a 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4100,6 +4100,59 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	return direct_page_fault(vcpu, fault);
 }
 
+kvm_pfn_t kvm_mmu_map_tdp_page(struct kvm_vcpu *vcpu, gpa_t gpa,
+			       u32 err, int max_level)
+{
+	struct kvm_page_fault fault = {
+		.addr = gpa,
+		.error_code = err,
+		.exec = err & PFERR_FETCH_MASK,
+		.write = err & PFERR_WRITE_MASK,
+		.present = err & PFERR_PRESENT_MASK,
+		.rsvd = err & PFERR_RSVD_MASK,
+		.user = err & PFERR_USER_MASK,
+		.prefetch = false,
+		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
+		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(),
+
+		.max_level = max_level,
+		.req_level = PG_LEVEL_4K,
+		.goal_level = PG_LEVEL_4K,
+	};
+	int r;
+
+	if (mmu_topup_memory_caches(vcpu, false))
+		return KVM_PFN_ERR_FAULT;
+
+	/*
+	 * Loop on the page fault path to handle the case where an mmu_notifier
+	 * invalidation triggers RET_PF_RETRY. In the normal page fault path,
+	 * KVM needs to resume the guest in case the invalidation changed any
+	 * of the page fault properties, i.e. the gpa or error code. For this
+	 * path, the gpa and error code are fixed by the caller, and the caller
+	 * expects failure if and only if the page fault can't be fixed.
+	 */
+	do {
+		/*
+		 * TODO: this should probably go through kvm_mmu_do_page_fault(),
+		 * but we need a way to control the max_level, so maybe a direct
+		 * call to kvm_tdp_page_fault, which will call into
+		 * direct_page_fault() when appropriate.
+		 */
+		//r = direct_page_fault(vcpu, &fault);
+#ifdef CONFIG_RETPOLINE
+		if (fault.is_tdp)
+			r = kvm_tdp_page_fault(vcpu, &fault);
+		else
+			r = vcpu->arch.mmu->page_fault(vcpu, &fault);
+#else
+		r = vcpu->arch.mmu->page_fault(vcpu, &fault);
+#endif
+	} while (r == RET_PF_RETRY && !is_error_noslot_pfn(fault.pfn));
+	return fault.pfn;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_map_tdp_page);
+
 static void nonpaging_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = nonpaging_page_fault;
--
2.25.1