    Subject: [PATCH v19 016/130] KVM: x86/mmu: Introduce kvm_mmu_map_tdp_page() for use by TDX
    From: Sean Christopherson <sean.j.christopherson@intel.com>

    Introduce a helper to directly (pun intended) fault in a TDP page
    without having to go through the full page fault path. This allows
    TDX to get the resulting pfn and also allows the RET_PF_* enums to
    stay in mmu.c where they belong.
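
    For illustration only (not part of this patch), a TDX-side caller
    could use the helper roughly as below; the function name
    tdx_pre_fault_private_gpa() and the choice of error code bits are
    assumptions made for the sketch, only kvm_mmu_map_tdp_page() and the
    PFERR_*/PG_LEVEL_* names come from this series:

    /* Hypothetical caller sketch: pre-fault one private GPA. */
    static int tdx_pre_fault_private_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
    {
    	/* Assumed access type: a private (encrypted) write fault. */
    	u64 error_code = PFERR_WRITE_MASK | PFERR_GUEST_ENC_MASK;
    	u8 goal_level = PG_LEVEL_4K;
    	int r;

    	/* Map the page directly, allowing up to a 2M mapping. */
    	r = kvm_mmu_map_tdp_page(vcpu, gpa, error_code, PG_LEVEL_2M,
    				 &goal_level);
    	if (r)
    		return r;	/* -EAGAIN, -EFAULT or -EIO, see the changelog */

    	/* On success, goal_level says whether a 4K or 2M page was mapped. */
    	return 0;
    }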

    Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
    Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
    ---
    v19:
    - Move the patch up in the series for KVM_MEMORY_MAPPING.
    - Add a goal_level out-parameter so the caller can tell at what level
    the page was mapped, i.e. how many pages were mapped (see the caller
    sketch after the v14 -> v15 note below).

    v14 -> v15:
    - Remove the loop in kvm_mmu_map_tdp_page() and return an error code
    based on the RET_PF_xxx value to avoid a potential infinite loop. The
    caller should now loop on -EAGAIN instead, as sketched below.
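
    Taken together, those two notes imply a caller-side loop like the
    following. This is an illustrative sketch only, not code from this
    series; the function name map_gpa_range() and its parameters are made
    up for the example:

    static int map_gpa_range(struct kvm_vcpu *vcpu, gpa_t gpa, gpa_t end,
    			 u64 error_code, u8 max_level)
    {
    	u8 goal_level = PG_LEVEL_4K;
    	int r;

    	while (gpa < end) {
    		r = kvm_mmu_map_tdp_page(vcpu, gpa, error_code, max_level,
    					 &goal_level);
    		if (r == -EAGAIN)
    			continue;	/* the fault path asked for a retry */
    		if (r)
    			return r;	/* -EFAULT and -EIO are fatal here */

    		/* Advance by however much the successful call mapped. */
    		gpa += KVM_PAGES_PER_HPAGE(goal_level) * PAGE_SIZE;
    	}
    	return 0;
    }

    A real caller would presumably also bound the -EAGAIN retries or
    check for pending signals rather than spinning unconditionally.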

    Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
    ---
     arch/x86/kvm/mmu.h     |  3 +++
     arch/x86/kvm/mmu/mmu.c | 58 ++++++++++++++++++++++++++++++++++++++++++
     2 files changed, 61 insertions(+)

    diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
    index 60f21bb4c27b..d96c93a25b3b 100644
    --- a/arch/x86/kvm/mmu.h
    +++ b/arch/x86/kvm/mmu.h
    @@ -183,6 +183,9 @@ static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
     	__kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
     }
     
    +int kvm_mmu_map_tdp_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
    +			 u8 max_level, u8 *goal_level);
    +
     /*
      * Check if a given access (described through the I/D, W/R and U/S bits of a
      * page fault error code pfec) causes a permission fault with the given PTE
    diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
    index 61674d6b17aa..ca0c91f14063 100644
    --- a/arch/x86/kvm/mmu/mmu.c
    +++ b/arch/x86/kvm/mmu/mmu.c
    @@ -4615,6 +4615,64 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
     	return direct_page_fault(vcpu, fault);
     }
     
    +int kvm_mmu_map_tdp_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
    +			 u8 max_level, u8 *goal_level)
    +{
    +	int r;
    +	struct kvm_page_fault fault = (struct kvm_page_fault) {
    +		.addr = gpa,
    +		.error_code = error_code,
    +		.exec = error_code & PFERR_FETCH_MASK,
    +		.write = error_code & PFERR_WRITE_MASK,
    +		.present = error_code & PFERR_PRESENT_MASK,
    +		.rsvd = error_code & PFERR_RSVD_MASK,
    +		.user = error_code & PFERR_USER_MASK,
    +		.prefetch = false,
    +		.is_tdp = true,
    +		.is_private = error_code & PFERR_GUEST_ENC_MASK,
    +		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(vcpu->kvm),
    +	};
    +
    +	WARN_ON_ONCE(!vcpu->arch.mmu->root_role.direct);
    +	fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
    +
    +	r = mmu_topup_memory_caches(vcpu, false);
    +	if (r)
    +		return r;
    +
    +	fault.max_level = max_level;
    +	fault.req_level = PG_LEVEL_4K;
    +	fault.goal_level = PG_LEVEL_4K;
    +
    +#ifdef CONFIG_X86_64
    +	if (tdp_mmu_enabled)
    +		r = kvm_tdp_mmu_page_fault(vcpu, &fault);
    +	else
    +#endif
    +		r = direct_page_fault(vcpu, &fault);
    +
    +	if (is_error_noslot_pfn(fault.pfn) || vcpu->kvm->vm_bugged)
    +		return -EFAULT;
    +
    +	switch (r) {
    +	case RET_PF_RETRY:
    +		return -EAGAIN;
    +
    +	case RET_PF_FIXED:
    +	case RET_PF_SPURIOUS:
    +		if (goal_level)
    +			*goal_level = fault.goal_level;
    +		return 0;
    +
    +	case RET_PF_CONTINUE:
    +	case RET_PF_EMULATE:
    +	case RET_PF_INVALID:
    +	default:
    +		return -EIO;
    +	}
    +}
    +EXPORT_SYMBOL_GPL(kvm_mmu_map_tdp_page);
    +
     static void nonpaging_init_context(struct kvm_mmu *context)
     {
     	context->page_fault = nonpaging_page_fault;
    --
    2.25.1
