From: Michael Roth <michael.roth@amd.com>
Subject: [PATCH v13 18/26] KVM: SEV: Implement gmem hook for initializing private pages
Date: Thu, 18 Apr 2024

    This will handle the RMP table updates needed to put a page into a
    private state before mapping it into an SEV-SNP guest.

    Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
    Signed-off-by: Michael Roth <michael.roth@amd.com>
    ---
 arch/x86/kvm/Kconfig   |  1 +
 arch/x86/kvm/svm/sev.c | 98 ++++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.c |  2 +
 arch/x86/kvm/svm/svm.h |  5 +++
 arch/x86/kvm/x86.c     |  5 +++
 virt/kvm/guest_memfd.c |  4 +-
 6 files changed, 113 insertions(+), 2 deletions(-)
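
A note for reviewers, kept below the "---" cutline so it stays out of
the commit message: the two snippets that follow are stand-alone
userspace sketches, not kernel code. They reuse names from this patch
for readability, hard-code the x86-64 values of PTRS_PER_PMD and the
PG_LEVEL_* constants, and stub out the RMP entirely, so they only
illustrate the arithmetic, not real hardware behavior.

First, the 2M-vs-4K decision made by sev_gmem_prepare(): if the folio
order allows a 2M mapping, the PFN and GFN are rounded down to the 2M
boundary so a single RMP entry can cover the whole range.

/* Sketch of the level-selection math in sev_gmem_prepare(). Uses the
 * x86-64 values PTRS_PER_PMD = 512 and KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M)
 * = 9. This ALIGN_DOWN assumes a power-of-two alignment.
 * Build: cc -o level_sketch level_sketch.c && ./level_sketch
 */
#include <stdint.h>
#include <stdio.h>

#define PTRS_PER_PMD	512	/* 4K pages per 2M mapping */
#define PG_LEVEL_4K	1
#define PG_LEVEL_2M	2
#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

static int max_level_for_order(int order)
{
	return order >= 9 ? PG_LEVEL_2M : PG_LEVEL_4K;
}

int main(void)
{
	uint64_t pfn = 0x1234, gfn = 0x5678;	/* hypothetical inputs */
	int max_order = 9;			/* e.g. a 2M THP folio */
	uint64_t pfn_aligned, gfn_aligned;
	int level;

	/* Pretend is_pfn_range_shared() said the whole 2M range is
	 * shared; a large folio then gets one 2M RMP entry. */
	if (max_level_for_order(max_order) > PG_LEVEL_4K) {
		level = PG_LEVEL_2M;
		pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
		gfn_aligned = ALIGN_DOWN(gfn, PTRS_PER_PMD);
	} else {
		level = PG_LEVEL_4K;
		pfn_aligned = pfn;
		gfn_aligned = gfn;
	}

	/* Prints: level 2: pfn 0x1234 -> 0x1200, gfn 0x5678 -> 0x5600 */
	printf("level %d: pfn 0x%llx -> 0x%llx, gfn 0x%llx -> 0x%llx\n",
	       level,
	       (unsigned long long)pfn, (unsigned long long)pfn_aligned,
	       (unsigned long long)gfn, (unsigned long long)gfn_aligned);
	return 0;
}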
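
Second, the fallback: is_pfn_range_shared() walks every PFN in the
2M-aligned range, and a single already-assigned RMP entry forces the
whole range back to 4K granularity. A toy RMP (a bool array with
made-up indices) shows the early-exit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RANGE 512	/* PTRS_PER_PMD: PFNs covered by one 2M entry */

static bool rmp_assigned[RANGE];	/* toy stand-in for the RMP */

/* Mirrors the loop in is_pfn_range_shared(): true iff no PFN in
 * [start, end) is already assigned (i.e. private) in the toy RMP. */
static bool is_pfn_range_shared(uint64_t start, uint64_t end)
{
	for (uint64_t pfn = start; pfn < end; pfn++)
		if (rmp_assigned[pfn])
			return false;
	return true;
}

int main(void)
{
	printf("all shared:   %d\n", is_pfn_range_shared(0, RANGE)); /* 1 */

	rmp_assigned[300] = true;	/* one private page in the range... */
	/* ...means no 2M promotion; sev_gmem_prepare() would fall back
	 * to a single 4K entry for the faulting PFN. */
	printf("after assign: %d\n", is_pfn_range_shared(0, RANGE)); /* 0 */
	return 0;
}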

    diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
    index 5e72faca4e8f..10768f13b240 100644
    --- a/arch/x86/kvm/Kconfig
    +++ b/arch/x86/kvm/Kconfig
    @@ -137,6 +137,7 @@ config KVM_AMD_SEV
 	depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
 	select ARCH_HAS_CC_PLATFORM
 	select KVM_GENERIC_PRIVATE_MEM
+	select HAVE_KVM_GMEM_PREPARE
 	help
 	  Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
 	  with Encrypted State (SEV-ES) on AMD processors.
    diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
    index 2e0e825b6436..29f6e8dc29c8 100644
    --- a/arch/x86/kvm/svm/sev.c
    +++ b/arch/x86/kvm/svm/sev.c
    @@ -4357,3 +4357,101 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
 out_no_trace:
 	put_page(pfn_to_page(pfn));
 }
+
+static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)
+{
+	kvm_pfn_t pfn = start;
+
+	while (pfn < end) {
+		int ret, rmp_level;
+		bool assigned;
+
+		ret = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
+		if (ret) {
+			pr_warn_ratelimited("SEV: Failed to retrieve RMP entry: PFN 0x%llx GFN start 0x%llx GFN end 0x%llx RMP level %d error %d\n",
+					    pfn, start, end, rmp_level, ret);
+			return false;
+		}
+
+		if (assigned) {
+			pr_debug("%s: overlap detected, PFN 0x%llx start 0x%llx end 0x%llx RMP level %d\n",
+				 __func__, pfn, start, end, rmp_level);
+			return false;
+		}
+
+		pfn++;
+	}
+
+	return true;
+}
+
+static u8 max_level_for_order(int order)
+{
+	if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
+		return PG_LEVEL_2M;
+
+	return PG_LEVEL_4K;
+}
+
+static bool is_large_rmp_possible(struct kvm *kvm, kvm_pfn_t pfn, int order)
+{
+	kvm_pfn_t pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
+
+	/*
+	 * If this is a large folio, and the entire 2M range containing the
+	 * PFN is currently shared, then the entire 2M-aligned range can be
+	 * set to private via a single 2M RMP entry.
+	 */
+	if (max_level_for_order(order) > PG_LEVEL_4K &&
+	    is_pfn_range_shared(pfn_aligned, pfn_aligned + PTRS_PER_PMD))
+		return true;
+
+	return false;
+}
+
+int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	kvm_pfn_t pfn_aligned;
+	gfn_t gfn_aligned;
+	int level, rc;
+	bool assigned;
+
+	if (!sev_snp_guest(kvm))
+		return 0;
+
+	rc = snp_lookup_rmpentry(pfn, &assigned, &level);
+	if (rc) {
+		pr_err_ratelimited("SEV: Failed to look up RMP entry: GFN %llx PFN %llx error %d\n",
+				   gfn, pfn, rc);
+		return -ENOENT;
+	}
+
+	if (assigned) {
+		pr_debug("%s: already assigned: gfn %llx pfn %llx max_order %d level %d\n",
+			 __func__, gfn, pfn, max_order, level);
+		return 0;
+	}
+
+	if (is_large_rmp_possible(kvm, pfn, max_order)) {
+		level = PG_LEVEL_2M;
+		pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
+		gfn_aligned = ALIGN_DOWN(gfn, PTRS_PER_PMD);
+	} else {
+		level = PG_LEVEL_4K;
+		pfn_aligned = pfn;
+		gfn_aligned = gfn;
+	}
+
+	rc = rmp_make_private(pfn_aligned, gfn_to_gpa(gfn_aligned), level, sev->asid, false);
+	if (rc) {
+		pr_err_ratelimited("SEV: Failed to update RMP entry: GFN %llx PFN %llx level %d error %d\n",
+				   gfn, pfn, level, rc);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: updated: gfn %llx pfn %llx pfn_aligned %llx max_order %d level %d\n",
+		 __func__, gfn, pfn, pfn_aligned, max_order, level);
+
+	return 0;
+}
    diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
    index 9dc929316c5d..3e8d0752bf1b 100644
    --- a/arch/x86/kvm/svm/svm.c
    +++ b/arch/x86/kvm/svm/svm.c
    @@ -5080,6 +5080,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
 	.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
 	.alloc_apic_backing_page = svm_alloc_apic_backing_page,
+
+	.gmem_prepare = sev_gmem_prepare,
 };

 /*
    diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
    index 81e335dca281..7712ed90aae8 100644
    --- a/arch/x86/kvm/svm/svm.h
    +++ b/arch/x86/kvm/svm/svm.h
    @@ -730,6 +730,7 @@ extern unsigned int max_sev_asid;
 void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
 void sev_vcpu_unblocking(struct kvm_vcpu *vcpu);
 void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
+int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
 #else
 static inline struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu) {
 	return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
@@ -746,6 +747,10 @@ static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXI
 static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
 static inline void sev_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
 static inline void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu) {}
+static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
+{
+	return 0;
+}

 #endif

    diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
    index 436078b9e5aa..2e911dc0a991 100644
    --- a/arch/x86/kvm/x86.c
    +++ b/arch/x86/kvm/x86.c
    @@ -13610,6 +13610,11 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);

 #ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
+bool kvm_arch_gmem_prepare_needed(struct kvm *kvm)
+{
+	return kvm->arch.vm_type == KVM_X86_SNP_VM;
+}
+
 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
 {
 	return static_call(kvm_x86_gmem_prepare)(kvm, pfn, gfn, max_order);
    diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
    index 9d7c6a70c547..b814e5d61f8e 100644
    --- a/virt/kvm/guest_memfd.c
    +++ b/virt/kvm/guest_memfd.c
    @@ -46,8 +46,8 @@ static int kvm_gmem_prepare_folio(struct inode *inode, pgoff_t index, struct fol
 		gfn = slot->base_gfn + index - slot->gmem.pgoff;
 		rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, compound_order(compound_head(page)));
 		if (rc) {
-			pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx, error %d.\n",
-					    index, rc);
+			pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
+					    index, gfn, pfn, rc);
 			return rc;
 		}
 	}
    --
    2.25.1
