Subject: [PATCH RFC v7 11/64] KVM: SEV: Support private pages in LAUNCH_UPDATE_DATA
Date: 14 Dec 2022
From: Nikunj A Dadhania <nikunj@amd.com>

The pre-boot guest payload needs to be encrypted, and the VMM has
already copied it over to the private fd. Add support for getting the
PFN from the memfile fd so the payload can be encrypted in place.
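
In short, when UPM mode is enabled, sev_pin_memory() no longer pins
user pages; it looks up struct pages via the restricted memfd instead.
A minimal sketch of the resulting branch (condensed from the diff
below; error handling and the pages_locked accounting are trimmed):

	if (kvm_is_upm_enabled(kvm)) {
		/* UPM: resolve struct pages via the restricted memfd */
		ret = sev_get_memfile_pfn(kvm, uaddr, ulen, npages, pages);
	} else {
		/* legacy path: long-term pin of the user VA range */
		npinned = pin_user_pages_fast(uaddr, npages,
					      write ? FOLL_WRITE : 0, pages);
	}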

Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
---
 arch/x86/kvm/svm/sev.c | 79 ++++++++++++++++++++++++++++++++++--------
 1 file changed, 64 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index a7e4e3005786..ae4920aeb281 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -107,6 +107,11 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm)
 	return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
 }
 
+static bool kvm_is_upm_enabled(struct kvm *kvm)
+{
+	return kvm->arch.upm_mode;
+}
+
 /* Must be called with the sev_bitmap_lock held */
 static bool __sev_recycle_asids(int min_asid, int max_asid)
 {
@@ -382,6 +387,38 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return ret;
 }
 
+static int sev_get_memfile_pfn_handler(struct kvm *kvm, struct kvm_gfn_range *range, void *data)
+{
+	struct kvm_memory_slot *memslot = range->slot;
+	struct page **pages = data;
+	int ret = 0, i = 0;
+	kvm_pfn_t pfn;
+	gfn_t gfn;
+
+	for (gfn = range->start; gfn < range->end; gfn++) {
+		int order;
+
+		ret = kvm_restricted_mem_get_pfn(memslot, gfn, &pfn, &order);
+		if (ret)
+			return ret;
+
+		if (is_error_noslot_pfn(pfn))
+			return -EFAULT;
+
+		pages[i++] = pfn_to_page(pfn);
+	}
+
+	return ret;
+}
+
+static int sev_get_memfile_pfn(struct kvm *kvm, unsigned long addr,
+			       unsigned long size, unsigned long npages,
+			       struct page **pages)
+{
+	return kvm_vm_do_hva_range_op(kvm, addr, size,
+				      sev_get_memfile_pfn_handler, pages);
+}
+
 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 				    unsigned long ulen, unsigned long *n,
 				    int write)
@@ -424,16 +461,25 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 	if (!pages)
 		return ERR_PTR(-ENOMEM);
 
-	/* Pin the user virtual address. */
-	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
-	if (npinned != npages) {
-		pr_err("SEV: Failure locking %lu pages.\n", npages);
-		ret = -ENOMEM;
-		goto err;
+	if (kvm_is_upm_enabled(kvm)) {
+		/* Get the PFN from memfile */
+		if (sev_get_memfile_pfn(kvm, uaddr, ulen, npages, pages)) {
+			pr_err("%s: ERROR: unable to find slot for uaddr %lx\n", __func__, uaddr);
+			ret = -ENOMEM;
+			goto err;
+		}
+	} else {
+		/* Pin the user virtual address. */
+		npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+		if (npinned != npages) {
+			pr_err("SEV: Failure locking %lu pages.\n", npages);
+			ret = -ENOMEM;
+			goto err;
+		}
+		sev->pages_locked = locked;
 	}
 
 	*n = npages;
-	sev->pages_locked = locked;
 
 	return pages;

@@ -514,6 +560,7 @@ static int sev_launch_update_shared_gfn_handler(struct kvm *kvm,
 
 	size = (range->end - range->start) << PAGE_SHIFT;
 	vaddr_end = vaddr + size;
+	WARN_ON(size < PAGE_SIZE);
 
 	/* Lock the user memory. */
 	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
@@ -554,13 +601,16 @@ static int sev_launch_update_shared_gfn_handler(struct kvm *kvm,
 	}
 
 e_unpin:
-	/* content of memory is updated, mark pages dirty */
-	for (i = 0; i < npages; i++) {
-		set_page_dirty_lock(inpages[i]);
-		mark_page_accessed(inpages[i]);
+	if (!kvm_is_upm_enabled(kvm)) {
+		/* content of memory is updated, mark pages dirty */
+		for (i = 0; i < npages; i++) {
+			set_page_dirty_lock(inpages[i]);
+			mark_page_accessed(inpages[i]);
+		}
+		/* unlock the user pages */
+		sev_unpin_memory(kvm, inpages, npages);
 	}
-	/* unlock the user pages */
-	sev_unpin_memory(kvm, inpages, npages);
+
 	return ret;
 }

@@ -609,9 +659,8 @@ static int sev_launch_update_priv_gfn_handler(struct kvm *kvm,
 			goto e_ret;
 		kvm_release_pfn_clean(pfn);
 	}
-	kvm_vm_set_region_attr(kvm, range->start, range->end,
-			       true /* priv_attr */);
 
+	kvm_vm_set_region_attr(kvm, range->start, range->end, KVM_MEMORY_ATTRIBUTE_PRIVATE);
 e_ret:
 	return ret;
 }
--
2.25.1
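
For context, a hypothetical VMM-side sketch (not part of this patch) of
how this path is driven: per the commit message the payload is already
in the private fd, so the VMM only issues the launch-update command for
the corresponding guest range. KVM_MEMORY_ENCRYPT_OP, struct
kvm_sev_cmd and struct kvm_sev_launch_update_data are the existing SEV
uAPI; the helper name and sev_fd plumbing here are illustrative.

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Ask KVM to encrypt 'len' bytes of guest memory at 'uaddr' in place. */
	static int sev_launch_update_data(int vm_fd, int sev_fd,
					  unsigned long uaddr, __u32 len)
	{
		struct kvm_sev_launch_update_data update = {
			.uaddr = uaddr,
			.len = len,
		};
		struct kvm_sev_cmd cmd = {
			.id = KVM_SEV_LAUNCH_UPDATE_DATA,
			.data = (unsigned long)&update,
			.sev_fd = sev_fd,	/* fd of /dev/sev */
		};

		return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
	}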