    Subject: Re: [Part2 PATCH v6 32/38] KVM: SVM: Add support for SEV DEBUG_DECRYPT command
    On Thu, Oct 19, 2017 at 09:34:07PM -0500, Brijesh Singh wrote:
    > The command is used for decrypting a guest memory region for debug
    > purposes.
    >
    > Cc: Thomas Gleixner <tglx@linutronix.de>
    > Cc: Ingo Molnar <mingo@redhat.com>
    > Cc: "H. Peter Anvin" <hpa@zytor.com>
    > Cc: Paolo Bonzini <pbonzini@redhat.com>
    > Cc: "Radim Krčmář" <rkrcmar@redhat.com>
    > Cc: Joerg Roedel <joro@8bytes.org>
    > Cc: Borislav Petkov <bp@suse.de>
    > Cc: Tom Lendacky <thomas.lendacky@amd.com>
    > Cc: x86@kernel.org
    > Cc: kvm@vger.kernel.org
    > Cc: linux-kernel@vger.kernel.org
    > Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
    > ---
    > arch/x86/kvm/svm.c | 179 +++++++++++++++++++++++++++++++++++++++++++++++++++++
    > 1 file changed, 179 insertions(+)
    >
    > diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
    > index 83a4d0406a4b..f19c4fb2fdc8 100644
    > --- a/arch/x86/kvm/svm.c
    > +++ b/arch/x86/kvm/svm.c
    > @@ -6023,6 +6023,182 @@ static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
    > return ret;
    > }
    >
    > +static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
    > +			       unsigned long dst, int size,
    > +			       int *error, bool enc)
    > +{
    > +	struct kvm_sev_info *sev = &kvm->arch.sev_info;
    > +	struct sev_data_dbg *data;
    > +	int ret;
    > +
    > +	data = kzalloc(sizeof(*data), GFP_KERNEL);
    > +	if (!data)
    > +		return -ENOMEM;
    > +
    > +	data->handle = sev->handle;
    > +	data->dst_addr = dst;
    > +	data->src_addr = src;
    > +	data->len = size;
    > +
    > +	ret = sev_issue_cmd(kvm,
    > +			    enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
    > +			    data, error);
    > +	kfree(data);
    > +	return ret;
    > +}
    > +
    > +/*
    > + * Decrypt source memory into a userspace or kernel buffer. If the destination
    > + * buffer or len is not aligned to a 16-byte boundary then an intermediate
    > + * buffer is used.
    > + */
    > +static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long paddr,
    > +			     unsigned long __user dst_uaddr,
    > +			     unsigned long dst_kaddr, unsigned long dst_paddr,
    > +			     int size, int *error)
    > +{
    > +	int ret, offset = 0, len = size;
    > +	struct page *tpage = NULL;
    > +
    > +	/*
    > +	 * The debug command works with 16-byte aligned inputs, so check
    > +	 * whether all inputs (src, dst and len) are 16-byte aligned. If one
    > +	 * of the inputs is not aligned then we decrypt more than requested
    > +	 * into a temporary buffer and copy the relevant portion of data into
    > +	 * the destination buffer.
    > +	 */
    > +	if (!IS_ALIGNED(paddr, 16) ||
    > +	    !IS_ALIGNED(dst_paddr, 16) ||
    > +	    !IS_ALIGNED(size, 16)) {
    > +		tpage = (void *)alloc_page(GFP_KERNEL);
    > +		if (!tpage)
    > +			return -ENOMEM;
    > +
    > +		dst_paddr = __sme_page_pa(tpage);
    > +
    > +		/*
    > +		 * If the source buffer is not aligned then the offset will be
    > +		 * used when copying the data from the temporary buffer into
    > +		 * the destination buffer.
    > +		 */
    > +		offset = paddr & 15;
    > +
    > +		/* It is safe to read more than the requested size. */
    > +		len = round_up(size + offset, 16);
    > +
    > +		paddr = round_down(paddr, 16);
    > +
    > +		/*
    > +		 * The temporary buffer may be mapped with C=0 or C=1 on the
    > +		 * x86 side, but the PSP will write the memory region with
    > +		 * C=0. Make sure the x86 cache for this memory range is
    > +		 * flushed so that we see the most recent contents after the
    > +		 * command completes.
    > +		 */
    > +		clflush_cache_range(page_address(tpage), PAGE_SIZE);
    > +	}
    > +
    > +	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, false);
    > +
    > +	/*
    > +	 * If a temporary buffer was used then copy the data from the
    > +	 * temporary buffer into the destination buffer.
    > +	 */
    > +	if (!ret && tpage) {
    > +		/*
    > +		 * If the destination buffer is a userspace buffer then use
    > +		 * copy_to_user(), otherwise memcpy().
    > +		 */
    > +		if (dst_uaddr) {
    > +			if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
    > +					 page_address(tpage) + offset, size))
    > +				ret = -EFAULT;
    > +		} else {
    > +			memcpy((void *)dst_kaddr, page_address(tpage) + offset, size);

    arch/x86/kvm/svm.c: In function ‘svm_mem_enc_op’:
    arch/x86/kvm/svm.c:6115:4: warning: argument 1 null where non-null expected [-Wnonnull]
    memcpy((void *)dst_kaddr, page_address(tpage) + offset, size);
    ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    In file included from ./arch/x86/include/asm/string.h:4:0,
    from ./include/linux/string.h:18,
    from ./include/linux/bitmap.h:8,
    from ./include/linux/cpumask.h:11,
    from ./arch/x86/include/asm/cpumask.h:4,
    from ./arch/x86/include/asm/msr.h:10,
    from ./arch/x86/include/asm/processor.h:20,
    from ./arch/x86/include/asm/cpufeature.h:4,
    from ./arch/x86/include/asm/thread_info.h:52,
    from ./include/linux/thread_info.h:37,
    from ./arch/x86/include/asm/preempt.h:6,
    from ./include/linux/preempt.h:80,
    from ./include/linux/hardirq.h:4,
    from ./include/linux/kvm_host.h:10,
    from arch/x86/kvm/svm.c:20:
    ./arch/x86/include/asm/string_64.h:31:14: note: in a call to function ‘memcpy’ declared here
    extern void *memcpy(void *to, const void *from, size_t len);
    ^~~~~~
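
    Purely as an untested illustration (not the patch author's fix), the copy-out
    in __sev_dbg_decrypt() could be arranged so that gcc never sees a potentially
    NULL memcpy() destination; whether dst_kaddr can really be 0 on this path
    depends on the callers, which are not quoted here:

    	if (!ret && tpage) {
    		void *src = page_address(tpage) + offset;

    		if (dst_uaddr) {
    			/* Userspace destination: must go through copy_to_user(). */
    			if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
    					 src, size))
    				ret = -EFAULT;
    		} else if (dst_kaddr) {
    			/* Kernel destination, known non-NULL at the memcpy(). */
    			memcpy((void *)dst_kaddr, src, size);
    		} else {
    			/* Neither destination supplied - assumed caller error. */
    			ret = -EINVAL;
    		}
    	}

    The -EINVAL fallback is only an assumption for the case where neither
    destination is supplied.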

    --
    Regards/Gruss,
    Boris.

    ECO tip #101: Trim your mails when you reply.
    --
