Subject: Re: [Resend RFC PATCH V4 09/13] x86/Swiotlb/HV: Add Swiotlb bounce buffer remap function for HV IVM

Hi Christoph & Konrad:
Could you review this patch and confirm that this is the right way to
handle the memory remap required by the AMD SEV-SNP vTOM case?

Thanks.
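
For context, the key operation in this patch is the vTOM address
translation: in an Isolation VM, the shared (decrypted) alias of a page
lives at its original physical address plus shared_gpa_boundary. A
minimal sketch of that PFN arithmetic, simplified from hv_map_memory()
in the patch below (hv_shared_pfn() is only an illustrative name, not
part of the patch):

	/*
	 * Sketch only: compute the PFN of the shared alias of a kernel
	 * virtual address. Pages below vTOM (shared_gpa_boundary) are
	 * treated as private; the same page accessed at original
	 * address + shared_gpa_boundary is treated as shared.
	 */
	static unsigned long hv_shared_pfn(void *va)
	{
		return virt_to_hvpfn(va) +
			(ms_hyperv.shared_gpa_boundary >> HV_HYP_PAGE_SHIFT);
	}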

On 7/7/2021 11:46 PM, Tianyu Lan wrote:
> From: Tianyu Lan <Tianyu.Lan@microsoft.com>
>
> In an Isolation VM with AMD SEV, the bounce buffer needs to be accessed
> via an extra address space above shared_gpa_boundary (e.g. the 39-bit
> address line) reported by the Hyper-V CPUID ISOLATION_CONFIG leaf. The
> accessed physical address is the original physical address plus
> shared_gpa_boundary. In the AMD SEV-SNP spec, shared_gpa_boundary is
> called the virtual top of memory (vTOM). Memory addresses below vTOM
> are automatically treated as private, while memory above vTOM is
> treated as shared.
>
> Introduce a set_memory_decrypted_map() function to decrypt memory and
> remap it via a platform callback. Use set_memory_decrypted_map() in
> the swiotlb code, store the remap address returned by the new API, and
> use that address to copy data from/to the swiotlb bounce buffer.
>
> Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
> ---
>  arch/x86/hyperv/ivm.c             | 30 ++++++++++++++++++++++++++++++
>  arch/x86/include/asm/mshyperv.h   |  2 ++
>  arch/x86/include/asm/set_memory.h |  2 ++
>  arch/x86/mm/pat/set_memory.c      | 28 ++++++++++++++++++++++++++++
>  include/linux/swiotlb.h           |  4 ++++
>  kernel/dma/swiotlb.c              | 11 ++++++++---
>  6 files changed, 74 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
> index 8a6f4e9e3d6c..ea33935e0c17 100644
> --- a/arch/x86/hyperv/ivm.c
> +++ b/arch/x86/hyperv/ivm.c
> @@ -267,3 +267,33 @@ int hv_set_mem_enc(unsigned long addr, int numpages, bool enc)
>  			enc ? VMBUS_PAGE_NOT_VISIBLE
>  			: VMBUS_PAGE_VISIBLE_READ_WRITE);
>  }
> +
> +/*
> + * hv_map_memory - map memory to extra space in the AMD SEV-SNP Isolation VM.
> + */
> +unsigned long hv_map_memory(unsigned long addr, unsigned long size)
> +{
> +	unsigned long *pfns = kcalloc(size / HV_HYP_PAGE_SIZE,
> +				      sizeof(unsigned long),
> +				      GFP_KERNEL);
> +	unsigned long vaddr;
> +	int i;
> +
> +	if (!pfns)
> +		return (unsigned long)NULL;
> +
> +	for (i = 0; i < size / HV_HYP_PAGE_SIZE; i++)
> +		pfns[i] = virt_to_hvpfn((void *)addr + i * HV_HYP_PAGE_SIZE) +
> +			(ms_hyperv.shared_gpa_boundary >> HV_HYP_PAGE_SHIFT);
> +
> +	vaddr = (unsigned long)vmap_pfn(pfns, size / HV_HYP_PAGE_SIZE,
> +					PAGE_KERNEL_IO);
> +	kfree(pfns);
> +
> +	return vaddr;
> +}
> +
> +void hv_unmap_memory(unsigned long addr)
> +{
> +	vunmap((void *)addr);
> +}
> diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
> index fe03e3e833ac..ba3cb9e32fdb 100644
> --- a/arch/x86/include/asm/mshyperv.h
> +++ b/arch/x86/include/asm/mshyperv.h
> @@ -253,6 +253,8 @@ int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
>  int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
>  int hv_mark_gpa_visibility(u16 count, const u64 pfn[], u32 visibility);
>  int hv_set_mem_enc(unsigned long addr, int numpages, bool enc);
> +unsigned long hv_map_memory(unsigned long addr, unsigned long size);
> +void hv_unmap_memory(unsigned long addr);
>  void hv_sint_wrmsrl_ghcb(u64 msr, u64 value);
>  void hv_sint_rdmsrl_ghcb(u64 msr, u64 *value);
>  void hv_signal_eom_ghcb(void);
> diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
> index 43fa081a1adb..7a2117931830 100644
> --- a/arch/x86/include/asm/set_memory.h
> +++ b/arch/x86/include/asm/set_memory.h
> @@ -49,6 +49,8 @@ int set_memory_decrypted(unsigned long addr, int numpages);
>  int set_memory_np_noalias(unsigned long addr, int numpages);
>  int set_memory_nonglobal(unsigned long addr, int numpages);
>  int set_memory_global(unsigned long addr, int numpages);
> +unsigned long set_memory_decrypted_map(unsigned long addr, unsigned long size);
> +int set_memory_encrypted_unmap(unsigned long addr, unsigned long size);
> 
>  int set_pages_array_uc(struct page **pages, int addrinarray);
>  int set_pages_array_wc(struct page **pages, int addrinarray);
> diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
> index 6cc83c57383d..5d4d3963f4a2 100644
> --- a/arch/x86/mm/pat/set_memory.c
> +++ b/arch/x86/mm/pat/set_memory.c
> @@ -2039,6 +2039,34 @@ int set_memory_decrypted(unsigned long addr, int numpages)
>  }
>  EXPORT_SYMBOL_GPL(set_memory_decrypted);
> 
> +static unsigned long __map_memory(unsigned long addr, unsigned long size)
> +{
> +	if (hv_is_isolation_supported())
> +		return hv_map_memory(addr, size);
> +
> +	return addr;
> +}
> +
> +static void __unmap_memory(unsigned long addr)
> +{
> +	if (hv_is_isolation_supported())
> +		hv_unmap_memory(addr);
> +}
> +
> +unsigned long set_memory_decrypted_map(unsigned long addr, unsigned long size)
> +{
> +	if (__set_memory_enc_dec(addr, size / PAGE_SIZE, false))
> +		return (unsigned long)NULL;
> +
> +	return __map_memory(addr, size);
> +}
> +
> +int set_memory_encrypted_unmap(unsigned long addr, unsigned long size)
> +{
> +	__unmap_memory(addr);
> +	return __set_memory_enc_dec(addr, size / PAGE_SIZE, true);
> +}
> +
>  int set_pages_uc(struct page *page, int numpages)
>  {
>  	unsigned long addr = (unsigned long)page_address(page);
> diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
> index f507e3eacbea..5c6f6c7380ef 100644
> --- a/include/linux/swiotlb.h
> +++ b/include/linux/swiotlb.h
> @@ -72,6 +72,9 @@ extern enum swiotlb_force swiotlb_force;
>   * @end:	The end address of the swiotlb memory pool. Used to do a quick
>   *		range check to see if the memory was in fact allocated by this
>   *		API.
> + * @vstart:	The virtual start address of the swiotlb memory pool. The pool
> + *		may be remapped in the memory-encrypted case, and @vstart
> + *		stores the virtual address used for bounce buffer operations.
>   * @nslabs:	The number of IO TLB blocks (in groups of 64) between @start and
>   *		@end. For default swiotlb, this is command line adjustable via
>   *		setup_io_tlb_npages.
> @@ -89,6 +92,7 @@ extern enum swiotlb_force swiotlb_force;
>  struct io_tlb_mem {
>  	phys_addr_t start;
>  	phys_addr_t end;
> +	void *vstart;
>  	unsigned long nslabs;
>  	unsigned long used;
>  	unsigned int index;
> diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
> index d3fa4669229b..9911817250a8 100644
> --- a/kernel/dma/swiotlb.c
> +++ b/kernel/dma/swiotlb.c
> @@ -194,8 +194,13 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
>  		mem->slots[i].alloc_size = 0;
>  	}
> 
> -	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
> -	memset(vaddr, 0, bytes);
> +	mem->vstart = (void *)set_memory_decrypted_map((unsigned long)vaddr, bytes);
> +	if (!mem->vstart) {
> +		pr_err("Failed to decrypt memory.\n");
> +		return;
> +	}
> +
> +	memset(mem->vstart, 0, bytes);
>  }
> 
>  int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
> @@ -352,7 +357,7 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
>  	phys_addr_t orig_addr = mem->slots[index].orig_addr;
>  	size_t alloc_size = mem->slots[index].alloc_size;
>  	unsigned long pfn = PFN_DOWN(orig_addr);
> -	unsigned char *vaddr = phys_to_virt(tlb_addr);
> +	unsigned char *vaddr = mem->vstart + tlb_addr - mem->start;
> 
>  	if (orig_addr == INVALID_PHYS_ADDR)
>  		return;
>
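
For reviewers: the intended calling convention for the new API outside
of swiotlb would look roughly like the sketch below. This is
illustrative only and not part of the patch; the buffer setup around
the two calls is made up for the example:

	/*
	 * Illustrative only: decrypt a buffer and get back the address
	 * to access it through. In an Isolation VM this is a remapped
	 * alias above vTOM; otherwise it is the address passed in.
	 * The size must be page aligned.
	 */
	unsigned long size = 4 * PAGE_SIZE;
	unsigned long buf = __get_free_pages(GFP_KERNEL, get_order(size));
	unsigned long vaddr;

	if (!buf)
		return -ENOMEM;

	vaddr = set_memory_decrypted_map(buf, size);
	if (!vaddr) {
		free_pages(buf, get_order(size));
		return -ENOMEM;
	}

	/* ... access the shared buffer only through vaddr ... */

	/* Teardown: unmap the alias and re-encrypt the pages. */
	if (set_memory_encrypted_unmap(buf, size))
		pr_warn("Failed to re-encrypt buffer\n");
	free_pages(buf, get_order(size));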
