Subject: Re: [PATCH v6 10/14] mm: x86: Invoke hypercall when page encryption status is changed

On 3/29/20 11:22 PM, Ashish Kalra wrote:
> From: Brijesh Singh <Brijesh.Singh@amd.com>
>
> Invoke a hypercall when a memory region is changed from encrypted ->
> decrypted and vice versa. The hypervisor needs to know the page encryption
> status during guest migration.
>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: "H. Peter Anvin" <hpa@zytor.com>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: "Radim Krčmář" <rkrcmar@redhat.com>
> Cc: Joerg Roedel <joro@8bytes.org>
> Cc: Borislav Petkov <bp@suse.de>
> Cc: Tom Lendacky <thomas.lendacky@amd.com>
> Cc: x86@kernel.org
> Cc: kvm@vger.kernel.org
> Cc: linux-kernel@vger.kernel.org
> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
> Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
> ---
> arch/x86/include/asm/paravirt.h | 10 +++++
> arch/x86/include/asm/paravirt_types.h | 2 +
> arch/x86/kernel/paravirt.c | 1 +
> arch/x86/mm/mem_encrypt.c | 57 ++++++++++++++++++++++++++-
> arch/x86/mm/pat/set_memory.c | 7 ++++
> 5 files changed, 76 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
> index 694d8daf4983..8127b9c141bf 100644
> --- a/arch/x86/include/asm/paravirt.h
> +++ b/arch/x86/include/asm/paravirt.h
> @@ -78,6 +78,12 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
> PVOP_VCALL1(mmu.exit_mmap, mm);
> }
>
> +static inline void page_encryption_changed(unsigned long vaddr, int npages,
> + bool enc)
> +{
> + PVOP_VCALL3(mmu.page_encryption_changed, vaddr, npages, enc);
> +}
> +
> #ifdef CONFIG_PARAVIRT_XXL
> static inline void load_sp0(unsigned long sp0)
> {
> @@ -946,6 +952,10 @@ static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
> static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
> {
> }
> +
> +static inline void page_encryption_changed(unsigned long vaddr, int npages, bool enc)
> +{
> +}
> #endif
> #endif /* __ASSEMBLY__ */
> #endif /* _ASM_X86_PARAVIRT_H */
> diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
> index 732f62e04ddb..03bfd515c59c 100644
> --- a/arch/x86/include/asm/paravirt_types.h
> +++ b/arch/x86/include/asm/paravirt_types.h
> @@ -215,6 +215,8 @@ struct pv_mmu_ops {
>
> /* Hook for intercepting the destruction of an mm_struct. */
> void (*exit_mmap)(struct mm_struct *mm);
> + void (*page_encryption_changed)(unsigned long vaddr, int npages,
> + bool enc);
>
> #ifdef CONFIG_PARAVIRT_XXL
> struct paravirt_callee_save read_cr2;
> diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
> index c131ba4e70ef..840c02b23aeb 100644
> --- a/arch/x86/kernel/paravirt.c
> +++ b/arch/x86/kernel/paravirt.c
> @@ -367,6 +367,7 @@ struct paravirt_patch_template pv_ops = {
> (void (*)(struct mmu_gather *, void *))tlb_remove_page,
>
> .mmu.exit_mmap = paravirt_nop,
> + .mmu.page_encryption_changed = paravirt_nop,
>
> #ifdef CONFIG_PARAVIRT_XXL
> .mmu.read_cr2 = __PV_IS_CALLEE_SAVE(native_read_cr2),
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index f4bd4b431ba1..c9800fa811f6 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -19,6 +19,7 @@
> #include <linux/kernel.h>
> #include <linux/bitops.h>
> #include <linux/dma-mapping.h>
> +#include <linux/kvm_para.h>
>
> #include <asm/tlbflush.h>
> #include <asm/fixmap.h>
> @@ -29,6 +30,7 @@
> #include <asm/processor-flags.h>
> #include <asm/msr.h>
> #include <asm/cmdline.h>
> +#include <asm/kvm_para.h>
>
> #include "mm_internal.h"
>
> @@ -196,6 +198,47 @@ void __init sme_early_init(void)
> swiotlb_force = SWIOTLB_FORCE;
> }
>
> +static void set_memory_enc_dec_hypercall(unsigned long vaddr, int npages,
> + bool enc)
> +{
> + unsigned long sz = npages << PAGE_SHIFT;
> + unsigned long vaddr_end, vaddr_next;
> +
> + vaddr_end = vaddr + sz;
> +
> + for (; vaddr < vaddr_end; vaddr = vaddr_next) {
> + int psize, pmask, level;
> + unsigned long pfn;
> + pte_t *kpte;
> +
> + kpte = lookup_address(vaddr, &level);
> + if (!kpte || pte_none(*kpte))
> + return;
> +
> + switch (level) {
> + case PG_LEVEL_4K:
> + pfn = pte_pfn(*kpte);
> + break;
> + case PG_LEVEL_2M:
> + pfn = pmd_pfn(*(pmd_t *)kpte);
> + break;
> + case PG_LEVEL_1G:
> + pfn = pud_pfn(*(pud_t *)kpte);
> + break;
> + default:
> + return;
> + }


Is it possible to re-use the code in __set_clr_pte_enc()?
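
For instance (completely untested, and pg_level_to_pfn() is just a name I
made up for illustration), the level -> pfn switch could be factored into a
small helper that both __set_clr_pte_enc() and
set_memory_enc_dec_hypercall() call, something along these lines:

static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
{
	/* Rough sketch, untested: shared level -> pfn lookup for both paths. */
	unsigned long pfn = 0;
	pgprot_t prot;

	switch (level) {
	case PG_LEVEL_4K:
		pfn = pte_pfn(*kpte);
		prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		pfn = pmd_pfn(*(pmd_t *)kpte);
		prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		pfn = pud_pfn(*(pud_t *)kpte);
		prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		WARN_ONCE(1, "Invalid level for kpte\n");
		return 0;
	}

	if (ret_prot)
		*ret_prot = prot;

	return pfn;
}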

> +
> + psize = page_level_size(level);
> + pmask = page_level_mask(level);
> +
> + kvm_sev_hypercall3(KVM_HC_PAGE_ENC_STATUS,
> + pfn << PAGE_SHIFT, psize >> PAGE_SHIFT, enc);
> +
> + vaddr_next = (vaddr & pmask) + psize;
> + }
> +}
> +
> static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
> {
> pgprot_t old_prot, new_prot;
> @@ -253,12 +296,13 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
> static int __init early_set_memory_enc_dec(unsigned long vaddr,
> unsigned long size, bool enc)
> {
> - unsigned long vaddr_end, vaddr_next;
> + unsigned long vaddr_end, vaddr_next, start;
> unsigned long psize, pmask;
> int split_page_size_mask;
> int level, ret;
> pte_t *kpte;
>
> + start = vaddr;
> vaddr_next = vaddr;
> vaddr_end = vaddr + size;
>
> @@ -313,6 +357,8 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
>
> ret = 0;
>
> + set_memory_enc_dec_hypercall(start, PAGE_ALIGN(size) >> PAGE_SHIFT,
> + enc);


If I haven't missed anything, early_set_memory_encrypted() doesn't seem to
have a caller. So is there a possibility that we end up calling it in a
non-SEV context, and hence do we need the sev_active() guard here?
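
Something like this is all I had in mind, assuming the call stays in
early_set_memory_enc_dec() as in this patch (untested):

	/* Only notify the hypervisor when running as an SEV guest. */
	if (sev_active())
		set_memory_enc_dec_hypercall(start,
					     PAGE_ALIGN(size) >> PAGE_SHIFT, enc);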

> out:
> __flush_tlb_all();
> return ret;
> @@ -451,6 +497,15 @@ void __init mem_encrypt_init(void)
> if (sev_active())
> static_branch_enable(&sev_enable_key);
>
> +#ifdef CONFIG_PARAVIRT
> + /*
> + * With SEV, we need to make a hypercall when page encryption state is
> + * changed.
> + */
> + if (sev_active())
> + pv_ops.mmu.page_encryption_changed = set_memory_enc_dec_hypercall;
> +#endif
> +
> pr_info("AMD %s active\n",
> sev_active() ? "Secure Encrypted Virtualization (SEV)"
> : "Secure Memory Encryption (SME)");
> diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
> index c4aedd00c1ba..86b7804129fc 100644
> --- a/arch/x86/mm/pat/set_memory.c
> +++ b/arch/x86/mm/pat/set_memory.c
> @@ -26,6 +26,7 @@
> #include <asm/proto.h>
> #include <asm/memtype.h>
> #include <asm/set_memory.h>
> +#include <asm/paravirt.h>
>
> #include "../mm_internal.h"
>
> @@ -1987,6 +1988,12 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
> */
> cpa_flush(&cpa, 0);
>
> + /* Notify hypervisor that a given memory range is mapped encrypted
> + * or decrypted. The hypervisor will use this information during the
> + * VM migration.
> + */
> + page_encryption_changed(addr, numpages, enc);
> +
> return ret;
> }
>
