From: Tom Lendacky <thomas.lendacky@amd.com>
Subject: [RFC PATCH v2 12/20] x86: Add support for changing memory encryption attribute
Date: 22 Aug 2016
This patch adds support for changing the memory encryption attribute of
one or more memory pages.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 arch/x86/include/asm/cacheflush.h  |  3 +
 arch/x86/include/asm/mem_encrypt.h | 13 ++++++
 arch/x86/mm/mem_encrypt.c          | 43 +++++++++++++++++++++
 arch/x86/mm/pageattr.c             | 75 ++++++++++++++++++++++++++++++++++++
 4 files changed, 134 insertions(+)
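
For context, an editorial sketch (not part of the submitted patch): a typical
consumer of this interface is code that must share memory in the clear with a
device or another agent. Only sme_set_mem_dec()/sme_set_mem_enc() below come
from this series; the example_* names and the buffer handling are assumptions
for illustration.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/mem_encrypt.h>

static void *example_buf;

static int example_share_with_device(void)
{
	int ret;

	/* Allocate two page-aligned pages (order 1) */
	example_buf = (void *)__get_free_pages(GFP_KERNEL, 1);
	if (!example_buf)
		return -ENOMEM;

	/* Clear the encryption attribute so the other side sees plaintext */
	ret = sme_set_mem_dec(example_buf, 2 * PAGE_SIZE);
	if (ret)
		free_pages((unsigned long)example_buf, 1);

	return ret;
}

static void example_unshare(void)
{
	/* Restore the encryption attribute before freeing the pages */
	sme_set_mem_enc(example_buf, 2 * PAGE_SIZE);
	free_pages((unsigned long)example_buf, 1);
}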

diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 61518cf..bfb08e5 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -13,6 +13,7 @@
* Executability : eXecutable, NoteXecutable
* Read/Write : ReadOnly, ReadWrite
* Presence : NotPresent
+ * Encryption : ENCrypted, DECrypted
*
* Within a category, the attributes are mutually exclusive.
*
@@ -48,6 +49,8 @@ int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
+int set_memory_enc(unsigned long addr, int numpages);
+int set_memory_dec(unsigned long addr, int numpages);

int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 2785493..5616ed1 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -23,6 +23,9 @@ extern unsigned long sme_me_mask;

u8 sme_get_me_loss(void);

+int sme_set_mem_enc(void *vaddr, unsigned long size);
+int sme_set_mem_dec(void *vaddr, unsigned long size);
+
void __init sme_early_mem_enc(resource_size_t paddr,
unsigned long size);
void __init sme_early_mem_dec(resource_size_t paddr,
@@ -44,6 +47,16 @@ static inline u8 sme_get_me_loss(void)
return 0;
}

+static inline int sme_set_mem_enc(void *vaddr, unsigned long size)
+{
+	return 0;
+}
+
+static inline int sme_set_mem_dec(void *vaddr, unsigned long size)
+{
+	return 0;
+}
+
static inline void __init sme_early_mem_enc(resource_size_t paddr,
unsigned long size)
{
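
Editorial note: with the static inline stubs above, callers never need an
#ifdef around these calls; when SME support is compiled out they become no-ops
that return 0 (success), and, as the mem_encrypt.c implementations below show,
they also return early at runtime when SME is inactive (sme_me_mask == 0).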
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index f35a646..b0f39c5 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -14,12 +14,55 @@
#include <linux/mm.h>

#include <asm/mem_encrypt.h>
+#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

/* Buffer used for early in-place encryption by BSP, no locking needed */
static char me_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);

+int sme_set_mem_enc(void *vaddr, unsigned long size)
+{
+	unsigned long addr, numpages;
+
+	if (!sme_me_mask)
+		return 0;
+
+	addr = (unsigned long)vaddr & PAGE_MASK;
+	numpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	/*
+	 * The set_memory_xxx functions take an integer for numpages, so
+	 * make sure it doesn't exceed that.
+	 */
+	if (numpages > INT_MAX)
+		return -EINVAL;
+
+	return set_memory_enc(addr, numpages);
+}
+EXPORT_SYMBOL_GPL(sme_set_mem_enc);
+
+int sme_set_mem_dec(void *vaddr, unsigned long size)
+{
+	unsigned long addr, numpages;
+
+	if (!sme_me_mask)
+		return 0;
+
+	addr = (unsigned long)vaddr & PAGE_MASK;
+	numpages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	/*
+	 * The set_memory_xxx functions take an integer for numpages, so
+	 * make sure it doesn't exceed that.
+	 */
+	if (numpages > INT_MAX)
+		return -EINVAL;
+
+	return set_memory_dec(addr, numpages);
+}
+EXPORT_SYMBOL_GPL(sme_set_mem_dec);
+
/*
* This routine does not change the underlying encryption setting of the
* page(s) that map this memory. It assumes that eventually the memory is
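
To make the rounding in sme_set_mem_enc()/sme_set_mem_dec() concrete, here is
a worked example (editorial; the numbers are illustrative and assume 4 KB
pages, i.e. PAGE_SHIFT == 12):

   vaddr    = (void *)0xffff880012345000, size = 0x1800 (6 KB)
   addr     = (unsigned long)vaddr & PAGE_MASK = 0xffff880012345000
   numpages = PAGE_ALIGN(0x1800) >> PAGE_SHIFT = 0x2000 >> 12 = 2

So the 6 KB request is rounded up to two full 4 KB pages, both of which get
their encryption attribute changed; the INT_MAX check only trips for requests
covering more than 2^31 - 1 pages.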
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 72c292d..0ba9382 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1728,6 +1728,81 @@ int set_memory_4k(unsigned long addr, int numpages)
__pgprot(0), 1, 0, NULL);
}

+static int __set_memory_enc_dec(struct cpa_data *cpa)
+{
+	unsigned long addr;
+	int numpages;
+	int ret;
+
+	if (*cpa->vaddr & ~PAGE_MASK) {
+		*cpa->vaddr &= PAGE_MASK;
+
+		/* People should not be passing in unaligned addresses */
+		WARN_ON_ONCE(1);
+	}
+
+	addr = *cpa->vaddr;
+	numpages = cpa->numpages;
+
+	/* Must avoid aliasing mappings in the highmem code */
+	kmap_flush_unused();
+	vm_unmap_aliases();
+
+	ret = __change_page_attr_set_clr(cpa, 1);
+
+	/* Check whether we really changed something */
+	if (!(cpa->flags & CPA_FLUSHTLB))
+		goto out;
+
+	/*
+	 * On success, use CLFLUSH when the CPU supports it, to
+	 * avoid the WBINVD.
+	 */
+	if (!ret && static_cpu_has(X86_FEATURE_CLFLUSH))
+		cpa_flush_range(addr, numpages, 1);
+	else
+		cpa_flush_all(1);
+
+out:
+	return ret;
+}
+
+int set_memory_enc(unsigned long addr, int numpages)
+{
+	struct cpa_data cpa;
+
+	if (!sme_me_mask)
+		return 0;
+
+	memset(&cpa, 0, sizeof(cpa));
+	cpa.vaddr = &addr;
+	cpa.numpages = numpages;
+	cpa.mask_set = __pgprot(_PAGE_ENC);
+	cpa.mask_clr = __pgprot(0);
+	cpa.pgd = init_mm.pgd;
+
+	return __set_memory_enc_dec(&cpa);
+}
+EXPORT_SYMBOL(set_memory_enc);
+
+int set_memory_dec(unsigned long addr, int numpages)
+{
+	struct cpa_data cpa;
+
+	if (!sme_me_mask)
+		return 0;
+
+	memset(&cpa, 0, sizeof(cpa));
+	cpa.vaddr = &addr;
+	cpa.numpages = numpages;
+	cpa.mask_set = __pgprot(0);
+	cpa.mask_clr = __pgprot(_PAGE_ENC);
+	cpa.pgd = init_mm.pgd;
+
+	return __set_memory_enc_dec(&cpa);
+}
+EXPORT_SYMBOL(set_memory_dec);
+
int set_pages_uc(struct page *page, int numpages)
{
unsigned long addr = (unsigned long)page_address(page);
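
Editorial note on the flushing in __set_memory_enc_dec() above: after the
C-bit of a mapping changes, data still sitting in the caches was read or
written under the old encryption attribute, so the caches must be flushed
along with the TLB rather than only invalidating translations. When the CPU
supports CLFLUSH, cpa_flush_range() limits the cache flush to the affected
lines; otherwise cpa_flush_all() falls back to WBINVD, which flushes the
entire cache hierarchy on each CPU and is considerably more expensive.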