From: Jarkko Sakkinen <jarkko@kernel.org>
Subject: [PATCH 3/3] x86/sgx: Use sgx_va for the enclave's version array
Date: 2021-02-24
Use the xarray-based sgx_va to maintain VA slots for an enclave. This
effectively removes the need for any complex dynamic behaviour, as the
slots are assigned only during enclave construction.
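
As an aside (illustrative only, not part of the patch): each VA page holds
SGX_VA_SLOT_COUNT (512) 8-byte version slots, so a single IDA-allocated index
maps to a byte offset within its VA page with plain arithmetic. A minimal
sketch of that mapping, where sgx_va_slot_offset() is a hypothetical helper;
the patch open-codes the same expression in __sgx_encl_eldu() and
sgx_encl_ewb():

	/* Each VA slot is a 64-bit version counter inside a 4 KiB VA page. */
	static inline unsigned long sgx_va_slot_offset(int va_index)
	{
		return 8 * (va_index & (SGX_VA_SLOT_COUNT - 1));
	}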

Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
---
arch/x86/kernel/cpu/sgx/driver.c | 3 +-
arch/x86/kernel/cpu/sgx/encl.c | 60 ++---------------------------
arch/x86/kernel/cpu/sgx/encl.h | 12 ++----
arch/x86/kernel/cpu/sgx/ioctl.c | 65 ++++++++++++--------------------
arch/x86/kernel/cpu/sgx/main.c | 18 ++-------
5 files changed, 37 insertions(+), 121 deletions(-)

diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c
index 8ce6d8371cfb..3fa52d962456 100644
--- a/arch/x86/kernel/cpu/sgx/driver.c
+++ b/arch/x86/kernel/cpu/sgx/driver.c
@@ -26,9 +26,10 @@ static int sgx_open(struct inode *inode, struct file *file)
kref_init(&encl->refcount);
xa_init(&encl->page_array);
mutex_init(&encl->lock);
- INIT_LIST_HEAD(&encl->va_pages);
INIT_LIST_HEAD(&encl->mm_list);
spin_lock_init(&encl->mm_lock);
+ ida_init(&encl->va_index);
+ sgx_va_init(&encl->va);

ret = init_srcu_struct(&encl->srcu);
if (ret) {
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index c8ce3779eaa7..6aef00814afc 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -20,7 +20,7 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
struct sgx_epc_page *epc_page,
struct sgx_epc_page *secs_page)
{
- unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
+ unsigned long va_offset = 8 * (encl_page->va_index & (SGX_VA_SLOT_COUNT - 1));
struct sgx_encl *encl = encl_page->encl;
struct sgx_pageinfo pginfo;
struct sgx_backing b;
@@ -67,8 +67,6 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
struct sgx_epc_page *secs_page)
{

- unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
- struct sgx_encl *encl = encl_page->encl;
struct sgx_epc_page *epc_page;
int ret;

@@ -82,9 +80,6 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
return ERR_PTR(ret);
}

- sgx_free_va_slot(encl_page->va_page, va_offset);
- list_move(&encl_page->va_page->list, &encl->va_pages);
- encl_page->desc &= ~SGX_ENCL_PAGE_VA_OFFSET_MASK;
encl_page->epc_page = epc_page;

return epc_page;
@@ -391,7 +386,6 @@ const struct vm_operations_struct sgx_vm_ops = {
void sgx_encl_release(struct kref *ref)
{
struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
- struct sgx_va_page *va_page;
struct sgx_encl_page *entry;
unsigned long index;

@@ -419,13 +413,8 @@ void sgx_encl_release(struct kref *ref)
encl->secs.epc_page = NULL;
}

- while (!list_empty(&encl->va_pages)) {
- va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
- list);
- list_del(&va_page->list);
- sgx_free_epc_page(va_page->epc_page);
- kfree(va_page);
- }
+ sgx_va_destroy(&encl->va);
+ ida_destroy(&encl->va_index);

if (encl->backing)
fput(encl->backing);
@@ -707,49 +696,6 @@ struct sgx_va_page *sgx_alloc_va_page(void)
return ERR_PTR(ret);
}

-/**
- * sgx_alloc_va_slot - allocate a VA slot
- * @va_page: a &struct sgx_va_page instance
- *
- * Allocates a slot from a &struct sgx_va_page instance.
- *
- * Return: offset of the slot inside the VA page
- */
-unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page)
-{
- int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);
-
- if (slot < SGX_VA_SLOT_COUNT)
- set_bit(slot, va_page->slots);
-
- return slot << 3;
-}
-
-/**
- * sgx_free_va_slot - free a VA slot
- * @va_page: a &struct sgx_va_page instance
- * @offset: offset of the slot inside the VA page
- *
- * Frees a slot from a &struct sgx_va_page instance.
- */
-void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset)
-{
- clear_bit(offset >> 3, va_page->slots);
-}
-
-/**
- * sgx_va_page_full - is the VA page full?
- * @va_page: a &struct sgx_va_page instance
- *
- * Return: true if all slots have been taken
- */
-bool sgx_va_page_full(struct sgx_va_page *va_page)
-{
- int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);
-
- return slot == SGX_VA_SLOT_COUNT;
-}
-
/**
* sgx_va_init() - Initialize a version array (VA)
* @va: A version array.
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
index 806cf3ff7391..a1d6280029f0 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
@@ -8,6 +8,7 @@
#define _X86_ENCL_H

#include <linux/cpumask.h>
+#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mm_types.h>
@@ -24,7 +25,6 @@
struct sgx_va_page {
struct sgx_epc_page *epc_page;
DECLARE_BITMAP(slots, SGX_VA_SLOT_COUNT);
- struct list_head list;
};

struct sgx_va {
@@ -32,9 +32,6 @@ struct sgx_va {
struct mutex lock;
};

-/* 'desc' bits holding the offset in the VA (version array) page. */
-#define SGX_ENCL_PAGE_VA_OFFSET_MASK GENMASK_ULL(11, 3)
-
/* 'desc' bit marking that the page is being reclaimed. */
#define SGX_ENCL_PAGE_BEING_RECLAIMED BIT(3)

@@ -44,6 +41,7 @@ struct sgx_encl_page {
struct sgx_epc_page *epc_page;
struct sgx_encl *encl;
struct sgx_va_page *va_page;
+ int va_index;
};

enum sgx_encl_flags {
@@ -75,7 +73,8 @@ struct sgx_encl {
cpumask_t cpumask;
struct file *backing;
struct kref refcount;
- struct list_head va_pages;
+ struct ida va_index;
+ struct sgx_va va;
unsigned long mm_list_version;
struct list_head mm_list;
spinlock_t mm_lock;
@@ -117,9 +116,6 @@ int sgx_encl_test_and_clear_young(struct mm_struct *mm,
struct sgx_encl_page *page);

struct sgx_va_page *sgx_alloc_va_page(void);
-unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page);
-void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset);
-bool sgx_va_page_full(struct sgx_va_page *va_page);

void sgx_va_init(struct sgx_va *va);
void sgx_va_destroy(struct sgx_va *va);
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index db6e2c6ad42d..4a5393b25edd 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -16,47 +16,38 @@
#include "encl.h"
#include "encls.h"

-static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
+static int sgx_encl_alloc_va_slot(struct sgx_encl *encl, struct sgx_encl_page *encl_page)
{
- struct sgx_va_page *va_page = NULL;
-
- BUILD_BUG_ON(SGX_VA_SLOT_COUNT != (SGX_ENCL_PAGE_VA_OFFSET_MASK >> 3) + 1);
-
- if (!(encl->page_cnt % SGX_VA_SLOT_COUNT))
- va_page = sgx_alloc_va_page();
+ struct sgx_va_page *va_page;
+ int ret;

- encl->page_cnt++;
+ ret = ida_alloc(&encl->va_index, GFP_KERNEL);
+ if (ret < 0)
+ return ret;

- return va_page;
-}
+ va_page = sgx_va_get(&encl->va, ret);
+ if (IS_ERR(va_page)) {
+ ida_free(&encl->va_index, ret);
+ return PTR_ERR(va_page);
+ }

-static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
-{
- encl->page_cnt--;
+ encl_page->va_page = va_page;
+ encl_page->va_index = ret;

- if (va_page) {
- sgx_free_epc_page(va_page->epc_page);
- list_del(&va_page->list);
- kfree(va_page);
- }
+ return 0;
}

static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
{
struct sgx_epc_page *secs_epc;
- struct sgx_va_page *va_page;
struct sgx_pageinfo pginfo;
struct sgx_secinfo secinfo;
unsigned long encl_size;
struct file *backing;
long ret;

- va_page = sgx_encl_grow(encl);
- if (IS_ERR(va_page))
- return PTR_ERR(va_page);
- else if (va_page)
- list_add(&va_page->list, &encl->va_pages);
- /* else the tail page of the VA page list had free slots. */
+ /* Get a VA slot for swapping the SECS. */
+ ret = sgx_encl_alloc_va_slot(encl, &encl->secs);

/* The extra page goes to SECS. */
encl_size = secs->size + PAGE_SIZE;
@@ -65,7 +56,7 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
VM_NORESERVE);
if (IS_ERR(backing)) {
ret = PTR_ERR(backing);
- goto err_out_shrink;
+ goto err_out_va;
}

encl->backing = backing;
@@ -112,8 +103,9 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
fput(encl->backing);
encl->backing = NULL;

-err_out_shrink:
- sgx_encl_shrink(encl, va_page);
+err_out_va:
+ sgx_va_put(&encl->va, encl->secs.va_index);
+ ida_free(&encl->va_index, encl->secs.va_index);

return ret;
}
@@ -280,7 +272,6 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
{
struct sgx_encl_page *encl_page;
struct sgx_epc_page *epc_page;
- struct sgx_va_page *va_page;
int ret;

encl_page = sgx_encl_page_alloc(encl, offset, secinfo->flags);
@@ -293,22 +284,13 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
return PTR_ERR(epc_page);
}

- va_page = sgx_encl_grow(encl);
- if (IS_ERR(va_page)) {
- ret = PTR_ERR(va_page);
+ ret = sgx_encl_alloc_va_slot(encl, encl_page);
+ if (ret)
goto err_out_free;
- }

mmap_read_lock(current->mm);
mutex_lock(&encl->lock);

- /*
- * Adding to encl->va_pages must be done under encl->lock. Ditto for
- * deleting (via sgx_encl_shrink()) in the error path.
- */
- if (va_page)
- list_add(&va_page->list, &encl->va_pages);
-
/*
* Insert prior to EADD in case of OOM. EADD modifies MRENCLAVE, i.e.
* can't be gracefully unwound, while failure on EADD/EXTEND is limited
@@ -348,7 +330,8 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));

err_out_unlock:
- sgx_encl_shrink(encl, va_page);
+ sgx_va_put(&encl->va, encl_page->va_index);
+ ida_free(&encl->va_index, encl_page->va_index);
mutex_unlock(&encl->lock);
mmap_read_unlock(current->mm);

diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 26dfe6aecd72..5b2e5767419c 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -215,12 +215,9 @@ static void sgx_encl_ewb(struct sgx_epc_page *epc_page,

encl_page->desc &= ~SGX_ENCL_PAGE_BEING_RECLAIMED;

- va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
- list);
- va_offset = sgx_alloc_va_slot(va_page);
+ va_page = encl_page->va_page;
+ va_offset = 8 * (encl_page->va_index & (SGX_VA_SLOT_COUNT - 1));
va_slot = sgx_get_epc_virt_addr(va_page->epc_page) + va_offset;
- if (sgx_va_page_full(va_page))
- list_move_tail(&va_page->list, &encl->va_pages);

ret = __sgx_encl_ewb(epc_page, va_slot, backing);
if (ret == SGX_NOT_TRACKED) {
@@ -245,15 +242,8 @@ static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
}
}

- if (ret) {
- if (encls_failed(ret))
- ENCLS_WARN(ret, "EWB");
-
- sgx_free_va_slot(va_page, va_offset);
- } else {
- encl_page->desc |= va_offset;
- encl_page->va_page = va_page;
- }
+ if (ret && encls_failed(ret))
+ ENCLS_WARN(ret, "EWB");
}

static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
--
2.30.1