Subject: [PATCH 08/16] x86/pgtable/32: Allocate 8k page-tables when PTI is enabled
From: Joerg Roedel <jroedel@suse.de>

Allocate a kernel and a user page-table root when PTI is
enabled. Also allocate a full page per root for PAE because
otherwise the bit to flip in cr3 to switch between them
would be non-constant, which creates a lot of hassle.
Keep that for a later optimization.
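
(Aside for reviewers, not part of the commit message: the constant-bit
property that the alignment buys can be sketched in a few lines. The
names below - PTI_SWITCH_MASK, cr3_to_user(), cr3_to_kernel() - are
illustrative only, not identifiers from this series.)

/*
 * Sketch: with the kernel and user roots packed into one 8k-aligned,
 * order-1 allocation, the user root sits exactly PAGE_SIZE above the
 * kernel root, so the two cr3 values differ in a single fixed bit
 * (bit 12) and switching is one constant or/and-not.
 */
#define PTI_SWITCH_MASK	(1UL << PAGE_SHIFT)

static inline unsigned long cr3_to_user(unsigned long cr3)
{
	return cr3 | PTI_SWITCH_MASK;	/* upper 4k page: user root */
}

static inline unsigned long cr3_to_kernel(unsigned long cr3)
{
	return cr3 & ~PTI_SWITCH_MASK;	/* lower 4k page: kernel root */
}

With only page alignment the two roots could sit at an arbitrary
distance apart, and the bits to flip would differ per allocation - the
"non-constant" hassle mentioned above.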

Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
arch/x86/kernel/head_32.S | 23 ++++++++++++++++++-----
arch/x86/mm/pgtable.c | 11 ++++++-----
2 files changed, 24 insertions(+), 10 deletions(-)
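
(Note, not part of the commit message: PGD_ALLOCATION_ORDER as used in
the pgtable.c hunks below is assumed to mirror the existing x86-64 PTI
definition, i.e. an order-1, two-page pgd allocation when PTI is
compiled in:

#ifdef CONFIG_PAGE_TABLE_ISOLATION
#define PGD_ALLOCATION_ORDER	1	/* 8k: kernel + user root */
#else
#define PGD_ALLOCATION_ORDER	0	/* 4k: kernel root only */
#endif

The head_32.S side gets the same layout at build time: PTI_USER_PGD_FILL
is 1024 entries x 4 bytes = one extra 4k page behind each root.)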

diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index c29020907886..fc550559bf58 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -512,28 +512,41 @@ ENTRY(initial_code)
ENTRY(setup_once_ref)
.long setup_once

+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#define PGD_ALIGN (2 * PAGE_SIZE)
+#define PTI_USER_PGD_FILL 1024
+#else
+#define PGD_ALIGN (PAGE_SIZE)
+#define PTI_USER_PGD_FILL 0
+#endif
/*
* BSS section
*/
__PAGE_ALIGNED_BSS
- .align PAGE_SIZE
+ .align PGD_ALIGN
#ifdef CONFIG_X86_PAE
.globl initial_pg_pmd
initial_pg_pmd:
.fill 1024*KPMDS,4,0
+ .fill PTI_USER_PGD_FILL,4,0
#else
.globl initial_page_table
initial_page_table:
.fill 1024,4,0
+ .fill PTI_USER_PGD_FILL,4,0
#endif
+ .align PGD_ALIGN
initial_pg_fixmap:
.fill 1024,4,0
-.globl empty_zero_page
-empty_zero_page:
- .fill 4096,1,0
+ .fill PTI_USER_PGD_FILL,4,0
.globl swapper_pg_dir
+ .align PGD_ALIGN
swapper_pg_dir:
.fill 1024,4,0
+ .fill PTI_USER_PGD_FILL,4,0
+.globl empty_zero_page
+empty_zero_page:
+ .fill 4096,1,0
EXPORT_SYMBOL(empty_zero_page)

/*
@@ -542,7 +555,7 @@ EXPORT_SYMBOL(empty_zero_page)
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
/* Page-aligned for the benefit of paravirt? */
- .align PAGE_SIZE
+ .align PGD_ALIGN
ENTRY(initial_page_table)
.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
# if KPMDS == 3
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 004abf9ebf12..48abefd95924 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -313,7 +313,7 @@ static int __init pgd_cache_init(void)
* When PAE kernel is running as a Xen domain, it does not use
* shared kernel pmd. And this requires a whole page for pgd.
*/
- if (!SHARED_KERNEL_PMD)
+ if (static_cpu_has(X86_FEATURE_PTI) || !SHARED_KERNEL_PMD)
return 0;

/*
@@ -337,8 +337,9 @@ static inline pgd_t *_pgd_alloc(void)
* If no SHARED_KERNEL_PMD, PAE kernel is running as a Xen domain.
* We allocate one page for pgd.
*/
- if (!SHARED_KERNEL_PMD)
- return (pgd_t *)__get_free_page(PGALLOC_GFP);
+ if (static_cpu_has(X86_FEATURE_PTI) || !SHARED_KERNEL_PMD)
+ return (pgd_t *)__get_free_pages(PGALLOC_GFP,
+ PGD_ALLOCATION_ORDER);

/*
* Now PAE kernel is not running as a Xen domain. We can allocate
@@ -349,8 +350,8 @@ static inline pgd_t *_pgd_alloc(void)

static inline void _pgd_free(pgd_t *pgd)
{
- if (!SHARED_KERNEL_PMD)
- free_page((unsigned long)pgd);
+ if (static_cpu_has(X86_FEATURE_PTI) || !SHARED_KERNEL_PMD)
+ free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
else
kmem_cache_free(pgd_cache, pgd);
}
--
2.13.6