From: Andy Lutomirski <luto@kernel.org>
Subject: [RFC 6/7] x86/asm: Remap the TSS into the cpu entry area
Date: 10 Nov 2017

This has a secondary purpose: it puts the entry stack into a region
with a well-controlled layout. A subsequent patch will take
advantage of this to streamline the SYSCALL entry code so that it can
find the entry stack more easily.
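
As a rough illustration (a standalone userspace model; the member names
echo the kernel structures, but the sizes and layout here are simplified
stand-ins, not the real definitions), a fixed layout means the top of the
SYSENTER stack is just a constant offset from the entry area base:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

/* Simplified stand-ins for the kernel's tss_struct and cpu_entry_area. */
struct tss_struct {
        char SYSENTER_stack[512];               /* entry stack at the bottom of the TSS region */
        char x86_tss[104];                      /* hardware TSS; size illustrative only */
        char pad[PAGE_SIZE - 512 - 104];        /* padded out to a whole page */
};

struct cpu_entry_area {
        char gdt[PAGE_SIZE];                    /* read-only page just below the TSS */
        struct tss_struct tss;
};

#define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

int main(void)
{
        /*
         * Because the layout is fixed, the top of the SYSENTER stack is a
         * compile-time constant relative to the per-cpu entry area base;
         * no per-cpu pointer chasing is needed to find it.
         */
        size_t stack_top = offsetof(struct cpu_entry_area, tss) +
                           offsetofend(struct tss_struct, SYSENTER_stack);

        printf("SYSENTER stack top = entry area base + %zu\n", stack_top);
        return 0;
}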

XXX: This either needs to not happen on 32-bit or we need to fix the 32-bit
entry code.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
---
arch/x86/include/asm/fixmap.h | 7 +++++++
arch/x86/kernel/cpu/common.c | 33 +++++++++++++++++++++++++++------
arch/x86/power/cpu.c | 11 ++++++-----
3 files changed, 40 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index fbc9b7f4e35e..8a9ba5553cab 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -52,6 +52,13 @@ extern unsigned long __FIXADDR_TOP;
struct cpu_entry_area
{
char gdt[PAGE_SIZE];
+
+ /*
+ * The gdt is just below cpu_tss and thus serves (on x86_64) as a
+ * read-only guard page for the SYSENTER stack at the bottom
+ * of the TSS region.
+ */
+ struct tss_struct tss;
};

#define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE)
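
The two layout invariants this arrangement relies on, that the TSS is an
exact number of pages and that the gdt page ends exactly where the TSS
begins (so the read-only gdt mapping guards the SYSENTER stack), can be
written as compile-time checks in a standalone model (stand-in types and
sizes again; the patch itself asserts only the page-multiple property, via
the BUILD_BUG_ON in setup_cpu_entry_area() below, while adjacency follows
from the struct definition):

#include <stddef.h>

#define PAGE_SIZE 4096UL

/* Stand-in types; sizes are illustrative, not the real kernel ones. */
struct tss_struct {
        char SYSENTER_stack[512];
        char rest[PAGE_SIZE - 512];
};

struct cpu_entry_area {
        char gdt[PAGE_SIZE];
        struct tss_struct tss;
};

/* The TSS must be a whole number of pages so it can be fixmapped page by
 * page (the patch checks the real structure with BUILD_BUG_ON). */
_Static_assert(sizeof(struct tss_struct) % PAGE_SIZE == 0,
               "tss_struct must be a whole number of pages");

/* The gdt page must end exactly where the TSS begins, so the read-only gdt
 * mapping sits directly below the SYSENTER stack and acts as a guard page. */
_Static_assert(offsetof(struct cpu_entry_area, tss) == PAGE_SIZE,
               "gdt must sit immediately below the TSS");

int main(void)
{
        return 0;
}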
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index ce3b3c79fc0c..fdf8108791ce 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -466,6 +466,16 @@ void load_percpu_segment(int cpu)
load_stack_canary_segment();
}

+static void set_percpu_fixmap_pages(int fixmap_index, void *ptr, int pages,
+ pgprot_t prot)
+{
+ int i;
+
+ for (i = 0; i < pages; i++)
+ __set_fixmap(fixmap_index - i,
+ per_cpu_ptr_to_phys(ptr + i*PAGE_SIZE), prot);
+}
+
/* Setup the fixmap mappings only once per-processor */
static inline void setup_cpu_entry_area(int cpu)
{
@@ -487,6 +497,12 @@ static inline void setup_cpu_entry_area(int cpu)
#endif

__set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
+
+ BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
+ set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
+ &per_cpu(cpu_tss, cpu),
+ sizeof(struct tss_struct) / PAGE_SIZE,
+ PAGE_KERNEL);
}

/* Load the original GDT from the per-cpu structure */
@@ -1236,7 +1252,8 @@ void enable_sep_cpu(void)
wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);

wrmsr(MSR_IA32_SYSENTER_ESP,
- (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
+ (unsigned long)&get_cpu_entry_area(cpu)->tss +
+ offsetofend(struct tss_struct, SYSENTER_stack),
0);

wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
@@ -1349,6 +1366,8 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
+ int cpu = smp_processor_id();
+
wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);

@@ -1362,7 +1381,7 @@ void syscall_init(void)
*/
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
- (unsigned long)this_cpu_ptr(&cpu_tss) +
+ (unsigned long)&get_cpu_entry_area(cpu)->tss +
offsetofend(struct tss_struct, SYSENTER_stack));
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else
@@ -1572,11 +1591,13 @@ void cpu_init(void)
initialize_tlbstate_and_flush();
enter_lazy_tlb(&init_mm, me);

+ setup_cpu_entry_area(cpu);
+
/*
* Initialize the TSS. Don't bother initializing sp0, as the initial
* task never enters user mode.
*/
- set_tss_desc(cpu, &t->x86_tss);
+ set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
load_TR_desc();

load_mm_ldt(&init_mm);
@@ -1589,7 +1610,6 @@ void cpu_init(void)
if (is_uv_system())
uv_cpu_init();

- setup_cpu_entry_area(cpu);
load_fixmap_gdt(cpu);
}

@@ -1630,11 +1650,13 @@ void cpu_init(void)
initialize_tlbstate_and_flush();
enter_lazy_tlb(&init_mm, curr);

+ setup_cpu_entry_area(cpu);
+
/*
* Initialize the TSS. Don't bother initializing sp0, as the initial
* task never enters user mode.
*/
- set_tss_desc(cpu, &t->x86_tss);
+ set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
load_TR_desc();

load_mm_ldt(&init_mm);
@@ -1651,7 +1673,6 @@ void cpu_init(void)

fpu__init_cpu();

- setup_cpu_entry_area(cpu);
load_fixmap_gdt(cpu);
}
#endif
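
One note on set_percpu_fixmap_pages() above: it steps the fixmap index down
while the physical pages step up because, with the usual fixmap convention,
a higher index corresponds to a lower virtual address (fix_to_virt()
subtracts index * PAGE_SIZE from the top of the fixmap area). A standalone
sketch of that arithmetic, with made-up values for the top address and slot
number:

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define FIXADDR_TOP 0xfff00000UL        /* illustrative value only */

/* Models the fixmap convention: index 0 is the highest page, and each
 * further index is one page lower in virtual address space. */
static unsigned long fix_to_virt(unsigned int idx)
{
        return FIXADDR_TOP - idx * PAGE_SIZE;
}

int main(void)
{
        unsigned int base_index = 100;  /* hypothetical fixmap slot */
        int pages = 3;

        /* Mapping page i of the object at index (base_index - i) yields
         * ascending, contiguous virtual addresses for the whole object. */
        for (int i = 0; i < pages; i++)
                printf("object page %d -> fixmap index %u -> va %#lx\n",
                       i, base_index - i, fix_to_virt(base_index - i));
        return 0;
}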
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 50593e138281..04d5157fe7f8 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -160,18 +160,19 @@ static void do_fpu_end(void)
static void fix_processor_context(void)
{
int cpu = smp_processor_id();
- struct tss_struct *t = &per_cpu(cpu_tss, cpu);
#ifdef CONFIG_X86_64
struct desc_struct *desc = get_cpu_gdt_rw(cpu);
tss_desc tss;
#endif

/*
- * This just modifies memory; should not be necessary. But... This is
- * necessary, because 386 hardware has concept of busy TSS or some
- * similar stupidity.
+ * We need to reload TR, which requires that we change the
+ * GDT entry to indicate "available" first.
+ *
+ * XXX: This could probably all be replaced by a call to
+ * force_reload_TR().
*/
- set_tss_desc(cpu, &t->x86_tss);
+ set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

#ifdef CONFIG_X86_64
memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
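
For background on the comment above: ltr marks the referenced TSS
descriptor busy, and loading TR from a descriptor that is already marked
busy raises a fault, which is why the resume path has to rewrite the GDT
entry as "available" before TR can be reloaded. A toy model of that rule
(the type values 0x9 and 0xB are the architectural ones; everything else
is illustrative):

#include <stdio.h>
#include <stdint.h>

#define DESC_TSS_AVAIL 0x9      /* available TSS */
#define DESC_TSS_BUSY  0xB      /* busy TSS; the CPU sets this when TR is loaded */

struct tss_descriptor {
        uint8_t type;           /* base, limit, etc. omitted */
};

/* Models ltr: refuses a busy descriptor, marks an available one busy. */
static int load_tr(struct tss_descriptor *d)
{
        if (d->type == DESC_TSS_BUSY)
                return -1;              /* the real instruction would #GP */
        d->type = DESC_TSS_BUSY;
        return 0;
}

int main(void)
{
        /* The GDT image restored on resume still has busy set from the
         * last ltr. */
        struct tss_descriptor gdt_tss = { .type = DESC_TSS_BUSY };

        printf("reload without reset: %s\n",
               load_tr(&gdt_tss) ? "faults" : "ok");

        /* fix_processor_context() first rewrites the entry as available
         * (that is what set_tss_desc() and the type fixup achieve) ... */
        gdt_tss.type = DESC_TSS_AVAIL;

        /* ... and only then can TR actually be reloaded. */
        printf("reload after reset:   %s\n",
               load_tr(&gdt_tss) ? "faults" : "ok");
        return 0;
}
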
--
2.13.6