    Subject: [PATCH 10/43] x86/entry: Remap the TSS into the cpu entry area
    Date: 24 Nov 2017
    From: Andy Lutomirski <luto@kernel.org>

    This has a secondary purpose: it puts the entry stack into a region
    with a well-controlled layout. A subsequent patch will take
    advantage of this to streamline the SYSCALL entry code to be able to
    find it more easily.
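
    Illustration only, not part of the patch: a minimal sketch of how code
    can locate the top of the SYSENTER stack from the fixed cpu_entry_area
    layout once the TSS is remapped there. It mirrors the expressions used
    in enable_sep_cpu() and syscall_init() below; the helper name is
    hypothetical, while get_cpu_entry_area(), struct tss_struct and
    offsetofend() are the ones this series relies on.

        /* Hypothetical helper, shown only to illustrate the fixed layout. */
        static inline unsigned long example_sysenter_stack_top(int cpu)
        {
                /* The TSS now sits at a known offset inside cpu_entry_area. */
                struct tss_struct *tss = &get_cpu_entry_area(cpu)->tss;

                /*
                 * offsetofend() points just past SYSENTER_stack, i.e. the top
                 * of the stack, which is what MSR_IA32_SYSENTER_ESP is set to.
                 */
                return (unsigned long)tss +
                       offsetofend(struct tss_struct, SYSENTER_stack);
        }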

    Signed-off-by: Andy Lutomirski <luto@kernel.org>
    Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
    Cc: Borislav Petkov <bp@alien8.de>
    Cc: Borislav Petkov <bpetkov@suse.de>
    Cc: Brian Gerst <brgerst@gmail.com>
    Cc: Dave Hansen <dave.hansen@intel.com>
    Cc: Denys Vlasenko <dvlasenk@redhat.com>
    Cc: H. Peter Anvin <hpa@zytor.com>
    Cc: Josh Poimboeuf <jpoimboe@redhat.com>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Link: http://lkml.kernel.org/r/cdcba7e1e82122461b3ca36bb3ef6713ba605e35.1511497875.git.luto@kernel.org
    Signed-off-by: Ingo Molnar <mingo@kernel.org>
    ---
    arch/x86/entry/entry_32.S     |  6 ++++--
    arch/x86/include/asm/fixmap.h |  7 +++++++
    arch/x86/kernel/asm-offsets.c |  3 +++
    arch/x86/kernel/cpu/common.c  | 38 ++++++++++++++++++++++++++++++++------
    arch/x86/kernel/dumpstack.c   |  3 ++-
    arch/x86/power/cpu.c          | 11 ++++++-----
    6 files changed, 54 insertions(+), 14 deletions(-)

    diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
    index 4838037f97f6..0ab316c46806 100644
    --- a/arch/x86/entry/entry_32.S
    +++ b/arch/x86/entry/entry_32.S
    @@ -941,7 +941,8 @@ ENTRY(debug)
    movl %esp, %eax # pt_regs pointer

    /* Are we currently on the SYSENTER stack? */
    - PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
    + movl PER_CPU_VAR(cpu_entry_area), %ecx
    + addl $CPU_ENTRY_AREA_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
    subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */
    cmpl $SIZEOF_SYSENTER_stack, %ecx
    jb .Ldebug_from_sysenter_stack
    @@ -984,7 +985,8 @@ ENTRY(nmi)
    movl %esp, %eax # pt_regs pointer

    /* Are we currently on the SYSENTER stack? */
    - PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
    + movl PER_CPU_VAR(cpu_entry_area), %ecx
    + addl $CPU_ENTRY_AREA_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
    subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */
    cmpl $SIZEOF_SYSENTER_stack, %ecx
    jb .Lnmi_from_sysenter_stack
    diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
    index 0f4c92f02968..3a42da14c2cb 100644
    --- a/arch/x86/include/asm/fixmap.h
    +++ b/arch/x86/include/asm/fixmap.h
    @@ -51,6 +51,13 @@ extern unsigned long __FIXADDR_TOP;
    */
    struct cpu_entry_area {
    char gdt[PAGE_SIZE];
    +
    + /*
    + * The GDT is just below cpu_tss and thus serves (on x86_64) as a
    + * read-only guard page for the SYSENTER stack at the bottom
    + * of the TSS region.
    + */
    + struct tss_struct tss;
    };

    #define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE)
    diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
    index b275863128eb..55858b277cf6 100644
    --- a/arch/x86/kernel/asm-offsets.c
    +++ b/arch/x86/kernel/asm-offsets.c
    @@ -98,4 +98,7 @@ void common(void) {
    OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack);
    /* Size of SYSENTER_stack */
    DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack));
    +
    + /* Layout info for cpu_entry_area */
    + OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
    }
    diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
    index d173f6013467..c67742df569a 100644
    --- a/arch/x86/kernel/cpu/common.c
    +++ b/arch/x86/kernel/cpu/common.c
    @@ -490,6 +490,19 @@ void load_percpu_segment(int cpu)
    load_stack_canary_segment();
    }

    +static void set_percpu_fixmap_pages(int fixmap_index, void *ptr, int pages, pgprot_t prot)
    +{
    + int i;
    +
    + for (i = 0; i < pages; i++)
    + __set_fixmap(fixmap_index - i, per_cpu_ptr_to_phys(ptr + i*PAGE_SIZE), prot);
    +}
    +
    +#ifdef CONFIG_X86_32
    +/* The 32-bit entry code needs to find cpu_entry_area. */
    +DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
    +#endif
    +
    /* Setup the fixmap mappings only once per-processor */
    static inline void setup_cpu_entry_area(int cpu)
    {
    @@ -531,7 +544,15 @@ static inline void setup_cpu_entry_area(int cpu)
    */
    BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
    offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
    + BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
    + set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
    + &per_cpu(cpu_tss, cpu),
    + sizeof(struct tss_struct) / PAGE_SIZE,
    + PAGE_KERNEL);

    +#ifdef CONFIG_X86_32
    + this_cpu_write(cpu_entry_area, get_cpu_entry_area(cpu));
    +#endif
    }

    /* Load the original GDT from the per-cpu structure */
    @@ -1282,7 +1303,8 @@ void enable_sep_cpu(void)
    wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);

    wrmsr(MSR_IA32_SYSENTER_ESP,
    - (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
    + (unsigned long)&get_cpu_entry_area(cpu)->tss +
    + offsetofend(struct tss_struct, SYSENTER_stack),
    0);

    wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
    @@ -1395,6 +1417,8 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
    /* May not be marked __init: used by software suspend */
    void syscall_init(void)
    {
    + int cpu = smp_processor_id();
    +
    wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
    wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);

    @@ -1408,7 +1432,7 @@ void syscall_init(void)
    */
    wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
    wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
    - (unsigned long)this_cpu_ptr(&cpu_tss) +
    + (unsigned long)&get_cpu_entry_area(cpu)->tss +
    offsetofend(struct tss_struct, SYSENTER_stack));
    wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
    #else
    @@ -1618,11 +1642,13 @@ void cpu_init(void)
    initialize_tlbstate_and_flush();
    enter_lazy_tlb(&init_mm, me);

    + setup_cpu_entry_area(cpu);
    +
    /*
    * Initialize the TSS. Don't bother initializing sp0, as the initial
    * task never enters user mode.
    */
    - set_tss_desc(cpu, &t->x86_tss);
    + set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
    load_TR_desc();

    load_mm_ldt(&init_mm);
    @@ -1635,7 +1661,6 @@ void cpu_init(void)
    if (is_uv_system())
    uv_cpu_init();

    - setup_cpu_entry_area(cpu);
    load_fixmap_gdt(cpu);
    }

    @@ -1676,11 +1701,13 @@ void cpu_init(void)
    initialize_tlbstate_and_flush();
    enter_lazy_tlb(&init_mm, curr);

    + setup_cpu_entry_area(cpu);
    +
    /*
    * Initialize the TSS. Don't bother initializing sp0, as the initial
    * task never enters user mode.
    */
    - set_tss_desc(cpu, &t->x86_tss);
    + set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
    load_TR_desc();

    load_mm_ldt(&init_mm);
    @@ -1697,7 +1724,6 @@ void cpu_init(void)

    fpu__init_cpu();

    - setup_cpu_entry_area(cpu);
    load_fixmap_gdt(cpu);
    }
    #endif
    diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
    index a8aa70c05489..bb61919c9335 100644
    --- a/arch/x86/kernel/dumpstack.c
    +++ b/arch/x86/kernel/dumpstack.c
    @@ -45,7 +45,8 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,

    bool in_sysenter_stack(unsigned long *stack, struct stack_info *info)
    {
    - struct tss_struct *tss = this_cpu_ptr(&cpu_tss);
    + int cpu = smp_processor_id();
    + struct tss_struct *tss = &get_cpu_entry_area(cpu)->tss;

    /* Treat the canary as part of the stack for unwinding purposes. */
    void *begin = &tss->SYSENTER_stack_canary;
    diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
    index 50593e138281..04d5157fe7f8 100644
    --- a/arch/x86/power/cpu.c
    +++ b/arch/x86/power/cpu.c
    @@ -160,18 +160,19 @@ static void do_fpu_end(void)
    static void fix_processor_context(void)
    {
    int cpu = smp_processor_id();
    - struct tss_struct *t = &per_cpu(cpu_tss, cpu);
    #ifdef CONFIG_X86_64
    struct desc_struct *desc = get_cpu_gdt_rw(cpu);
    tss_desc tss;
    #endif

    /*
    - * This just modifies memory; should not be necessary. But... This is
    - * necessary, because 386 hardware has concept of busy TSS or some
    - * similar stupidity.
    + * We need to reload TR, which requires that we change the
    + * GDT entry to indicate "available" first.
    + *
    + * XXX: This could probably all be replaced by a call to
    + * force_reload_TR().
    */
    - set_tss_desc(cpu, &t->x86_tss);
    + set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

    #ifdef CONFIG_X86_64
    memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
    --
    2.14.1