    Subject: [PATCH 04/23] x86, kaiser: mark per-cpu data structures required for entry/exit

    From: Dave Hansen <dave.hansen@linux.intel.com>

    These patches are based on work from a team at Graz University of
    Technology posted here: https://github.com/IAIK/KAISER

    The KAISER approach keeps two copies of the page tables: one for running
    in the kernel and one for running in userspace. But there are a few
    structures that are needed for switching in and out of the kernel, and
    a good subset of *those* are per-cpu data.

    Here's a short summary of the things mapped to userspace (a sketch of
    the user-mapped per-cpu machinery follows the list):
    * The gdt_page's virtual address is what gets loaded into the GDTR by
      the LGDT instruction. It defines the segments and is absolutely
      required for the CPU to run.
    * cpu_tss tells the CPU, among other things, where the new stacks are
      after user<->kernel transitions. Needed by the CPU to make ring
      transitions.
    * exception_stacks are needed at interrupt and exception entry so that
      there is, among other things, some temporary space in which a register
      can be clobbered in order to load the kernel CR3.
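
    The _USER_MAPPED variants used in the diff below are introduced earlier
    in this series. As a rough sketch of the idea (the "..user_mapped"
    section suffix and the exact macro bodies here are assumptions for
    illustration, not what this series defines), each variant simply places
    the variable into a dedicated per-cpu linker section that the KAISER
    page-table setup can later map into the user copy of the page tables:

    /*
     * Sketch only: modelled on the DEFINE_PER_CPU_SECTION() machinery in
     * include/linux/percpu-defs.h.  The "..user_mapped" section suffix is
     * an assumed name, used here just to show the grouping technique.
     */
    #define USER_MAPPED_SECTION "..user_mapped"

    /* Declaration/definition pair for page-aligned, user-mapped per-cpu data. */
    #define DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name)		\
    	DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
    	__aligned(PAGE_SIZE)

    #define DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name)		\
    	DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
    	__aligned(PAGE_SIZE)

    /* Cacheline-aligned, user-mapped variant, as used for cpu_tss below. */
    #define DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name)		\
    	DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \
    	____cacheline_aligned_in_smp

    Grouping the variables into one section means the page-table code only
    has to map the range covered by that section into the user copy of the
    page tables, rather than hunting down each variable individually.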

    Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
    Cc: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
    Cc: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
    Cc: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
    Cc: Richard Fellner <richard.fellner@student.tugraz.at>
    Cc: Andy Lutomirski <luto@kernel.org>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Kees Cook <keescook@google.com>
    Cc: Hugh Dickins <hughd@google.com>
    Cc: x86@kernel.org
    ---

    b/arch/x86/include/asm/desc.h | 2 +-
    b/arch/x86/include/asm/processor.h | 2 +-
    b/arch/x86/kernel/cpu/common.c | 4 ++--
    b/arch/x86/kernel/process.c | 2 +-
    4 files changed, 5 insertions(+), 5 deletions(-)

    diff -puN arch/x86/include/asm/desc.h~kaiser-prep-x86-percpu-user-mapped arch/x86/include/asm/desc.h
    --- a/arch/x86/include/asm/desc.h~kaiser-prep-x86-percpu-user-mapped 2017-11-22 15:45:45.913619747 -0800
    +++ b/arch/x86/include/asm/desc.h 2017-11-22 15:45:45.923619747 -0800
    @@ -46,7 +46,7 @@ struct gdt_page {
    struct desc_struct gdt[GDT_ENTRIES];
    } __attribute__((aligned(PAGE_SIZE)));

    -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
    +DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct gdt_page, gdt_page);

    /* Provide the original GDT */
    static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu)
    diff -puN arch/x86/include/asm/processor.h~kaiser-prep-x86-percpu-user-mapped arch/x86/include/asm/processor.h
    --- a/arch/x86/include/asm/processor.h~kaiser-prep-x86-percpu-user-mapped 2017-11-22 15:45:45.915619747 -0800
    +++ b/arch/x86/include/asm/processor.h 2017-11-22 15:45:45.923619747 -0800
    @@ -356,7 +356,7 @@ struct tss_struct {
    unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
    } __attribute__((__aligned__(PAGE_SIZE)));

    -DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
    +DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct tss_struct, cpu_tss);

    /*
    * sizeof(unsigned long) coming from an extra "long" at the end
    diff -puN arch/x86/kernel/cpu/common.c~kaiser-prep-x86-percpu-user-mapped arch/x86/kernel/cpu/common.c
    --- a/arch/x86/kernel/cpu/common.c~kaiser-prep-x86-percpu-user-mapped 2017-11-22 15:45:45.917619747 -0800
    +++ b/arch/x86/kernel/cpu/common.c 2017-11-22 15:45:45.924619747 -0800
    @@ -98,7 +98,7 @@ static const struct cpu_dev default_cpu

    static const struct cpu_dev *this_cpu = &default_cpu;

    -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
    +DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct gdt_page, gdt_page) = { .gdt = {
    #ifdef CONFIG_X86_64
    /*
    * We need valid kernel segments for data and code in long mode too
    @@ -517,7 +517,7 @@ static const unsigned int exception_stac
    [DEBUG_STACK - 1] = DEBUG_STKSZ
    };

    -static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
    +DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(char, exception_stacks
    [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
    #endif

    diff -puN arch/x86/kernel/process.c~kaiser-prep-x86-percpu-user-mapped arch/x86/kernel/process.c
    --- a/arch/x86/kernel/process.c~kaiser-prep-x86-percpu-user-mapped 2017-11-22 15:45:45.919619747 -0800
    +++ b/arch/x86/kernel/process.c 2017-11-22 15:45:45.924619747 -0800
    @@ -47,7 +47,7 @@
    * section. Since TSS's are completely CPU-local, we want them
    * on exact cacheline boundaries, to eliminate cacheline ping-pong.
    */
    -__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
    +__visible DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct tss_struct, cpu_tss) = {
    .x86_tss = {
    /*
    * .sp0 is only used when entering ring 0 from a lower