From: Adrian Hunter <adrian.hunter@intel.com>
    Subject: [PATCH V3 03/17] x86: Add entry trampolines to kcore
    Date: 22 May 2018

    Without program headers for PTI entry trampoline pages, the trampoline
    virtual addresses do not map to anything.

    Example before:

    sudo gdb --quiet vmlinux /proc/kcore
    Reading symbols from vmlinux...done.
    [New process 1]
    Core was generated by `BOOT_IMAGE=/boot/vmlinuz-4.16.0 root=UUID=a6096b83-b763-4101-807e-f33daff63233'.
    #0 0x0000000000000000 in irq_stack_union ()
    (gdb) x /21ib 0xfffffe0000006000
    0xfffffe0000006000: Cannot access memory at address 0xfffffe0000006000
    (gdb) quit

    After:

    sudo gdb --quiet vmlinux /proc/kcore
    [sudo] password for ahunter:
    Reading symbols from vmlinux...done.
    [New process 1]
    Core was generated by `BOOT_IMAGE=/boot/vmlinuz-4.16.0-fix-4-00005-gd6e65a8b4072 root=UUID=a6096b83-b7'.
    #0 0x0000000000000000 in irq_stack_union ()
    (gdb) x /21ib 0xfffffe0000006000
    0xfffffe0000006000: swapgs
    0xfffffe0000006003: mov %rsp,-0x3e12(%rip) # 0xfffffe00000021f8
    0xfffffe000000600a: xchg %ax,%ax
    0xfffffe000000600c: mov %cr3,%rsp
    0xfffffe000000600f: bts $0x3f,%rsp
    0xfffffe0000006014: and $0xffffffffffffe7ff,%rsp
    0xfffffe000000601b: mov %rsp,%cr3
    0xfffffe000000601e: mov -0x3019(%rip),%rsp # 0xfffffe000000300c
    0xfffffe0000006025: pushq $0x2b
    0xfffffe0000006027: pushq -0x3e35(%rip) # 0xfffffe00000021f8
    0xfffffe000000602d: push %r11
    0xfffffe000000602f: pushq $0x33
    0xfffffe0000006031: push %rcx
    0xfffffe0000006032: push %rdi
    0xfffffe0000006033: mov $0xffffffff91a00010,%rdi
    0xfffffe000000603a: callq 0xfffffe0000006046
    0xfffffe000000603f: pause
    0xfffffe0000006041: lfence
    0xfffffe0000006044: jmp 0xfffffe000000603f
    0xfffffe0000006046: mov %rdi,(%rsp)
    0xfffffe000000604a: retq
    (gdb) quit

    In addition, entry trampolines all map to the same page. Represent that by
    giving the corresponding program headers in kcore the same offset.

    This has the benefit that, when perf tools use /proc/kcore as a source for
    kernel object code, samples from different CPUs' trampolines are aggregated
    together. Note that such aggregation is normal for profiling, i.e. people want
    to profile the object code, not every virtual address that the object code
    might be mapped to (across different processes, for example).
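
    As a sanity check, not part of this patch, the PT_LOAD program headers of
    /proc/kcore can be dumped from user space to confirm that the per-cpu
    trampoline segments share one file offset. Below is a minimal sketch,
    assuming a 64-bit ELF /proc/kcore and root privileges; the file name
    phdrs.c and the output format are illustrative only:

    /* phdrs.c: list PT_LOAD segments of /proc/kcore (illustrative only) */
    #include <elf.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
    	FILE *f = fopen("/proc/kcore", "rb");
    	Elf64_Ehdr eh;
    	Elf64_Phdr ph;
    	int i;

    	/* Read the ELF header, then seek to the program header table */
    	if (!f || fread(&eh, sizeof(eh), 1, f) != 1) {
    		perror("/proc/kcore");
    		return EXIT_FAILURE;
    	}
    	if (fseek(f, (long)eh.e_phoff, SEEK_SET)) {
    		perror("fseek");
    		return EXIT_FAILURE;
    	}
    	/* Print virtual address, file offset and size of each PT_LOAD segment */
    	for (i = 0; i < eh.e_phnum; i++) {
    		if (fread(&ph, sizeof(ph), 1, f) != 1)
    			break;
    		if (ph.p_type == PT_LOAD)
    			printf("vaddr 0x%016llx offset 0x%llx size 0x%llx\n",
    			       (unsigned long long)ph.p_vaddr,
    			       (unsigned long long)ph.p_offset,
    			       (unsigned long long)ph.p_filesz);
    	}
    	fclose(f);
    	return EXIT_SUCCESS;
    }

    Compile with e.g. "gcc -o phdrs phdrs.c" and run as root. With the patch
    applied, the trampoline PT_LOAD entries are expected to show distinct
    virtual addresses but identical offsets.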

    Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
    ---
    arch/x86/mm/cpu_entry_area.c | 10 ++++++++++
    fs/proc/kcore.c              |  7 +++++--
    include/linux/kcore.h        | 13 +++++++++++++
    3 files changed, 28 insertions(+), 2 deletions(-)

    diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
    index d1da5cf4b2de..c727a2fbe613 100644
    --- a/arch/x86/mm/cpu_entry_area.c
    +++ b/arch/x86/mm/cpu_entry_area.c
    @@ -3,6 +3,7 @@
     #include <linux/spinlock.h>
     #include <linux/percpu.h>
     #include <linux/kallsyms.h>
    +#include <linux/kcore.h>
     
     #include <asm/cpu_entry_area.h>
     #include <asm/pgtable.h>
    @@ -14,6 +15,7 @@
     #ifdef CONFIG_X86_64
     static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
     	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
    +static DEFINE_PER_CPU(struct kcore_list, kcore_entry_trampoline);
     #endif
     
     struct cpu_entry_area *get_cpu_entry_area(int cpu)
    @@ -147,6 +149,14 @@ static void __init setup_cpu_entry_area(int cpu)
     
     	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
     		    __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
    +	/*
    +	 * The cpu_entry_area alias addresses are not in the kernel binary
    +	 * so they do not show up in /proc/kcore normally. This adds entries
    +	 * for them manually.
    +	 */
    +	kclist_add_remap(&per_cpu(kcore_entry_trampoline, cpu),
    +			 _entry_trampoline,
    +			 &get_cpu_entry_area(cpu)->entry_trampoline, PAGE_SIZE);
     #endif
     	percpu_setup_debug_store(cpu);
     }
    diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
    index e64ecb9f2720..00282f134336 100644
    --- a/fs/proc/kcore.c
    +++ b/fs/proc/kcore.c
    @@ -383,8 +383,11 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
     		phdr->p_type = PT_LOAD;
     		phdr->p_flags = PF_R|PF_W|PF_X;
     		phdr->p_offset = kc_vaddr_to_offset(m->addr) + dataoff;
    -		phdr->p_vaddr = (size_t)m->addr;
    -		if (m->type == KCORE_RAM || m->type == KCORE_TEXT)
    +		if (m->type == KCORE_REMAP)
    +			phdr->p_vaddr = (size_t)m->vaddr;
    +		else
    +			phdr->p_vaddr = (size_t)m->addr;
    +		if (m->type == KCORE_RAM || m->type == KCORE_TEXT || m->type == KCORE_REMAP)
     			phdr->p_paddr = __pa(m->addr);
     		else
     			phdr->p_paddr = (elf_addr_t)-1;
    diff --git a/include/linux/kcore.h b/include/linux/kcore.h
    index 80db19d3a505..3a11ce51e137 100644
    --- a/include/linux/kcore.h
    +++ b/include/linux/kcore.h
    @@ -12,11 +12,13 @@ enum kcore_type {
     	KCORE_VMEMMAP,
     	KCORE_USER,
     	KCORE_OTHER,
    +	KCORE_REMAP,
     };
     
     struct kcore_list {
     	struct list_head list;
     	unsigned long addr;
    +	unsigned long vaddr;
     	size_t size;
     	int type;
     };
    @@ -30,11 +32,22 @@ struct vmcore {
     
     #ifdef CONFIG_PROC_KCORE
     extern void kclist_add(struct kcore_list *, void *, size_t, int type);
    +static inline
    +void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
    +{
    +	m->vaddr = (unsigned long)vaddr;
    +	kclist_add(m, addr, sz, KCORE_REMAP);
    +}
     #else
     static inline
     void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
     {
     }
    +
    +static inline
    +void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
    +{
    +}
     #endif
     
     #endif /* _LINUX_KCORE_H */
    --
    1.9.1