    From: Lai Jiangshan <laijs@linux.alibaba.com>
    Subject: [PATCH V6 47/49] x86/entry: Remove the unused ASM macros

    These macros are now implemented and used in C code; the ASM versions
    are no longer needed.

    FENCE_SWAPGS_USER_ENTRY is not removed because it is still used in the
    NMI userspace path. It may become removable with future entry code
    enhancements.
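
    For reference, the logic of the removed SAVE_AND_SWITCH_TO_KERNEL_CR3 and
    RESTORE_CR3 macros corresponds roughly to the C sketch below. This is only
    an illustration of the shape such a C replacement can take, not the code
    added by this series; pti_user_pcid_flush_test_and_clear() is a
    hypothetical helper standing in for the THIS_CPU_user_pcid_flush_mask
    handling, while the feature flags, PTI_USER_* masks and CR3 accessors are
    existing kernel symbols.

    /*
     * Illustrative sketch only. Assumes <asm/special_insns.h>,
     * <asm/tlbflush.h> and the PTI_USER_* masks (mirrored from calling.h)
     * are visible in the C entry code.
     */
    static __always_inline unsigned long save_and_switch_to_kernel_cr3(void)
    {
    	unsigned long cr3, kernel_cr3;

    	if (!static_cpu_has(X86_FEATURE_PTI))
    		return 0;

    	cr3 = __native_read_cr3();

    	/* User bit set: user page tables are active, switch to kernel CR3. */
    	if (cr3 & PTI_USER_PGTABLE_MASK) {
    		kernel_cr3 = cr3 & ~PTI_USER_PGTABLE_AND_PCID_MASK;
    		if (static_cpu_has(X86_FEATURE_PCID))
    			kernel_cr3 |= CR3_NOFLUSH;
    		native_write_cr3(kernel_cr3);
    	}

    	return cr3;
    }

    static __always_inline void restore_cr3(unsigned long saved_cr3)
    {
    	if (!static_cpu_has(X86_FEATURE_PTI))
    		return;

    	/* No need to restore when the saved CR3 is the kernel CR3. */
    	if (!(saved_cr3 & PTI_USER_PGTABLE_MASK))
    		return;

    	if (static_cpu_has(X86_FEATURE_PCID)) {
    		unsigned short asid = saved_cr3 & 0x7ff;

    		/*
    		 * Hypothetical helper: test-and-clear the pending-flush bit
    		 * for this user ASID on this CPU. If no flush is pending,
    		 * keep the TLB contents via the NOFLUSH bit.
    		 */
    		if (!pti_user_pcid_flush_test_and_clear(asid))
    			saved_cr3 |= CR3_NOFLUSH;
    	}

    	native_write_cr3(saved_cr3);
    }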

    Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
    ---
    arch/x86/entry/calling.h | 99 ----------------------------------------
    1 file changed, 99 deletions(-)

    diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
    index 9065c31d2875..d42012fc694d 100644
    --- a/arch/x86/entry/calling.h
    +++ b/arch/x86/entry/calling.h
    @@ -210,53 +210,6 @@ For 32-bit we have the following conventions - kernel is built with
    popq %rax
    .endm

    -.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
    - ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
    - movq %cr3, \scratch_reg
    - movq \scratch_reg, \save_reg
    - /*
    - * Test the user pagetable bit. If set, then the user page tables
    - * are active. If clear CR3 already has the kernel page table
    - * active.
    - */
    - bt $PTI_USER_PGTABLE_BIT, \scratch_reg
    - jnc .Ldone_\@
    -
    - ADJUST_KERNEL_CR3 \scratch_reg
    - movq \scratch_reg, %cr3
    -
    -.Ldone_\@:
    -.endm
    -
    -.macro RESTORE_CR3 scratch_reg:req save_reg:req
    - ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
    -
    - /* No need to restore when the saved CR3 is kernel CR3. */
    - bt $PTI_USER_PGTABLE_BIT, \save_reg
    - jnc .Lend_\@
    -
    - ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
    -
    - /*
    - * Check if there's a pending flush for the user ASID we're
    - * about to set.
    - */
    - movq \save_reg, \scratch_reg
    - andq $(0x7FF), \scratch_reg
    - bt \scratch_reg, THIS_CPU_user_pcid_flush_mask
    - jnc .Lnoflush_\@
    -
    - btr \scratch_reg, THIS_CPU_user_pcid_flush_mask
    - jmp .Lwrcr3_\@
    -
    -.Lnoflush_\@:
    - SET_NOFLUSH_BIT \save_reg
    -
    -.Lwrcr3_\@:
    - movq \save_reg, %cr3
    -.Lend_\@:
    -.endm
    -
    #else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

    .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
    @@ -265,10 +218,6 @@ For 32-bit we have the following conventions - kernel is built with
    .endm
    .macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
    .endm
    -.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
    -.endm
    -.macro RESTORE_CR3 scratch_reg:req save_reg:req
    -.endm

    #endif

    @@ -277,17 +226,10 @@ For 32-bit we have the following conventions - kernel is built with
    *
    * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
    * prevent a speculative swapgs when coming from kernel space.
    - *
    - * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
    - * to prevent the swapgs from getting speculatively skipped when coming from
    - * user space.
    */
    .macro FENCE_SWAPGS_USER_ENTRY
    ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
    .endm
    -.macro FENCE_SWAPGS_KERNEL_ENTRY
    - ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
    -.endm

    .macro STACKLEAK_ERASE_NOCLOBBER
    #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
    @@ -297,12 +239,6 @@ For 32-bit we have the following conventions - kernel is built with
    #endif
    .endm

    -.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
    - rdgsbase \save_reg
    - GET_PERCPU_BASE \scratch_reg
    - wrgsbase \scratch_reg
    -.endm
    -
    #else /* CONFIG_X86_64 */
    # undef UNWIND_HINT_IRET_REGS
    # define UNWIND_HINT_IRET_REGS
    @@ -313,38 +249,3 @@ For 32-bit we have the following conventions - kernel is built with
    call stackleak_erase
    #endif
    .endm
    -
    -#ifdef CONFIG_SMP
    -
    -/*
    - * CPU/node NR is loaded from the limit (size) field of a special segment
    - * descriptor entry in GDT.
    - */
    -.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
    - movq $__CPUNODE_SEG, \reg
    - lsl \reg, \reg
    -.endm
    -
    -/*
    - * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
    - * We normally use %gs for accessing per-CPU data, but we are setting up
    - * %gs here and obviously can not use %gs itself to access per-CPU data.
    - *
    - * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and
    - * may not restore the host's value until the CPU returns to userspace.
    - * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
    - * while running KVM's run loop.
    - */
    -.macro GET_PERCPU_BASE reg:req
    - LOAD_CPU_AND_NODE_SEG_LIMIT \reg
    - andq $VDSO_CPUNODE_MASK, \reg
    - movq __per_cpu_offset(, \reg, 8), \reg
    -.endm
    -
    -#else
    -
    -.macro GET_PERCPU_BASE reg:req
    - movq pcpu_unit_offsets(%rip), \reg
    -.endm
    -
    -#endif /* CONFIG_SMP */
    --
    2.19.1.6.gb485710b