From: Josh Poimboeuf <jpoimboe@redhat.com>
Subject: [PATCH 1/9] x86/entry/head/32: use local labels
Date: 2016-09-20

Add the local label prefix to all non-function named labels in head_32.S
and entry_32.S. In addition to decluttering the symbol table, this will
also help stack traces make more sense. For example, the last reported
function in the idle task stack trace will be startup_32_smp() instead
of is486().

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
---
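A quick note on the mechanism, with a minimal sketch (the file and
label names below are hypothetical, not part of this patch): on ELF
targets, GNU as treats labels starting with ".L" as assembler-local
and omits them from the object file's symbol table.

	/* demo.S -- hypothetical example */
	.text
	.globl	func
func:
	jmp	.Llocal
.Llocal:			/* .L prefix: no symbol emitted */
	ret

old_style:			/* unprefixed: lands in the symbol table */
	ret

Running "nm demo.o" should list func and old_style but not .Llocal.
Since nearest-symbol lookup is what names addresses in a stack trace,
dropping these labels from the symbol table means a trace resolves to
the enclosing function (e.g. startup_32_smp) rather than a stray label
like is486.
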
arch/x86/entry/entry_32.S | 55 ++++++++++++++++++++++++-----------------------
arch/x86/kernel/head_32.S | 32 +++++++++++++--------------
2 files changed, 44 insertions(+), 43 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index b75a8bc..378e912 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -64,7 +64,7 @@
# define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
-# define resume_kernel restore_all
+# define resume_kernel .Lrestore_all
#endif

.macro TRACE_IRQS_IRET
@@ -255,7 +255,7 @@ ENTRY(ret_from_fork)
/* When we fork, we trace the syscall return in the child, too. */
movl %esp, %eax
call syscall_return_slowpath
- jmp restore_all
+ jmp .Lrestore_all

/* kernel thread */
1: movl %edi, %eax
@@ -300,19 +300,19 @@ ENTRY(resume_userspace)
TRACE_IRQS_OFF
movl %esp, %eax
call prepare_exit_to_usermode
- jmp restore_all
+ jmp .Lrestore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
-need_resched:
+.Lneed_resched:
cmpl $0, PER_CPU_VAR(__preempt_count)
- jnz restore_all
+ jnz .Lrestore_all
testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
- jz restore_all
+ jz .Lrestore_all
call preempt_schedule_irq
- jmp need_resched
+ jmp .Lneed_resched
END(resume_kernel)
#endif

@@ -333,7 +333,7 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
*/
ENTRY(xen_sysenter_target)
addl $5*4, %esp /* remove xen-provided frame */
- jmp sysenter_past_esp
+ jmp .Lsysenter_past_esp
#endif

/*
@@ -370,7 +370,7 @@ ENTRY(xen_sysenter_target)
*/
ENTRY(entry_SYSENTER_32)
movl TSS_sysenter_sp0(%esp), %esp
-sysenter_past_esp:
+.Lsysenter_past_esp:
pushl $__USER_DS /* pt_regs->ss */
pushl %ebp /* pt_regs->sp (stashed in bp) */
pushfl /* pt_regs->flags (except IF = 0) */
@@ -501,11 +501,11 @@ ENTRY(entry_INT80_32)
call do_int80_syscall_32
.Lsyscall_32_done:

-restore_all:
+.Lrestore_all:
TRACE_IRQS_IRET
-restore_all_notrace:
+.Lrestore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
- ALTERNATIVE "jmp restore_nocheck", "", X86_BUG_ESPFIX
+ ALTERNATIVE "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX

movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
/*
@@ -517,22 +517,23 @@ restore_all_notrace:
movb PT_CS(%esp), %al
andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
- je ldt_ss # returning to user-space with LDT SS
+ je .Lldt_ss # returning to user-space with LDT SS
#endif
-restore_nocheck:
+.Lrestore_nocheck:
RESTORE_REGS 4 # skip orig_eax/error_code
-irq_return:
+.Lirq_return:
INTERRUPT_RETURN
+
.section .fixup, "ax"
ENTRY(iret_exc )
pushl $0 # no error code
pushl $do_iret_error
jmp error_code
.previous
- _ASM_EXTABLE(irq_return, iret_exc)
+ _ASM_EXTABLE(.Lirq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
-ldt_ss:
+.Lldt_ss:
/*
* Setup and switch to ESPFIX stack
*
@@ -561,7 +562,7 @@ ldt_ss:
*/
DISABLE_INTERRUPTS(CLBR_EAX)
lss (%esp), %esp /* switch to espfix segment */
- jmp restore_nocheck
+ jmp .Lrestore_nocheck
#endif
ENDPROC(entry_INT80_32)

@@ -881,7 +882,7 @@ ftrace_call:
popl %edx
popl %ecx
popl %eax
-ftrace_ret:
+.Lftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
@@ -951,7 +952,7 @@ GLOBAL(ftrace_regs_call)
popl %gs
addl $8, %esp /* Skip orig_ax and ip */
popf /* Pop flags at end (no addl to corrupt flags) */
- jmp ftrace_ret
+ jmp .Lftrace_ret

popf
jmp ftrace_stub
@@ -962,7 +963,7 @@ ENTRY(mcount)
jb ftrace_stub /* Paging not enabled yet? */

cmpl $ftrace_stub, ftrace_trace_function
- jnz trace
+ jnz .Ltrace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
cmpl $ftrace_stub, ftrace_graph_return
jnz ftrace_graph_caller
@@ -975,7 +976,7 @@ ftrace_stub:
ret

/* taken from glibc */
-trace:
+.Ltrace:
pushl %eax
pushl %ecx
pushl %edx
@@ -1114,7 +1115,7 @@ ENTRY(nmi)
movl %ss, %eax
cmpw $__ESPFIX_SS, %ax
popl %eax
- je nmi_espfix_stack
+ je .Lnmi_espfix_stack
#endif

pushl %eax # pt_regs->orig_ax
@@ -1130,7 +1131,7 @@ ENTRY(nmi)

/* Not on SYSENTER stack. */
call do_nmi
- jmp restore_all_notrace
+ jmp .Lrestore_all_notrace

.Lnmi_from_sysenter_stack:
/*
@@ -1141,10 +1142,10 @@ ENTRY(nmi)
movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
call do_nmi
movl %ebp, %esp
- jmp restore_all_notrace
+ jmp .Lrestore_all_notrace

#ifdef CONFIG_X86_ESPFIX32
-nmi_espfix_stack:
+.Lnmi_espfix_stack:
/*
* create the pointer to lss back
*/
@@ -1162,7 +1163,7 @@ nmi_espfix_stack:
call do_nmi
RESTORE_REGS
lss 12+4(%esp), %esp # back to espfix stack
- jmp irq_return
+ jmp .Lirq_return
#endif
END(nmi)

diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 5f40126..617fba2 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -247,19 +247,19 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
#ifdef CONFIG_PARAVIRT
/* This is can only trip for a broken bootloader... */
cmpw $0x207, pa(boot_params + BP_version)
- jb default_entry
+ jb .Ldefault_entry

/* Paravirt-compatible boot parameters. Look to see what architecture
we're booting under. */
movl pa(boot_params + BP_hardware_subarch), %eax
cmpl $num_subarch_entries, %eax
- jae bad_subarch
+ jae .Lbad_subarch

movl pa(subarch_entries)(,%eax,4), %eax
subl $__PAGE_OFFSET, %eax
jmp *%eax

-bad_subarch:
+.Lbad_subarch:
WEAK(lguest_entry)
WEAK(xen_entry)
/* Unknown implementation; there's really
@@ -269,14 +269,14 @@ WEAK(xen_entry)
__INITDATA

subarch_entries:
- .long default_entry /* normal x86/PC */
+ .long .Ldefault_entry /* normal x86/PC */
.long lguest_entry /* lguest hypervisor */
.long xen_entry /* Xen hypervisor */
- .long default_entry /* Moorestown MID */
+ .long .Ldefault_entry /* Moorestown MID */
num_subarch_entries = (. - subarch_entries) / 4
.previous
#else
- jmp default_entry
+ jmp .Ldefault_entry
#endif /* CONFIG_PARAVIRT */

#ifdef CONFIG_HOTPLUG_CPU
@@ -316,7 +316,7 @@ ENTRY(startup_32_smp)
call load_ucode_ap
#endif

-default_entry:
+.Ldefault_entry:
#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
X86_CR0_PG)
@@ -346,7 +346,7 @@ default_entry:
pushfl
popl %eax # get EFLAGS
testl $X86_EFLAGS_ID,%eax # did EFLAGS.ID remained set?
- jz enable_paging # hw disallowed setting of ID bit
+ jz .Lenable_paging # hw disallowed setting of ID bit
# which means no CPUID and no CR4

xorl %eax,%eax
@@ -356,13 +356,13 @@ default_entry:
movl $1,%eax
cpuid
andl $~1,%edx # Ignore CPUID.FPU
- jz enable_paging # No flags or only CPUID.FPU = no CR4
+ jz .Lenable_paging # No flags or only CPUID.FPU = no CR4

movl pa(mmu_cr4_features),%eax
movl %eax,%cr4

testb $X86_CR4_PAE, %al # check if PAE is enabled
- jz enable_paging
+ jz .Lenable_paging

/* Check if extended functions are implemented */
movl $0x80000000, %eax
@@ -370,7 +370,7 @@ default_entry:
/* Value must be in the range 0x80000001 to 0x8000ffff */
subl $0x80000001, %eax
cmpl $(0x8000ffff-0x80000001), %eax
- ja enable_paging
+ ja .Lenable_paging

/* Clear bogus XD_DISABLE bits */
call verify_cpu
@@ -379,7 +379,7 @@ default_entry:
cpuid
/* Execute Disable bit supported? */
btl $(X86_FEATURE_NX & 31), %edx
- jnc enable_paging
+ jnc .Lenable_paging

/* Setup EFER (Extended Feature Enable Register) */
movl $MSR_EFER, %ecx
@@ -389,7 +389,7 @@ default_entry:
/* Make changes effective */
wrmsr

-enable_paging:
+.Lenable_paging:

/*
* Enable paging
@@ -418,7 +418,7 @@ enable_paging:
*/
movb $4,X86 # at least 486
cmpl $-1,X86_CPUID
- je is486
+ je .Lis486

/* get vendor info */
xorl %eax,%eax # call CPUID with 0 -> return vendor ID
@@ -429,7 +429,7 @@ enable_paging:
movl %ecx,X86_VENDOR_ID+8 # last 4 chars

orl %eax,%eax # do we have processor info as well?
- je is486
+ je .Lis486

movl $1,%eax # Use the CPUID instruction to get CPU type
cpuid
@@ -443,7 +443,7 @@ enable_paging:
movb %cl,X86_MASK
movl %edx,X86_CAPABILITY

-is486:
+.Lis486:
movl $0x50022,%ecx # set AM, WP, NE and MP
movl %cr0,%eax
andl $0x80000011,%eax # Save PG,PE,ET
--
2.7.4