From: Denys Vlasenko <dvlasenk@redhat.com>
Subject: [PATCH 7/9] x86/asm/entry/32: tidy up some instructions
Date: 2015-03-31
After TEST instructions, use the logically correct JZ/JNZ mnemonics instead
of JE/JNE (this does not change the generated code).
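For illustration only (not part of the patch), one of the affected sites
before and after; JE/JNE and JZ/JNZ are aliases for the same opcodes, so the
binary is unchanged:

	# before (short-form opcode 0x75):
	testl	$_TIF_ALLWORK_MASK, %ecx
	jne	sysexit_audit
	# after (same opcode 0x75; JNZ names the ZF check that TEST performs):
	testl	$_TIF_ALLWORK_MASK, %ecx
	jnz	sysexit_audit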

Tidy up CMPW instructions:

Modern CPUs do not handle 16-bit operations well. Instructions with 16-bit
immediates are especially bad: on many CPUs they cause a length-changing-prefix
stall in the decoders, costing ~6 cycles to recover.

Replace the CMPWs with CMPLs. The forms with 8-bit sign-extended immediates
are a win because they are now smaller (no 0x66 prefix anymore); the forms
with 16-bit immediates are faster.
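For reference, an illustrative before/after of the __ESPFIX_SS comparison
with its encodings (not part of the patch; which immediate width the
assembler emits depends on the selector's value):

	# before: the 0x66 operand-size prefix plus a 16-bit immediate
	# (66 3D iw) is the length-changing-prefix case; with an imm8
	# (66 83 F8 ib) it merely wastes a prefix byte
	cmpw	$__ESPFIX_SS, %ax
	# after: no prefix; 83 F8 ib is one byte shorter than the CMPW imm8
	# form, 3D id is one byte longer but decodes without the stall
	cmpl	$__ESPFIX_SS, %eax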

Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
CC: Linus Torvalds <torvalds@linux-foundation.org>
CC: Steven Rostedt <rostedt@goodmis.org>
CC: Ingo Molnar <mingo@kernel.org>
CC: Borislav Petkov <bp@alien8.de>
CC: "H. Peter Anvin" <hpa@zytor.com>
CC: Andy Lutomirski <luto@amacapital.net>
CC: Oleg Nesterov <oleg@redhat.com>
CC: Frederic Weisbecker <fweisbec@gmail.com>
CC: Alexei Starovoitov <ast@plumgrid.com>
CC: Will Drewry <wad@chromium.org>
CC: Kees Cook <keescook@chromium.org>
CC: x86@kernel.org
CC: linux-kernel@vger.kernel.org
---
arch/x86/kernel/entry_32.S | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 4c8cc34..9a31d5e 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -432,7 +432,7 @@ sysenter_after_call:
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testl $_TIF_ALLWORK_MASK, %ecx
- jne sysexit_audit
+ jnz sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
movl PT_EIP(%esp), %edx
@@ -460,7 +460,7 @@ sysenter_audit:

sysexit_audit:
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
- jne syscall_exit_work
+ jnz syscall_exit_work
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY)
movl %eax,%edx /* second arg, syscall return value */
@@ -472,7 +472,7 @@ sysexit_audit:
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
- jne syscall_exit_work
+ jnz syscall_exit_work
movl PT_EAX(%esp),%eax /* reload syscall return value */
jmp sysenter_exit
#endif
@@ -510,7 +510,7 @@ syscall_exit:
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testl $_TIF_ALLWORK_MASK, %ecx # current->work
- jne syscall_exit_work
+ jnz syscall_exit_work

restore_all:
TRACE_IRQS_IRET
@@ -612,7 +612,7 @@ work_notifysig: # deal with pending signals and
#ifdef CONFIG_VM86
testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
movl %esp, %eax
- jne work_notifysig_v86 # returning to kernel-space or
+ jnz work_notifysig_v86 # returning to kernel-space or
# vm86-space
1:
#else
@@ -708,7 +708,7 @@ END(sysenter_badsys)
#ifdef CONFIG_X86_ESPFIX32
movl %ss, %eax
/* see if on espfix stack */
- cmpw $__ESPFIX_SS, %ax
+ cmpl $__ESPFIX_SS, %eax
jne 27f
movl $__KERNEL_DS, %eax
movl %eax, %ds
@@ -1275,7 +1275,7 @@ END(page_fault)
* the instruction that would have done it for sysenter.
*/
.macro FIX_STACK offset ok label
- cmpw $__KERNEL_CS, 4(%esp)
+ cmpl $__KERNEL_CS, 4(%esp)
jne \ok
\label:
movl TSS_sysenter_sp0 + \offset(%esp), %esp
@@ -1318,7 +1318,7 @@ ENTRY(nmi)
#ifdef CONFIG_X86_ESPFIX32
pushl_cfi %eax
movl %ss, %eax
- cmpw $__ESPFIX_SS, %ax
+ cmpl $__ESPFIX_SS, %eax
popl_cfi %eax
je nmi_espfix_stack
#endif
@@ -1352,7 +1352,7 @@ nmi_stack_fixup:

nmi_debug_stack_check:
/* We have a RING0_INT_FRAME here */
- cmpw $__KERNEL_CS,16(%esp)
+ cmpl $__KERNEL_CS,16(%esp)
jne nmi_stack_correct
cmpl $debug,(%esp)
jb nmi_stack_correct
--
1.8.1.4

