From: Josh Poimboeuf <jpoimboe@redhat.com>
Subject: [RFC PATCH 2/4] x86/asm: Convert some inline asm positional operands to named operands
Date: 31 Aug 2017
Convert some inline asm positional operands (e.g., %0, %1) to named
operands.

This is needed in preparation for the new ASM_CALL() macro, which won't
support positional operands.

It's also a good idea in its own right, as it makes the code more
readable and more robust.
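
For anyone unfamiliar with the syntax, here is a minimal sketch of the
difference between positional and named operands in GCC extended asm.
It is purely illustrative and not taken from the patch; the function
names are made up:

	/* Positional: %0 and %1 are resolved by operand order, so adding or
	 * reordering constraints silently changes what every %N refers to. */
	static inline unsigned long add_positional(unsigned long a, unsigned long b)
	{
		asm("add %1, %0" : "+r" (a) : "r" (b));
		return a;
	}

	/* Named: %[sum] and %[addend] stay attached to their constraints,
	 * regardless of where those appear in the operand lists. */
	static inline unsigned long add_named(unsigned long a, unsigned long b)
	{
		asm("add %[addend], %[sum]"
		    : [sum]    "+r" (a)
		    : [addend] "r"  (b));
		return a;
	}

The named form is what the hunks below convert to.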

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
---
 arch/x86/include/asm/apic.h           |  6 +--
 arch/x86/include/asm/cmpxchg_32.h     |  4 +-
 arch/x86/include/asm/mshyperv.h       | 19 +++----
 arch/x86/include/asm/page_64.h        |  5 +-
 arch/x86/include/asm/paravirt_types.h |  4 +-
 arch/x86/include/asm/percpu.h         | 11 ++--
 arch/x86/include/asm/processor.h      | 20 ++++----
 arch/x86/include/asm/rwsem.h          | 94 ++++++++++++++++++-----------------
 arch/x86/include/asm/special_insns.h  |  6 +--
 arch/x86/include/asm/uaccess.h        |  6 +--
 arch/x86/include/asm/uaccess_64.h     | 16 +++---
 11 files changed, 99 insertions(+), 92 deletions(-)

diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 5f01671c68f2..5a7e0eb38350 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -93,9 +93,9 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
{
volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);

- alternative_io("movl %0, %P1", "xchgl %0, %P1", X86_BUG_11AP,
- ASM_OUTPUT2("=r" (v), "=m" (*addr)),
- ASM_OUTPUT2("0" (v), "m" (*addr)));
+ alternative_io("movl %[val], %P[reg]", "xchgl %[val], %P[reg]",
+ X86_BUG_11AP,
+ ASM_OUTPUT2([val] "+r" (v), [reg] "+m" (*addr)));
}

static inline u32 native_apic_mem_read(u32 reg)
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index e4959d023af8..8154a317899f 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -85,7 +85,7 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
"lock; cmpxchg8b (%%esi)" , \
X86_FEATURE_CX8, \
"=A" (__ret), \
- "S" ((ptr)), "0" (__old), \
+ "S" ((ptr)), "A" (__old), \
"b" ((unsigned int)__new), \
"c" ((unsigned int)(__new>>32)) \
: "memory"); \
@@ -101,7 +101,7 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
"cmpxchg8b (%%esi)" , \
X86_FEATURE_CX8, \
"=A" (__ret), \
- "S" ((ptr)), "0" (__old), \
+ "S" ((ptr)), "A" (__old), \
"b" ((unsigned int)__new), \
"c" ((unsigned int)(__new>>32)) \
: "memory"); \
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 0d4b01c5e438..d0675d58fa32 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -183,11 +183,12 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
if (!hv_hypercall_pg)
return U64_MAX;

- __asm__ __volatile__("mov %4, %%r8\n"
- "call *%5"
+ __asm__ __volatile__("mov %[out], %%r8\n"
+ "call *%[pg]"
: "=a" (hv_status), "+r" (__sp),
"+c" (control), "+d" (input_address)
- : "r" (output_address), "m" (hv_hypercall_pg)
+ : [out] "r" (output_address),
+ [pg] "m" (hv_hypercall_pg)
: "cc", "memory", "r8", "r9", "r10", "r11");
#else
u32 input_address_hi = upper_32_bits(input_address);
@@ -198,13 +199,13 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
if (!hv_hypercall_pg)
return U64_MAX;

- __asm__ __volatile__("call *%7"
+ __asm__ __volatile__("call *%[pg]"
: "=A" (hv_status),
"+c" (input_address_lo), "+r" (__sp)
: "A" (control),
"b" (input_address_hi),
"D"(output_address_hi), "S"(output_address_lo),
- "m" (hv_hypercall_pg)
+ [pg] "m" (hv_hypercall_pg)
: "cc", "memory");
#endif /* !x86_64 */
return hv_status;
@@ -226,10 +227,10 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)

#ifdef CONFIG_X86_64
{
- __asm__ __volatile__("call *%4"
+ __asm__ __volatile__("call *%[pg]"
: "=a" (hv_status), "+r" (__sp),
"+c" (control), "+d" (input1)
- : "m" (hv_hypercall_pg)
+ : [pg] "m" (hv_hypercall_pg)
: "cc", "r8", "r9", "r10", "r11");
}
#else
@@ -237,13 +238,13 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
u32 input1_hi = upper_32_bits(input1);
u32 input1_lo = lower_32_bits(input1);

- __asm__ __volatile__ ("call *%5"
+ __asm__ __volatile__ ("call *%[pg]"
: "=A"(hv_status),
"+c"(input1_lo),
"+r"(__sp)
: "A" (control),
"b" (input1_hi),
- "m" (hv_hypercall_pg)
+ [pg] "m" (hv_hypercall_pg)
: "cc", "edi", "esi");
}
#endif
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index b50df06ad251..f7dbe752f80d 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -44,9 +44,8 @@ static inline void clear_page(void *page)
alternative_call_2(clear_page_orig,
clear_page_rep, X86_FEATURE_REP_GOOD,
clear_page_erms, X86_FEATURE_ERMS,
- "=D" (page),
- "0" (page)
- : "memory", "rax", "rcx");
+ "+D" (page),
+ ASM_NO_INPUT_CLOBBER("memory", "rax", "rcx"));
}

void copy_page(void *to, void *from);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0d793ef08e3d..a509259a3181 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -654,8 +654,8 @@ int paravirt_disable_iospace(void);
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
__PVOP_VCALL(op, \
"push %[_arg4];", "lea 4(%%esp),%%esp;", \
- "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
- "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
+ "a" ((u32)(arg1)), "d" ((u32)(arg2)), \
+ "c" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
__PVOP_CALL(rettype, op, "", "", \
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 9fa03604b2b3..2d1753758b0b 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -493,11 +493,14 @@ do { \
bool __ret; \
typeof(pcp1) __o1 = (o1), __n1 = (n1); \
typeof(pcp2) __o2 = (o2), __n2 = (n2); \
- alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
- "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t", \
+ alternative_io("leaq %P[p1], %%rsi\n\t" \
+ "call this_cpu_cmpxchg16b_emu\n\t", \
+ "cmpxchg16b " __percpu_arg([p1]) "\n\t" \
+ "setz %[ret]\n\t", \
X86_FEATURE_CX16, \
- ASM_OUTPUT2("=a" (__ret), "+m" (pcp1), \
- "+m" (pcp2), "+d" (__o2)), \
+ ASM_OUTPUT2([ret] "=a" (__ret), \
+ [p1] "+m" (pcp1), "+m" (pcp2), \
+ "+d" (__o2)), \
"b" (__n1), "c" (__n2), "a" (__o1) : "rsi"); \
__ret; \
})
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3fa26a61eabc..51cf1c7e9aca 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -692,18 +692,18 @@ static inline void sync_core(void)

asm volatile (
UNWIND_HINT_SAVE
- "mov %%ss, %0\n\t"
- "pushq %q0\n\t"
+ "mov %%ss, %[tmp]\n\t"
+ "pushq %q[tmp]\n\t"
"pushq %%rsp\n\t"
"addq $8, (%%rsp)\n\t"
"pushfq\n\t"
- "mov %%cs, %0\n\t"
- "pushq %q0\n\t"
+ "mov %%cs, %[tmp]\n\t"
+ "pushq %q[tmp]\n\t"
"pushq $1f\n\t"
"iretq\n\t"
UNWIND_HINT_RESTORE
"1:"
- : "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
+ : [tmp] "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
#endif
}

@@ -769,7 +769,7 @@ extern char ignore_fpu_irq;
# define BASE_PREFETCH ""
# define ARCH_HAS_PREFETCH
#else
-# define BASE_PREFETCH "prefetcht0 %P1"
+# define BASE_PREFETCH "prefetcht0 %P[x]"
#endif

/*
@@ -780,9 +780,9 @@ extern char ignore_fpu_irq;
*/
static inline void prefetch(const void *x)
{
- alternative_input(BASE_PREFETCH, "prefetchnta %P1",
+ alternative_input(BASE_PREFETCH, "prefetchnta %P[x]",
X86_FEATURE_XMM,
- "m" (*(const char *)x));
+ [x] "m" (*(const char *)x));
}

/*
@@ -792,9 +792,9 @@ static inline void prefetch(const void *x)
*/
static inline void prefetchw(const void *x)
{
- alternative_input(BASE_PREFETCH, "prefetchw %P1",
+ alternative_input(BASE_PREFETCH, "prefetchw %P[x]",
X86_FEATURE_3DNOWPREFETCH,
- "m" (*(const char *)x));
+ [x] "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index a34e0d4b957d..b715152fb2b5 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -63,14 +63,14 @@
static inline void __down_read(struct rw_semaphore *sem)
{
asm volatile("# beginning down_read\n\t"
- LOCK_PREFIX _ASM_INC "(%1)\n\t"
+ LOCK_PREFIX _ASM_INC "(%[sem])\n\t"
/* adds 0x00000001 */
- " jns 1f\n"
- " call call_rwsem_down_read_failed\n"
+ " jns 1f\n\t"
+ " call call_rwsem_down_read_failed\n\t"
"1:\n\t"
"# ending down_read\n\t"
: "+m" (sem->count)
- : "a" (sem)
+ : [sem] "a" (sem)
: "memory", "cc");
}

@@ -81,17 +81,18 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
{
long result, tmp;
asm volatile("# beginning __down_read_trylock\n\t"
- " mov %0,%1\n\t"
+ " mov %[count],%[result]\n\t"
"1:\n\t"
- " mov %1,%2\n\t"
- " add %3,%2\n\t"
+ " mov %[result],%[tmp]\n\t"
+ " add %[bias],%[tmp]\n\t"
" jle 2f\n\t"
- LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ LOCK_PREFIX " cmpxchg %[tmp],%[count]\n\t"
" jnz 1b\n\t"
"2:\n\t"
"# ending __down_read_trylock\n\t"
- : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
- : "i" (RWSEM_ACTIVE_READ_BIAS)
+ : [count] "+m" (sem->count), [result] "=&a" (result),
+ [tmp] "=&r" (tmp)
+ : [bias] "i" (RWSEM_ACTIVE_READ_BIAS)
: "memory", "cc");
return result >= 0;
}
@@ -99,25 +100,27 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
/*
* lock for writing
*/
-#define ____down_write(sem, slow_path) \
-({ \
- long tmp; \
- struct rw_semaphore* ret; \
- register void *__sp asm(_ASM_SP); \
- \
- asm volatile("# beginning down_write\n\t" \
- LOCK_PREFIX " xadd %1,(%4)\n\t" \
- /* adds 0xffff0001, returns the old value */ \
- " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
- /* was the active mask 0 before? */\
- " jz 1f\n" \
- " call " slow_path "\n" \
- "1:\n" \
- "# ending down_write" \
- : "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \
- : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
- : "memory", "cc"); \
- ret; \
+#define ____down_write(sem, slow_path) \
+({ \
+ long tmp; \
+ struct rw_semaphore* ret; \
+ register void *__sp asm(_ASM_SP); \
+ \
+ asm volatile("# beginning down_write\n\t" \
+ LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t" \
+ /* adds 0xffff0001, returns the old value */ \
+ " test " __ASM_SEL_RAW(%w,%k) "[tmp]," \
+ __ASM_SEL_RAW(%w,%k) "[tmp]\n\t" \
+ /* was the active mask 0 before? */ \
+ " jz 1f\n" \
+ " call " slow_path "\n\t" \
+ "1:\n\t" \
+ "# ending down_write\n\t" \
+ : "+m" (sem->count), [tmp] "=d" (tmp), "=a" (ret), \
+ "+r" (__sp) \
+ : [sem] "a" (sem), "d" (RWSEM_ACTIVE_WRITE_BIAS) \
+ : "memory", "cc"); \
+ ret; \
})

static inline void __down_write(struct rw_semaphore *sem)
@@ -141,21 +144,22 @@ static inline bool __down_write_trylock(struct rw_semaphore *sem)
bool result;
long tmp0, tmp1;
asm volatile("# beginning __down_write_trylock\n\t"
- " mov %0,%1\n\t"
+ " mov %[count],%[tmp0]\n\t"
"1:\n\t"
- " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
+ " test " __ASM_SEL_RAW(%w,%k) "[tmp0],"
+ __ASM_SEL_RAW(%w,%k) "[tmp0]\n\t"
/* was the active mask 0 before? */
" jnz 2f\n\t"
- " mov %1,%2\n\t"
- " add %4,%2\n\t"
- LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ " mov %[tmp0],%[tmp1]\n\t"
+ " add %[bias],%[tmp1]\n\t"
+ LOCK_PREFIX " cmpxchg %[tmp1],%[count]\n\t"
" jnz 1b\n\t"
"2:\n\t"
CC_SET(e)
"# ending __down_write_trylock\n\t"
- : "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1),
- CC_OUT(e) (result)
- : "er" (RWSEM_ACTIVE_WRITE_BIAS)
+ : [count] "+m" (sem->count), [tmp0] "=&a" (tmp0),
+ [tmp1] "=&r" (tmp1), CC_OUT(e) (result)
+ : [bias] "er" (RWSEM_ACTIVE_WRITE_BIAS)
: "memory");
return result;
}
@@ -167,14 +171,14 @@ static inline void __up_read(struct rw_semaphore *sem)
{
long tmp;
asm volatile("# beginning __up_read\n\t"
- LOCK_PREFIX " xadd %1,(%2)\n\t"
+ LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"
/* subtracts 1, returns the old value */
" jns 1f\n\t"
" call call_rwsem_wake\n" /* expects old value in %edx */
"1:\n"
"# ending __up_read\n"
- : "+m" (sem->count), "=d" (tmp)
- : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS)
+ : "+m" (sem->count), [tmp] "=d" (tmp)
+ : [sem] "a" (sem), "d" (-RWSEM_ACTIVE_READ_BIAS)
: "memory", "cc");
}

@@ -185,14 +189,14 @@ static inline void __up_write(struct rw_semaphore *sem)
{
long tmp;
asm volatile("# beginning __up_write\n\t"
- LOCK_PREFIX " xadd %1,(%2)\n\t"
+ LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"
/* subtracts 0xffff0001, returns the old value */
" jns 1f\n\t"
" call call_rwsem_wake\n" /* expects old value in %edx */
"1:\n\t"
"# ending __up_write\n"
- : "+m" (sem->count), "=d" (tmp)
- : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
+ : "+m" (sem->count), [tmp] "=d" (tmp)
+ : [sem] "a" (sem), "d" (-RWSEM_ACTIVE_WRITE_BIAS)
: "memory", "cc");
}

@@ -202,7 +206,7 @@ static inline void __up_write(struct rw_semaphore *sem)
static inline void __downgrade_write(struct rw_semaphore *sem)
{
asm volatile("# beginning __downgrade_write\n\t"
- LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
+ LOCK_PREFIX _ASM_ADD "%[bias],(%[sem])\n\t"
/*
* transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
* 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
@@ -212,7 +216,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
"1:\n\t"
"# ending __downgrade_write\n"
: "+m" (sem->count)
- : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
+ : [sem] "a" (sem), [bias] "er" (-RWSEM_WAITING_BIAS)
: "memory", "cc");
}

diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 9efaabf5b54b..aeee6517ccc6 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -216,10 +216,10 @@ static inline void clflush(volatile void *__p)

static inline void clflushopt(volatile void *__p)
{
- alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
- ".byte 0x66; clflush %P0",
+ alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P[p]",
+ ".byte 0x66; clflush %P[p]",
X86_FEATURE_CLFLUSHOPT,
- "+m" (*(volatile char __force *)__p));
+ [p] "+m" (*(volatile char __force *)__p));
}

static inline void clwb(volatile void *__p)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 184eb9894dae..12fb37310872 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -169,16 +169,16 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
register void *__sp asm(_ASM_SP); \
__chk_user_ptr(ptr); \
might_fault(); \
- asm volatile("call __get_user_%P4" \
+ asm volatile("call __get_user_%P[size]" \
: "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp) \
- : "0" (ptr), "i" (sizeof(*(ptr)))); \
+ : "a" (ptr), [size] "i" (sizeof(*(ptr)))); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
__builtin_expect(__ret_gu, 0); \
})

#define __put_user_x(size, x, ptr, __ret_pu) \
asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
- : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+ : "a" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index b16f6a1d8b26..e09f71424795 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -34,14 +34,14 @@ copy_user_generic(void *to, const void *from, unsigned len)
* Otherwise, use copy_user_generic_unrolled.
*/
alternative_call_2(copy_user_generic_unrolled,
- copy_user_generic_string,
- X86_FEATURE_REP_GOOD,
- copy_user_enhanced_fast_string,
- X86_FEATURE_ERMS,
- ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
- "=d" (len)),
- "1" (to), "2" (from), "3" (len)
- : "memory", "rcx", "r8", "r9", "r10", "r11");
+ copy_user_generic_string,
+ X86_FEATURE_REP_GOOD,
+ copy_user_enhanced_fast_string,
+ X86_FEATURE_ERMS,
+ ASM_OUTPUT2("=a" (ret), "+D" (to), "+S" (from),
+ "+d" (len)),
+ ASM_NO_INPUT_CLOBBER("memory", "rcx", "r8", "r9",
+ "r10", "r11"));
return ret;
}

--
2.13.5