Date: 26 Nov 2010
From: Christoph Lameter <cl@linux.com>
Subject: [thisops uV2 04/10] x86: Support for this_cpu_add,sub,dec,inc_return
Supply an implementation for x86 in order to generate more efficient code: with xadd, the add and the retrieval of the resulting value become a single instruction.
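
To illustrate the intended use, here is a hypothetical call site (not part of
the patch; nr_events, EVENT_THRESHOLD and flush_events() are made up for
illustration). The increment and the read of the new value compile down to a
single xadd on x86:

#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned int, nr_events);

static void note_event(void)
{
	/* bump the per-cpu counter and fetch the new value in one step */
	if (this_cpu_inc_return(nr_events) == EVENT_THRESHOLD)
		flush_events();
}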

Signed-off-by: Christoph Lameter <cl@linux.com>

---
arch/x86/include/asm/percpu.h | 50 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 50 insertions(+)
Index: linux-2.6/arch/x86/include/asm/percpu.h
===================================================================
--- linux-2.6.orig/arch/x86/include/asm/percpu.h 2010-11-23 16:35:19.000000000 -0600
+++ linux-2.6/arch/x86/include/asm/percpu.h 2010-11-23 16:36:05.000000000 -0600
@@ -177,6 +177,45 @@ do { \
} \
} while (0)

+
+/*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val)					\
+({									\
+	typedef typeof(var) pao_T__;					\
+	typeof(var) pfo_ret__ = val;					\
+	if (0) {							\
+		pao_T__ pao_tmp__;					\
+		pao_tmp__ = (val);					\
+		(void)pao_tmp__;					\
+	}								\
+	switch (sizeof(var)) {						\
+	case 1:								\
+		asm("xaddb %0, "__percpu_arg(1)				\
+			    : "+q" (pfo_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 2:								\
+		asm("xaddw %0, "__percpu_arg(1)				\
+			    : "+r" (pfo_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 4:								\
+		asm("xaddl %0, "__percpu_arg(1)				\
+			    : "+r" (pfo_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 8:								\
+		asm("xaddq %0, "__percpu_arg(1)				\
+			    : "+re" (pfo_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	default: __bad_percpu_size();					\
+	}								\
+	pfo_ret__ + (val);						\
+})
+
#define percpu_from_op(op, var, constraint) \
({ \
typeof(var) pfo_ret__; \
@@ -300,6 +339,14 @@ do { \
#define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)

+#ifndef CONFIG_M386
+#define __this_cpu_add_return_1(pcp, val) percpu_add_return_op((pcp), val)
+#define __this_cpu_add_return_2(pcp, val) percpu_add_return_op((pcp), val)
+#define __this_cpu_add_return_4(pcp, val) percpu_add_return_op((pcp), val)
+#define this_cpu_add_return_1(pcp, val) percpu_add_return_op((pcp), val)
+#define this_cpu_add_return_2(pcp, val) percpu_add_return_op((pcp), val)
+#define this_cpu_add_return_4(pcp, val) percpu_add_return_op((pcp), val)
+#endif
/*
* Per cpu atomic 64 bit operations are only available under 64 bit.
* 32 bit must fall back to generic operations.
@@ -324,6 +371,9 @@ do { \
#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)

+#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op((pcp), val)
+#define this_cpu_add_return_8(pcp, val) percpu_add_return_op((pcp), val)
+
#endif

/* This is not atomic against other CPUs -- CPU preemption needs to be off */

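A note on the final expression of percpu_add_return_op(): xadd leaves the old
value of the memory operand in the source register, so "pfo_ret__ + (val)"
(the old value plus the increment) is the value the per-cpu variable holds
after the addition, which is exactly what an add_return operation must report.
A minimal user-space sketch of the same idiom, assuming gcc on x86 and using
plain memory instead of a per-cpu variable:

#include <stdio.h>

static int add_return(int *mem, int val)
{
	int old = val;

	/* xadd: *mem += old, and the previous *mem lands in old */
	asm("xaddl %0, %1"
	    : "+r" (old), "+m" (*mem)
	    : : "memory");

	return old + val;	/* old value + increment = new value */
}

int main(void)
{
	int counter = 10;
	int ret = add_return(&counter, 5);

	printf("%d %d\n", ret, counter);	/* prints "15 15" */
	return 0;
}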

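Only the add_return primitive needs per-arch support. The sub, inc and dec
variants named in the subject are derived in the generic layer introduced
earlier in this series; a sketch of those derivations, as in
include/linux/percpu.h:

#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)

The CONFIG_M386 guard is needed because xadd first appeared with the i486,
so 386-only builds keep using the generic versions.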