Subject: [cpuops inc_return V1 1/9] percpu: Generic support for this_cpu_add,sub,dec,inc_return
Introduce generic support for this_cpu_add_return and the derived
this_cpu_sub_return, this_cpu_inc_return and this_cpu_dec_return
operations, along with their __this_cpu counterparts.

The fallback is to implement these operations in terms of the simpler
__this_cpu operations: a __this_cpu_add followed by a __this_cpu_read.
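
A minimal usage sketch (illustrative only, not part of the patch; the
per-cpu variable and function are made up for the example):

	DEFINE_PER_CPU(int, nr_events);		/* hypothetical counter */

	int count_event(void)
	{
		/*
		 * Add 1 to this CPU's instance of nr_events and return
		 * the new value; safe from preemptible context.
		 */
		return this_cpu_inc_return(nr_events);
	}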

Reviewed-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Christoph Lameter <cl@linux.com>

---
include/linux/percpu.h | 77 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 77 insertions(+)

Index: linux-2.6/include/linux/percpu.h
===================================================================
--- linux-2.6.orig/include/linux/percpu.h 2010-11-29 10:19:28.000000000 -0600
+++ linux-2.6/include/linux/percpu.h 2010-11-29 10:24:57.000000000 -0600
@@ -240,6 +240,25 @@ extern void __bad_size_call_parameter(vo
	pscr_ret__;							\
})

+#define __pcpu_size_call_return2(stem, pcp, ...)			\
+({									\
+	typeof(pcp) ret__;						\
+	__verify_pcpu_ptr(&(pcp));					\
+	switch(sizeof(pcp)) {						\
+	case 1: ret__ = stem##1(pcp, __VA_ARGS__);			\
+		break;							\
+	case 2: ret__ = stem##2(pcp, __VA_ARGS__);			\
+		break;							\
+	case 4: ret__ = stem##4(pcp, __VA_ARGS__);			\
+		break;							\
+	case 8: ret__ = stem##8(pcp, __VA_ARGS__);			\
+		break;							\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+	ret__;								\
+})
+
#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
@@ -529,6 +548,64 @@ do { \
# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif

+#define _this_cpu_generic_add_return(pcp, val)				\
+({									\
+	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	__this_cpu_add(pcp, val);					\
+	ret__ = __this_cpu_read(pcp);					\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_add_return
+# ifndef this_cpu_add_return_1
+# define this_cpu_add_return_1(pcp, val) _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_2
+# define this_cpu_add_return_2(pcp, val) _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_4
+# define this_cpu_add_return_4(pcp, val) _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_8
+# define this_cpu_add_return_8(pcp, val) _this_cpu_generic_add_return(pcp, val)
+# endif
+# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val))
+#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)
+
+#define __this_cpu_generic_add_return(pcp, val)			\
+({									\
+	typeof(pcp) ret__;						\
+	__this_cpu_add(pcp, val);					\
+	ret__ = __this_cpu_read(pcp);					\
+	ret__;								\
+})
+
+#ifndef __this_cpu_add_return
+# ifndef __this_cpu_add_return_1
+# define __this_cpu_add_return_1(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_2
+# define __this_cpu_add_return_2(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_4
+# define __this_cpu_add_return_4(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_8
+# define __this_cpu_add_return_8(pcp, val) __this_cpu_generic_add_return(pcp, val)
+# endif
+# define __this_cpu_add_return(pcp, val) __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
+#endif
+
+#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val))
+#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
+
/*
* IRQ safe versions of the per cpu RMW operations. Note that these operations
* are *not* safe against modification of the same variable from another

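As with the existing this_cpu/__this_cpu split, the plain
this_cpu_*_return operations are safe from preemptible context (the
generic fallback brackets the add and the read with preempt_disable()
and preempt_enable()), while the __this_cpu_*_return operations
require the caller to ensure the task cannot move to another
processor. A sketch of the difference (illustrative only; the per-cpu
variable and functions are made up for the example):

	DEFINE_PER_CPU(unsigned long, stat);	/* hypothetical counter */

	unsigned long add_from_preemptible(void)
	{
		/* The generic fallback disables preemption internally. */
		return this_cpu_add_return(stat, 8);
	}

	unsigned long add_with_caller_protection(void)
	{
		unsigned long v;

		/* The caller guarantees we stay on this processor. */
		preempt_disable();
		v = __this_cpu_add_return(stat, 8);
		preempt_enable();
		return v;
	}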
