From:	Andi Kleen <>
Subject:	[PATCH 12/13] x86: move __copy_*_nocache might fault check out of line
Date:	Fri, 9 Aug 2013 16:04:19 -0700
From: Andi Kleen <ak@linux.intel.com>
We can just as well do the normal conditional resched check out of
line. This saves one function call.
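In C terms, the shape of the change is the sketch below. The _old/_new
function names are purely for exposition; might_fault_debug_only() and
__copy_user_nocache_might_fault() are introduced by this series, and
might_fault_debug_only() presumably keeps only the debug-time check
inline:

	/* Before: the inline wrapper itself calls might_fault(), so
	 * every call site pays for an extra out-of-line call before
	 * the actual copy routine. */
	static inline int
	__copy_from_user_nocache_old(void *dst, const void __user *src,
				     unsigned size)
	{
		might_fault();
		return __copy_user_nocache(dst, src, size, 1);
	}

	/* After: the resched check is folded into the assembly entry
	 * point, which falls through into the copy loop, so the
	 * wrapper makes a single call. */
	static inline int
	__copy_from_user_nocache_new(void *dst, const void __user *src,
				     unsigned size)
	{
		might_fault_debug_only();
		return __copy_user_nocache_might_fault(dst, src, size, 1);
	}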
Signed-off-by: Andi Kleen <ak@linux.intel.com>
---
 arch/x86/include/asm/uaccess_64.h   | 6 ++++--
 arch/x86/lib/copy_user_nocache_64.S | 7 +++++++
 2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index b327057..831f4a3 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -243,12 +243,14 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 
 extern long __copy_user_nocache(void *dst, const void __user *src,
 				unsigned size, int zerorest);
+extern long __copy_user_nocache_might_fault(void *dst, const void __user *src,
+				unsigned size, int zerorest);
 
 static inline int
 __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
 {
-	might_fault();
-	return __copy_user_nocache(dst, src, size, 1);
+	might_fault_debug_only();
+	return __copy_user_nocache_might_fault(dst, src, size, 1);
 }
 
 static inline int
diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
index 6a4f43c..8e6b0bd 100644
--- a/arch/x86/lib/copy_user_nocache_64.S
+++ b/arch/x86/lib/copy_user_nocache_64.S
@@ -16,6 +16,7 @@
 #include <asm/thread_info.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
+#include "user-common.h"
 
 .macro ALIGN_DESTINATION
 #ifdef FIX_ALIGNMENT
@@ -43,6 +44,12 @@
 #endif
 .endm
 
+ENTRY(__copy_user_nocache_might_fault)
+	CFI_STARTPROC
+	GET_THREAD_AND_SCHEDULE %rax
+	CFI_ENDPROC
+	/* fall through */
+
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
  * This will force destination/source out of cache for more performance.
-- 
1.8.3.1
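Note: GET_THREAD_AND_SCHEDULE is from the user-common.h header added
earlier in this series. As a rough C sketch of what the stub does before
falling through into the copy loop (these are the generic kernel
helpers, not the macro's actual expansion):

	/* Sketch only: take the voluntary preemption point that the
	 * inline might_fault()/might_sleep() path used to provide,
	 * rescheduling if the current task has TIF_NEED_RESCHED set. */
	static __always_inline void nocache_resched_check(void)
	{
		if (test_thread_flag(TIF_NEED_RESCHED))
			_cond_resched();
	}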