Subject: Re: [patch] releasing kernel lock during copy_from/to_user [Re: 2.3.3_andrea2 & 2.2.9_andrea1 [was Re: Bad apache perfomance wtih linux SMP]]
On Thu, 20 May 1999, Andrea Arcangeli wrote:

>But I just had an Oops with the above patch applied :-(. But I don't think
>the patch is wrong; I think instead there has to be some buggy piece of
>code that is triggered by really allowing other pieces of locked code to

My patch was wrong... it was deadlocking in reacquire_kernel_lock if for
some reason there was a fault during the copy_from/to_user (I easily
caught the deadlock by disassembling the .text.lock region).
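
The idea of the fix below is to save current->lock_depth, drop the big
kernel lock around the user copy if we hold it, and retake it afterwards,
restoring the saved depth. Here is a minimal C sketch of that pattern
(kernel_flag and lock_depth are the real 2.3.x BKL internals; the helper
__copy_user_nolock stands in for the raw inline-asm copy loop and is not
a real symbol):

#include <linux/sched.h>	/* current, lock_depth */
#include <asm/spinlock.h>	/* spin_lock/spin_unlock, kernel_flag */

static inline unsigned long copy_dropping_bkl(void *to, const void *from,
					      unsigned long n)
{
	int old = current->lock_depth;		/* save recursion depth */
	unsigned long left;

	if (old >= 0) {				/* this CPU holds the BKL */
		spin_unlock(&kernel_flag);
		current->lock_depth = -1;	/* so schedule() won't touch
						   kernel_flag meanwhile */
	}

	left = __copy_user_nolock(to, from, n);	/* may fault and sleep */

	if (old >= 0)
		spin_lock(&kernel_flag);	/* retake the BKL */
	current->lock_depth = old;		/* restore the saved depth */

	return left;
}

Note that lock_depth is restored unconditionally: for a caller that never
held the lock (old == -1) the whole thing is a no-op apart from the copy.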

This new patch, again against 2.3.3, at least won't deadlock and seems
rock solid where the previous one was crashing:

Index: linux/arch/i386/lib/usercopy.c
===================================================================
RCS file: /var/cvs/linux/arch/i386/lib/usercopy.c,v
retrieving revision 1.1.2.1
diff -u -r1.1.2.1 usercopy.c
--- linux/arch/i386/lib/usercopy.c 1999/01/18 01:37:02 1.1.2.1
+++ linux/arch/i386/lib/usercopy.c 1999/05/20 15:28:44
@@ -31,6 +31,8 @@
#define __do_strncpy_from_user(dst,src,count,res) \
do { \
int __d0, __d1, __d2; \
+ DECLARE_LOCAL_VAR_FOR_LOCK_AND_SCHEDULE; \
+ release_kernel_lock_and_schedule(); \
__asm__ __volatile__( \
" testl %1,%1\n" \
" jz 2f\n" \
@@ -54,6 +56,7 @@
"=&D" (__d2) \
: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
: "memory"); \
+ reacquire_kernel_lock_and_schedule(); \
} while (0)

long
@@ -81,6 +84,8 @@
#define __do_clear_user(addr,size) \
do { \
int __d0; \
+ DECLARE_LOCAL_VAR_FOR_LOCK_AND_SCHEDULE; \
+ release_kernel_lock_and_schedule(); \
__asm__ __volatile__( \
"0: rep; stosl\n" \
" movl %2,%0\n" \
@@ -97,6 +102,7 @@
".previous" \
: "=&c"(size), "=&D" (__d0) \
: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
+ reacquire_kernel_lock_and_schedule(); \
} while (0)

unsigned long
@@ -123,7 +129,9 @@
long strlen_user(const char *s)
{
unsigned long res;
+ DECLARE_LOCAL_VAR_FOR_LOCK_AND_SCHEDULE;

+ release_kernel_lock_and_schedule();
__asm__ __volatile__(
"0: repne; scasb\n"
" notl %0\n"
@@ -138,5 +146,6 @@
".previous"
:"=c" (res), "=D" (s)
:"1" (s), "a" (0), "0" (-__addr_ok(s)));
+ reacquire_kernel_lock_and_schedule();
return res & -__addr_ok(s);
}
Index: linux/include/asm-i386/smplock.h
===================================================================
RCS file: /var/cvs/linux/include/asm-i386/smplock.h,v
retrieving revision 1.1.2.3
diff -u -r1.1.2.3 smplock.h
--- linux/include/asm-i386/smplock.h 1999/05/01 01:47:32 1.1.2.3
+++ linux/include/asm-i386/smplock.h 1999/05/20 15:32:14
@@ -3,6 +3,10 @@
*
* i386 SMP lock implementation
*/
+
+#ifndef __i386_SMPLOCK_H
+#define __i386_SMPLOCK_H
+
#include <linux/interrupt.h>
#include <asm/spinlock.h>

@@ -28,7 +32,25 @@
spin_lock(&kernel_flag); \
} while (0)

+#define DECLARE_LOCAL_VAR_FOR_LOCK_AND_SCHEDULE int old
+
+#define release_kernel_lock_and_schedule() \
+do { \
+ old = current->lock_depth; \
+ if (old >= 0) \
+ { \
+ spin_unlock(&kernel_flag); \
+ current->lock_depth = -1; \
+ } \
+} while (0)

+#define reacquire_kernel_lock_and_schedule() \
+do { \
+ if (old >= 0) \
+ spin_lock(&kernel_flag); \
+ current->lock_depth = old; \
+} while (0)
+
/*
* Getting the big kernel lock.
*
@@ -57,3 +79,5 @@
:"=m" (__dummy_lock(&kernel_flag)),
"=m" (current->lock_depth));
}
+
+#endif /* __i386_SMPLOCK_H */
Index: linux/include/asm-i386/uaccess.h
===================================================================
RCS file: /var/cvs/linux/include/asm-i386/uaccess.h,v
retrieving revision 1.1.2.1
diff -u -r1.1.2.1 uaccess.h
--- linux/include/asm-i386/uaccess.h 1999/01/18 01:33:31 1.1.2.1
+++ linux/include/asm-i386/uaccess.h 1999/05/20 15:25:59
@@ -7,6 +7,7 @@
#include <linux/config.h>
#include <linux/sched.h>
#include <asm/page.h>
+#include <asm/smplock.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -253,6 +254,8 @@
#define __copy_user(to,from,size) \
do { \
int __d0, __d1; \
+ DECLARE_LOCAL_VAR_FOR_LOCK_AND_SCHEDULE; \
+ release_kernel_lock_and_schedule(); \
__asm__ __volatile__( \
"0: rep; movsl\n" \
" movl %3,%0\n" \
@@ -270,11 +273,14 @@
: "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
: "memory"); \
+ reacquire_kernel_lock_and_schedule(); \
} while (0)

#define __copy_user_zeroing(to,from,size) \
do { \
int __d0, __d1; \
+ DECLARE_LOCAL_VAR_FOR_LOCK_AND_SCHEDULE; \
+ release_kernel_lock_and_schedule(); \
__asm__ __volatile__( \
"0: rep; movsl\n" \
" movl %3,%0\n" \
@@ -298,6 +304,7 @@
: "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
: "memory"); \
+ reacquire_kernel_lock_and_schedule(); \
} while (0)

/* We let the __ versions of copy_from/to_user inline, because they're often
@@ -323,7 +330,9 @@
do { \
int __d0, __d1; \
switch (size & 3) { \
+ DECLARE_LOCAL_VAR_FOR_LOCK_AND_SCHEDULE; \
default: \
+ release_kernel_lock_and_schedule(); \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1:\n" \
@@ -338,6 +347,7 @@
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
+ reacquire_kernel_lock_and_schedule(); \
break; \
case 1: \
__asm__ __volatile__( \
@@ -407,7 +417,9 @@
do { \
int __d0, __d1; \
switch (size & 3) { \
+ DECLARE_LOCAL_VAR_FOR_LOCK_AND_SCHEDULE; \
default: \
+ release_kernel_lock_and_schedule(); \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1:\n" \
@@ -428,6 +440,7 @@
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
+ reacquire_kernel_lock_and_schedule(); \
break; \
case 1: \
__asm__ __volatile__( \
Index: linux/include/net/checksum.h
===================================================================
RCS file: /var/cvs/linux/include/net/checksum.h,v
retrieving revision 1.1.2.1
diff -u -r1.1.2.1 checksum.h
--- linux/include/net/checksum.h 1999/01/18 01:34:03 1.1.2.1
+++ linux/include/net/checksum.h 1999/05/20 15:34:01
@@ -98,7 +98,14 @@
int len, int sum, int *err_ptr)
{
if (verify_area(VERIFY_READ, src, len) == 0)
- return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+ {
+ int ret;
+ DECLARE_LOCAL_VAR_FOR_LOCK_AND_SCHEDULE;
+ release_kernel_lock_and_schedule();
+ ret = csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+ reacquire_kernel_lock_and_schedule();
+ return ret;
+ }

if (len)
*err_ptr = -EFAULT;
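
Just to show it from the caller's side: nothing changes for code that
already runs under the big kernel lock. A hypothetical caller (not part
of the patch) looks the same as before, but other CPUs can now enter
lock_kernel() regions while the possibly-faulting copy runs:

#include <linux/smp_lock.h>	/* lock_kernel/unlock_kernel */
#include <asm/uaccess.h>	/* copy_to_user */

/* Hypothetical example: a syscall path that still runs under the BKL,
 * as most of 2.3.3 does. */
static int give_status_to_user(char *ubuf, const char *kbuf, int len)
{
	int err = 0;

	lock_kernel();
	if (copy_to_user(ubuf, kbuf, len))	/* BKL dropped and retaken
						   inside the copy now */
		err = -EFAULT;
	unlock_kernel();

	return err;
}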

Andrea Arcangeli

