Date: 23 May 1999
From: Andrea Arcangeli
Subject: [patch] releasing kernel lock during copy_from/to_user [Re: 2.3.3_andrea2 & 2.2.9_andrea1 [was Re: Bad apache performance with linux SMP]]
    On Thu, 20 May 1999, Andrea Arcangeli wrote:

    >taking a different way to achieve the same thing in a simpler way. I'll
    >post a patch incremental with 2.3.3_andrea2.bz2 soon.

Here is the patch against 2.3.3_andrea2 (it should also apply quite
cleanly to a clean 2.3.3 or 2.2.9):

    Index: linux/arch/i386/lib/usercopy.c
    ===================================================================
    RCS file: /var/cvs/linux/arch/i386/lib/usercopy.c,v
    retrieving revision 1.1.2.1
    diff -u -r1.1.2.1 usercopy.c
    --- linux/arch/i386/lib/usercopy.c 1999/01/18 01:37:02 1.1.2.1
    +++ linux/arch/i386/lib/usercopy.c 1999/05/20 13:02:46
    @@ -31,6 +31,7 @@
    #define __do_strncpy_from_user(dst,src,count,res) \
    do { \
    int __d0, __d1, __d2; \
    + release_kernel_lock(current); \
    __asm__ __volatile__( \
    " testl %1,%1\n" \
    " jz 2f\n" \
    @@ -54,6 +55,7 @@
    "=&D" (__d2) \
    : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
    : "memory"); \
    + reacquire_kernel_lock(current); \
    } while (0)

    long
    Index: linux/include/asm-i386/smplock.h
    ===================================================================
    RCS file: /var/cvs/linux/include/asm-i386/smplock.h,v
    retrieving revision 1.1.2.3
    diff -u -r1.1.2.3 smplock.h
    --- linux/include/asm-i386/smplock.h 1999/05/01 01:47:32 1.1.2.3
    +++ linux/include/asm-i386/smplock.h 1999/05/20 12:47:05
    @@ -3,6 +3,10 @@
    *
    * i386 SMP lock implementation
    */
    +
    +#ifndef __i386_SMPLOCK_H
    +#define __i386_SMPLOCK_H
    +
    #include <linux/interrupt.h>
    #include <asm/spinlock.h>

    @@ -11,10 +15,14 @@
    /*
    * Release global kernel lock and global interrupt lock
    */
    -#define release_kernel_lock(task, cpu) \
    +#define release_kernel_lock(task) \
    do { \
    if (task->lock_depth >= 0) \
    spin_unlock(&kernel_flag); \
    +} while (0)
    +#define release_kernel_and_irq_lock(task, cpu) \
    +do { \
    + release_kernel_lock(task); \
    release_irqlock(cpu); \
    __sti(); \
    } while (0)
    @@ -57,3 +65,5 @@
    :"=m" (__dummy_lock(&kernel_flag)),
    "=m" (current->lock_depth));
    }
    +
    +#endif /* __i386_SMPLOCK_H */
    Index: linux/include/asm-i386/uaccess.h
    ===================================================================
    RCS file: /var/cvs/linux/include/asm-i386/uaccess.h,v
    retrieving revision 1.1.2.1
    diff -u -r1.1.2.1 uaccess.h
    --- linux/include/asm-i386/uaccess.h 1999/01/18 01:33:31 1.1.2.1
    +++ linux/include/asm-i386/uaccess.h 1999/05/20 13:01:49
    @@ -7,6 +7,7 @@
    #include <linux/config.h>
    #include <linux/sched.h>
    #include <asm/page.h>
    +#include <asm/smplock.h>

    #define VERIFY_READ 0
    #define VERIFY_WRITE 1
    @@ -253,6 +254,7 @@
    #define __copy_user(to,from,size) \
    do { \
    int __d0, __d1; \
    + release_kernel_lock(current); \
    __asm__ __volatile__( \
    "0: rep; movsl\n" \
    " movl %3,%0\n" \
    @@ -270,11 +272,13 @@
    : "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
    : "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
    : "memory"); \
    + reacquire_kernel_lock(current); \
    } while (0)

    #define __copy_user_zeroing(to,from,size) \
    do { \
    int __d0, __d1; \
    + release_kernel_lock(current); \
    __asm__ __volatile__( \
    "0: rep; movsl\n" \
    " movl %3,%0\n" \
    @@ -298,6 +302,7 @@
    : "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
    : "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
    : "memory"); \
    + reacquire_kernel_lock(current); \
    } while (0)

    /* We let the __ versions of copy_from/to_user inline, because they're often
    @@ -324,6 +329,7 @@
    int __d0, __d1; \
    switch (size & 3) { \
    default: \
    + release_kernel_lock(current); \
    __asm__ __volatile__( \
    "0: rep; movsl\n" \
    "1:\n" \
    @@ -338,6 +344,7 @@
    : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
    : "1"(from), "2"(to), "0"(size/4) \
    : "memory"); \
    + reacquire_kernel_lock(current); \
    break; \
    case 1: \
    __asm__ __volatile__( \
    @@ -408,6 +415,7 @@
    int __d0, __d1; \
    switch (size & 3) { \
    default: \
    + release_kernel_lock(current); \
    __asm__ __volatile__( \
    "0: rep; movsl\n" \
    "1:\n" \
    @@ -428,6 +436,7 @@
    : "=c"(size), "=&S" (__d0), "=&D" (__d1)\
    : "1"(from), "2"(to), "0"(size/4) \
    : "memory"); \
    + reacquire_kernel_lock(current); \
    break; \
    case 1: \
    __asm__ __volatile__( \
    Index: linux/include/net/checksum.h
    ===================================================================
    RCS file: /var/cvs/linux/include/net/checksum.h,v
    retrieving revision 1.1.2.1
    diff -u -r1.1.2.1 checksum.h
    --- linux/include/net/checksum.h 1999/01/18 01:34:03 1.1.2.1
    +++ linux/include/net/checksum.h 1999/05/20 13:07:26
    @@ -98,7 +98,13 @@
    int len, int sum, int *err_ptr)
    {
    if (verify_area(VERIFY_READ, src, len) == 0)
    - return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
    + {
    + int ret;
    + release_kernel_lock(current);
    + ret = csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
    + reacquire_kernel_lock(current);
    + return ret;
    + }

    if (len)
    *err_ptr = -EFAULT;
    Index: linux/kernel/sched.c
    ===================================================================
    RCS file: /var/cvs/linux/kernel/sched.c,v
    retrieving revision 1.1.2.45
    diff -u -r1.1.2.45 sched.c
    --- linux/kernel/sched.c 1999/05/19 23:43:44 1.1.2.45
    +++ linux/kernel/sched.c 1999/05/20 01:38:56
    @@ -659,7 +659,7 @@
    if (in_interrupt())
    goto scheduling_in_interrupt;

    - release_kernel_lock(prev, this_cpu);
    + release_kernel_and_irq_lock(prev, this_cpu);

    /* Do "administrative" work here while we don't hold any locks */
    if (get_active_bhs())
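
For clarity, the net effect of the patch on every slow user copy is the
pattern below. This is a minimal sketch, not code from the patch:
copy_user_unlocked is a made-up name, and memcpy stands in for the real
rep;movsl inline asm, which can sleep on a page fault.

	/* Sketch of the pattern the patch applies: drop the global
	 * kernel lock for the duration of the copy, then retake it.
	 * Both macros test task->lock_depth, so they are no-ops for
	 * tasks running outside the big lock. */
	static inline void copy_user_unlocked(void *to, const void *from,
					      unsigned long n)
	{
		release_kernel_lock(current);	/* unlock iff lock_depth >= 0 */
		memcpy(to, from, n);		/* a fault may schedule() here */
		reacquire_kernel_lock(current);	/* relock before returning */
	}

The point is that the other CPUs can now enter locked kernel paths while
one CPU is busy moving data across the user/kernel boundary.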

But I just had an Oops with the above patch applied :-(. I don't think
the patch itself is wrong, though; I suspect there is some buggy piece of
code that gets triggered by really allowing other locked code paths to
run in the middle of a copy_from/to_user. I'll try to find the real cause
of the Oops now.
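
To illustrate the kind of breakage I mean (hypothetical code, not
something I have found in the tree): any path that takes the big lock
and then assumes nothing else can run while it copies to user space is
now wrong, because the copy silently drops the lock:

	/* Hypothetical buggy caller: before this patch the BKL was held
	 * across the whole section, so 'obj' could not change. Now
	 * copy_to_user() drops the big lock internally, so another CPU
	 * can enter the same locked region and free or modify 'obj' in
	 * the middle of the copy. find_object()/put_object() are
	 * made-up names for illustration. */
	lock_kernel();
	obj = find_object(id);			/* looked up under the BKL */
	copy_to_user(ubuf, obj, sizeof(*obj));	/* BKL dropped in here! */
	put_object(obj);			/* may touch freed memory */
	unlock_kernel();

Any such hidden assumption would show up exactly as an Oops like this one.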

BTW, the SMP-lock profiler is included in my patches (the /proc part
is merged from Andi), so to see how much time the CPUs are wasting on
kernel locks, just enable it via the kernel configuration and then cat
/proc/stat:

andrea@laser:/usr/src$ cat /proc/stat
cpu 86196 0 11855 1866185
cpu0 43579 0 5803 932736 486
                         ^^^
cpu1 42617 0 6052 933449 875
                         ^^^
My two CPUs have wasted (486+875)/HZ seconds spinning on kernel locks
since boot; with HZ=100 on i386, that is about 13.6 seconds.
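
For reference, here is a quick user-space sketch that turns those last
columns into seconds. It assumes HZ=100 (the i386 default) and that the
extra fifth field on the cpuN lines is the spin tick count printed by
the profiler; it is just a helper, not part of the patch.

	/* spinwaste.c: sum the last field of every cpuN line of
	 * /proc/stat and print the time wasted spinning on the
	 * kernel lock. */
	#include <stdio.h>
	#include <string.h>

	#define HZ 100	/* i386 default in 2.2/2.3 */

	int main(void)
	{
		char line[256];
		unsigned long u, n, s, idle, spin, total = 0;
		FILE *f = fopen("/proc/stat", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "cpu", 3) && line[3] != ' ' &&
			    sscanf(line + 4, "%lu %lu %lu %lu %lu",
				   &u, &n, &s, &idle, &spin) == 5)
				total += spin;
		fclose(f);
		printf("%lu.%02lu seconds wasted spinning\n",
		       total / HZ, (total % HZ) * 100 / HZ);
		return 0;
	}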

    Andrea Arcangeli


    -
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.rutgers.edu
    Please read the FAQ at http://www.tux.org/lkml/
