Subject: [RFC][PATCH 3/4] change mnt_writers[] spinlock to mutex
    ---

    linux-2.6.git-dave/fs/namespace.c | 21 ++++++++++-----------
    1 file changed, 10 insertions(+), 11 deletions(-)
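
The subtle part of this conversion is not the spin_lock() -> mutex_lock() substitution itself, but the switch from get_cpu_var() to __get_cpu_var() in mnt_want_write(). get_cpu_var() disables preemption, and calling mutex_lock(), which may sleep, with preemption disabled is a bug; once a sleeping lock protects the per-cpu data, pinning the task to its CPU is unnecessary anyway. A minimal sketch of the resulting pattern (hypothetical pcpu_thing/bump() names, not the real mnt_writers code):

#include <linux/mutex.h>
#include <linux/percpu.h>

/* Hypothetical stand-in for the per-cpu struct mnt_writer. */
struct pcpu_thing {
	struct mutex lock;
	unsigned long count;
};
static DEFINE_PER_CPU(struct pcpu_thing, pcpu_things);

static void bump(void)
{
	/*
	 * __get_cpu_var() merely picks the current CPU's instance
	 * without disabling preemption.  If the task migrates after
	 * the lookup, nothing breaks: the data is protected by the
	 * mutex we take, not by staying on this CPU.  A task that
	 * lands on this CPU meanwhile just contends for the mutex.
	 */
	struct pcpu_thing *t = &__get_cpu_var(pcpu_things);

	mutex_lock(&t->lock);
	t->count++;
	mutex_unlock(&t->lock);
}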

diff -puN fs/namespace.c~change-spinlock-to-mutex fs/namespace.c
--- linux-2.6.git/fs/namespace.c~change-spinlock-to-mutex	2008-01-10 10:45:47.000000000 -0800
+++ linux-2.6.git-dave/fs/namespace.c	2008-01-10 10:46:01.000000000 -0800
@@ -118,7 +118,7 @@ struct mnt_writer {
 	 * If holding multiple instances of this lock, they
 	 * must be ordered by cpu number.
 	 */
-	spinlock_t lock;
+	struct mutex lock;
 	struct lock_class_key lock_class; /* compiles out with !lockdep */
 	unsigned long count;
 	struct vfsmount *mnt;
@@ -130,7 +130,7 @@ static int __init init_mnt_writers(void)
 	int cpu;
 	for_each_possible_cpu(cpu) {
 		struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
-		spin_lock_init(&writer->lock);
+		mutex_init(&writer->lock);
 		lockdep_set_class(&writer->lock, &writer->lock_class);
 		writer->count = 0;
 	}
@@ -145,7 +145,7 @@ static void mnt_unlock_cpus(void)
 
 	for_each_possible_cpu(cpu) {
 		cpu_writer = &per_cpu(mnt_writers, cpu);
-		spin_unlock(&cpu_writer->lock);
+		mutex_unlock(&cpu_writer->lock);
 	}
 }
 
@@ -191,8 +191,8 @@ int mnt_want_write(struct vfsmount *mnt)
 	int ret = 0;
 	struct mnt_writer *cpu_writer;
 
-	cpu_writer = &get_cpu_var(mnt_writers);
-	spin_lock(&cpu_writer->lock);
+	cpu_writer = &__get_cpu_var(mnt_writers);
+	mutex_lock(&cpu_writer->lock);
 	if (__mnt_is_readonly(mnt)) {
 		ret = -EROFS;
 		goto out;
@@ -200,8 +200,7 @@ int mnt_want_write(struct vfsmount *mnt)
 	use_cpu_writer_for_mount(cpu_writer, mnt);
 	cpu_writer->count++;
 out:
-	spin_unlock(&cpu_writer->lock);
-	put_cpu_var(mnt_writers);
+	mutex_unlock(&cpu_writer->lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(mnt_want_write);
@@ -213,7 +212,7 @@ static void lock_and_coalesce_cpu_mnt_wr
 
 	for_each_possible_cpu(cpu) {
 		cpu_writer = &per_cpu(mnt_writers, cpu);
-		spin_lock(&cpu_writer->lock);
+		mutex_lock(&cpu_writer->lock);
 		__clear_mnt_count(cpu_writer);
 		cpu_writer->mnt = NULL;
 	}
@@ -269,7 +268,7 @@ void mnt_drop_write(struct vfsmount *mnt
 
 retry:
 	cpu_writer = &__get_cpu_var(mnt_writers);
-	spin_lock(&cpu_writer->lock);
+	mutex_lock(&cpu_writer->lock);
 
 	use_cpu_writer_for_mount(cpu_writer, mnt);
 	if (cpu_writer->count > 0) {
@@ -286,14 +285,14 @@ retry:
 		 */
		if (atomic_read(&mnt->__mnt_writers) <
 		    MNT_WRITER_UNDERFLOW_LIMIT) {
-			spin_unlock(&cpu_writer->lock);
+			mutex_unlock(&cpu_writer->lock);
 			goto retry;
 		}
 		atomic_dec(&mnt->__mnt_writers);
 		must_check_underflow = 1;
 	}
 
-	spin_unlock(&cpu_writer->lock);
+	mutex_unlock(&cpu_writer->lock);
 	/*
 	 * Logically, we could call this each time,
 	 * but the __mnt_writers cacheline tends to
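
Note that the deadlock-avoidance rule in the struct comment (multiple instances of this lock must be taken in cpu-number order) carries over unchanged: for_each_possible_cpu() walks CPUs in ascending order, so the lock-them-all paths acquire the mutexes in one consistent total order. The per-lock lock_class_key assigned in init_mnt_writers() is what keeps lockdep from flagging those nested mutex_lock() calls as recursive locking within a single class. Continuing the hypothetical pcpu_things sketch from above, the pattern is:

static void lock_all_cpus(void)
{
	int cpu;

	/* Ascending cpu order == one global lock order == no ABBA. */
	for_each_possible_cpu(cpu)
		mutex_lock(&per_cpu(pcpu_things, cpu).lock);
}

static void unlock_all_cpus(void)
{
	int cpu;

	/* Release order is irrelevant for deadlock avoidance. */
	for_each_possible_cpu(cpu)
		mutex_unlock(&per_cpu(pcpu_things, cpu).lock);
}

For callers nothing changes: writers still bracket modifications with mnt_want_write(mnt) / mnt_drop_write(mnt). The one new constraint is that mnt_want_write() may now sleep, so it must not be called from atomic context.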