Subject: Re: [RESEND PATCH 1/2] aio: simplify fetching ioctx_table pointer from mm_struct
ping...

On 02/28/2014 06:27 PM, Gu Zheng wrote:

> Use rcu_dereference_protected() rather than the rcu_read_lock() ->
> rcu_dereference() -> rcu_read_unlock() sequence to fetch the ioctx_table
> pointer in ioctx_add_table() and kill_ioctx(), since the pointer is
> already protected by mm->ioctx_lock there.
> In exit_aio() no other users can be manipulating ioctx_table at that
> stage, so rcu_access_pointer() can be used directly.
>
> Signed-off-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
> ---
> fs/aio.c | 42 ++++++++++++++++--------------------------
> 1 files changed, 16 insertions(+), 26 deletions(-)
>
> diff --git a/fs/aio.c b/fs/aio.c
> index 062a5f6..7eaa631 100644
> --- a/fs/aio.c
> +++ b/fs/aio.c
> @@ -544,8 +544,8 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
>  	struct aio_ring *ring;
>
>  	spin_lock(&mm->ioctx_lock);
> -	rcu_read_lock();
> -	table = rcu_dereference(mm->ioctx_table);
> +	table = rcu_dereference_protected(mm->ioctx_table,
> +					  lockdep_is_held(&mm->ioctx_lock));
>
>  	while (1) {
>  		if (table)
> @@ -553,7 +553,6 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
>  				if (!table->table[i]) {
>  					ctx->id = i;
>  					table->table[i] = ctx;
> -					rcu_read_unlock();
>  					spin_unlock(&mm->ioctx_lock);
>
>  					ring = kmap_atomic(ctx->ring_pages[0]);
> @@ -564,7 +563,6 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
>
>  		new_nr = (table ? table->nr : 1) * 4;
>
> -		rcu_read_unlock();
>  		spin_unlock(&mm->ioctx_lock);
>
>  		table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
> @@ -575,8 +573,8 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
>  		table->nr = new_nr;
>
>  		spin_lock(&mm->ioctx_lock);
> -		rcu_read_lock();
> -		old = rcu_dereference(mm->ioctx_table);
> +		old = rcu_dereference_protected(mm->ioctx_table,
> +						lockdep_is_held(&mm->ioctx_lock));
>
>  		if (!old) {
>  			rcu_assign_pointer(mm->ioctx_table, table);
> @@ -711,12 +709,11 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
>  	struct kioctx_table *table;
>
>  	spin_lock(&mm->ioctx_lock);
> -	rcu_read_lock();
> -	table = rcu_dereference(mm->ioctx_table);
> +	table = rcu_dereference_protected(mm->ioctx_table,
> +					  lockdep_is_held(&mm->ioctx_lock));
>
>  	WARN_ON(ctx != table->table[ctx->id]);
>  	table->table[ctx->id] = NULL;
> -	rcu_read_unlock();
>  	spin_unlock(&mm->ioctx_lock);
>
>  	/* percpu_ref_kill() will do the necessary call_rcu() */
> @@ -765,27 +762,17 @@ EXPORT_SYMBOL(wait_on_sync_kiocb);
>  void exit_aio(struct mm_struct *mm)
>  {
>  	struct kioctx_table *table;
> -	struct kioctx *ctx;
>  	unsigned i = 0;
>
> -	while (1) {
> -		rcu_read_lock();
> -		table = rcu_dereference(mm->ioctx_table);
> -
> -		do {
> -			if (!table || i >= table->nr) {
> -				rcu_read_unlock();
> -				rcu_assign_pointer(mm->ioctx_table, NULL);
> -				if (table)
> -					kfree(table);
> -				return;
> -			}
> -
> -			ctx = table->table[i++];
> -		} while (!ctx);
> +	table = rcu_access_pointer(mm->ioctx_table);
> +	if (!table)
> +		return;
>
> -		rcu_read_unlock();
> +	while (i < table->nr) {
> +		struct kioctx *ctx = table->table[i++];
>
> +		if (!ctx)
> +			continue;
>  		/*
>  		 * We don't need to bother with munmap() here -
>  		 * exit_mmap(mm) is coming and it'll unmap everything.
> @@ -798,6 +785,9 @@ void exit_aio(struct mm_struct *mm)
>
>  		kill_ioctx(mm, ctx);
>  	}
> +
> +	rcu_assign_pointer(mm->ioctx_table, NULL);
> +	kfree(table);
>  }
>
> static void put_reqs_available(struct kioctx *ctx, unsigned nr)
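
For anyone skimming the thread, here is a minimal, self-contained sketch (not
taken from fs/aio.c) of the two accessor patterns the changelog leans on. All
names below (struct demo_table, struct demo_owner, demo_install(), demo_exit())
are hypothetical and exist only for illustration, and copying the old entries
on resize is omitted for brevity.

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

/* Hypothetical stand-ins for mm_struct and kioctx_table, illustration only. */
struct demo_table {
	struct rcu_head		rcu;
	unsigned		nr;
	void			*slot[];
};

struct demo_owner {
	spinlock_t		lock;	/* serializes updates of @table */
	struct demo_table __rcu	*table;
};

/*
 * Update side: the spinlock already excludes concurrent updaters, so the
 * current table can be fetched with rcu_dereference_protected() plus a
 * lockdep expression instead of the rcu_read_lock()/rcu_dereference()/
 * rcu_read_unlock() sequence.
 */
static int demo_install(struct demo_owner *owner, unsigned new_nr)
{
	struct demo_table *new, *old;

	new = kzalloc(sizeof(*new) + new_nr * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->nr = new_nr;

	spin_lock(&owner->lock);
	old = rcu_dereference_protected(owner->table,
					lockdep_is_held(&owner->lock));
	/* a real resize would copy old->slot[] here, as ioctx_add_table() does */
	rcu_assign_pointer(owner->table, new);
	spin_unlock(&owner->lock);

	if (old)
		kfree_rcu(old, rcu);	/* readers may still be walking it */
	return 0;
}

/*
 * Teardown side, mirroring the exit_aio() change: no readers or updaters
 * remain, so the pointer value is read once with rcu_access_pointer() and
 * the table is freed without an RCU read-side critical section.
 */
static void demo_exit(struct demo_owner *owner)
{
	struct demo_table *table = rcu_access_pointer(owner->table);

	if (!table)
		return;

	rcu_assign_pointer(owner->table, NULL);
	kfree(table);
}

The install path in the sketch frees the old table with kfree_rcu() because
readers may still be traversing it; the exit path gets away with plain kfree()
and rcu_access_pointer(), which is the same reasoning the patch applies to
exit_aio(): once no other users can touch mm->ioctx_table, the read-side
locking buys nothing.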



