From: Kent Overstreet <koverstreet@google.com>
Subject: [PATCH 23/25] aio: Percpu ioctx refcount
Date: 2012-11-28
This just converts the ioctx refcount to the new generic dynamic percpu
refcount code.
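
For reference, a minimal sketch of the lifecycle pattern the conversion
follows, using the percpu_ref API as it appears in this series (here
percpu_ref_put() returns true on the final put, and percpu_ref_kill()
returns true only for the caller that actually performed the kill).
The struct foo and foo_*() helpers are hypothetical, for illustration
only:

	struct foo {
		struct percpu_ref users;
	};

	static void foo_free(struct foo *f);	/* actual teardown */

	static void foo_create(struct foo *f)
	{
		percpu_ref_init(&f->users);	/* caller holds one ref */
	}

	/* gets happen under rcu_read_lock(), as in the diff below */
	static void foo_get(struct foo *f)
	{
		rcu_read_lock();
		percpu_ref_get(&f->users);	/* percpu increment */
		rcu_read_unlock();
	}

	static void foo_put(struct foo *f)
	{
		/* true only once the ref is killed and drops to zero */
		if (percpu_ref_put(&f->users))
			foo_free(f);
	}

	static void foo_kill(struct foo *f)
	{
		/* the winner of a kill race drops the initial ref */
		if (percpu_ref_kill(&f->users))
			foo_put(f);
	}

The win is in the hot io_submit()/completion paths: instead of every
get and put bouncing a shared atomic_t cache line between CPUs, the ref
can be serviced by local percpu counters, with a shared atomic needed
only around kill/teardown.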

Signed-off-by: Kent Overstreet <koverstreet@google.com>
---
fs/aio.c | 30 +++++++++++++-----------------
1 file changed, 13 insertions(+), 17 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index 94218b7..0975675 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -36,6 +36,7 @@
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
+#include <linux/percpu-refcount.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>
@@ -65,7 +66,7 @@ struct kioctx_cpu {
};

struct kioctx {
- atomic_t users;
+ struct percpu_ref users;
atomic_t dead;

/* This needs improving */
@@ -297,6 +298,8 @@ static void free_ioctx(struct kioctx *ctx)
struct io_event res;
unsigned cpu, head, avail;

+ pr_debug("freeing %p\n", ctx);
+
spin_lock_irq(&ctx->ctx_lock);

while (!list_empty(&ctx->active_reqs)) {
@@ -341,14 +344,14 @@ static void free_ioctx(struct kioctx *ctx)

synchronize_rcu();

- pr_debug("freeing %p\n", ctx);
+ pr_debug("freed %p\n", ctx);
free_percpu(ctx->cpu);
kmem_cache_free(kioctx_cachep, ctx);
}

static void put_ioctx(struct kioctx *ctx)
{
- if (unlikely(atomic_dec_and_test(&ctx->users)))
+ if (percpu_ref_put(&ctx->users))
free_ioctx(ctx);
}

@@ -377,7 +380,10 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)

ctx->max_reqs = nr_events;

- atomic_set(&ctx->users, 2);
+ percpu_ref_init(&ctx->users);
+ rcu_read_lock();
+ percpu_ref_get(&ctx->users);
+ rcu_read_unlock();
spin_lock_init(&ctx->ctx_lock);
spin_lock_init(&ctx->completion_lock);
mutex_init(&ctx->ring_lock);
@@ -433,12 +439,9 @@ out_freectx:
*/
static void kill_ioctx(struct kioctx *ctx)
{
- if (!atomic_xchg(&ctx->dead, 1)) {
+ if (percpu_ref_kill(&ctx->users)) {
hlist_del_rcu(&ctx->list);
synchronize_rcu();
-
- wake_up_all(&ctx->wait);
-
put_ioctx(ctx);
}
}
@@ -473,12 +476,6 @@ void exit_aio(struct mm_struct *mm)
struct hlist_node *p, *n;

hlist_for_each_entry_safe(ctx, p, n, &mm->ioctx_list, list) {
- if (1 != atomic_read(&ctx->users))
- printk(KERN_DEBUG
- "exit_aio:ioctx still alive: %d %d %d\n",
- atomic_read(&ctx->users),
- atomic_read(&ctx->dead),
- atomic_read(&ctx->reqs_available));
/*
* We don't need to bother with munmap() here -
* exit_mmap(mm) is coming and it'll unmap everything.
@@ -597,8 +594,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)

hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list)
if (ctx->user_id == ctx_id){
- BUG_ON(atomic_read(&ctx->dead));
- atomic_inc(&ctx->users);
+ percpu_ref_get(&ctx->users);
ret = ctx;
break;
}
@@ -838,7 +834,7 @@ static int read_events(struct kioctx *ctx,
i += ret;
if (i >= min_nr)
break;
- if (unlikely(atomic_read(&ctx->dead))) {
+ if (unlikely(percpu_ref_dead(&ctx->users))) {
ret = -EINVAL;
break;
}
--
1.7.12

