Subject: [PATCH 2/4] FUSE: make request_wait_answer() wait for ->end() completion
Previously, a request was marked FINISHED before ->end() was executed,
and thus request_wait_answer() could return before ->end() had
finished.  This patch makes request_wait_answer() wait for ->end() to
finish before returning.
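
To make the ordering concrete, here is a minimal standalone userspace
sketch of the same completion/wait pattern (pthreads, not the FUSE
code itself; struct req, req_end(), wait_answer(), my_end() and
completer() are illustrative stand-ins for fuse_req, request_end(),
request_wait_answer() and a real ->end() callback): the completer runs
the callback first and only then publishes the finished state, so a
waiter that observes the finished state knows the callback has already
run.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for struct fuse_req. */
struct req {
	pthread_mutex_t lock;
	pthread_cond_t waitq;
	int finished;			/* analogous to FUSE_REQ_FINISHED */
	void (*end)(struct req *);	/* analogous to req->end */
};

/* Completer side, analogous to request_end(): run the callback
 * first, then publish the finished state and wake the waiter. */
static void req_end(struct req *r)
{
	if (r->end)
		r->end(r);

	pthread_mutex_lock(&r->lock);
	r->finished = 1;
	pthread_cond_broadcast(&r->waitq);
	pthread_mutex_unlock(&r->lock);
}

/* Waiter side, analogous to request_wait_answer(): when this
 * returns, the callback is guaranteed to have completed. */
static void wait_answer(struct req *r)
{
	pthread_mutex_lock(&r->lock);
	while (!r->finished)
		pthread_cond_wait(&r->waitq, &r->lock);
	pthread_mutex_unlock(&r->lock);
}

static void my_end(struct req *r)
{
	printf("callback running for req %p\n", (void *)r);
}

static void *completer(void *arg)
{
	req_end(arg);
	return NULL;
}

int main(void)
{
	struct req r = {
		.lock	= PTHREAD_MUTEX_INITIALIZER,
		.waitq	= PTHREAD_COND_INITIALIZER,
		.end	= my_end,
	};
	pthread_t t;

	pthread_create(&t, NULL, completer, &r);
	wait_answer(&r);
	printf("waiter woken only after the callback finished\n");
	pthread_join(&t, NULL);
	return 0;
}

In the kernel patch below the same guarantee comes from running
->end() before setting req->state and calling wake_up(); in the sketch
the mutex/condvar pair stands in for the smp_wmb()/wait_event()
ordering.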

Note that no current ->end() user waits for request completion, so
this change doesn't cause any difference in behavior.

While at it, beef up the comment above the ->end() hook and clarify
when and where it's called.

Signed-off-by: Tejun Heo <tj@kernel.org>
---
fs/fuse/dev.c | 41 +++++++++++++++++++++++++----------------
fs/fuse/fuse_i.h | 5 ++++-
2 files changed, 29 insertions(+), 17 deletions(-)

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 8fed2ed..0745728 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -278,7 +278,6 @@ __releases(&fc->lock)
req->end = NULL;
list_del(&req->list);
list_del(&req->intr_entry);
- req->state = FUSE_REQ_FINISHED;
if (req->background) {
if (fc->num_background == FUSE_MAX_BACKGROUND) {
fc->blocked = 0;
@@ -293,10 +292,21 @@ __releases(&fc->lock)
fc->active_background--;
flush_bg_queue(fc);
}
+
spin_unlock(&fc->lock);
- wake_up(&req->waitq);
- if (end)
+
+ if (end) {
end(fc, req);
+ smp_wmb();
+ }
+
+ /*
+ * We own this request and wake_up() provides enough of a memory
+ * barrier; no need to grab the spin lock to set the state.
+ */
+ req->state = FUSE_REQ_FINISHED;
+
+ wake_up(&req->waitq);
fuse_put_request(fc, req);
}

@@ -372,17 +382,16 @@ __acquires(&fc->lock)
return;

aborted:
- BUG_ON(req->state != FUSE_REQ_FINISHED);
- if (req->locked) {
- /* This is uninterruptible sleep, because data is
- being copied to/from the buffers of req. During
- locked state, there mustn't be any filesystem
- operation (e.g. page fault), since that could lead
- to deadlock */
- spin_unlock(&fc->lock);
- wait_event(req->waitq, !req->locked);
- spin_lock(&fc->lock);
- }
+ spin_unlock(&fc->lock);
+ wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
+ /*
+ * This is uninterruptible sleep, because data is being copied
+ * to/from the buffers of req. During locked state, there
+ * mustn't be any filesystem operation (e.g. page fault),
+ * since that could lead to deadlock
+ */
+ wait_event(req->waitq, !req->locked);
+ spin_lock(&fc->lock);
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
@@ -1062,9 +1071,7 @@ __acquires(&fc->lock)

req->aborted = 1;
req->out.h.error = -ECONNABORTED;
- req->state = FUSE_REQ_FINISHED;
list_del_init(&req->list);
- wake_up(&req->waitq);
if (end) {
req->end = NULL;
__fuse_get_request(req);
@@ -1074,6 +1081,8 @@ __acquires(&fc->lock)
fuse_put_request(fc, req);
spin_lock(&fc->lock);
}
+ req->state = FUSE_REQ_FINISHED;
+ wake_up(&req->waitq);
}
}

diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index aaf2f9f..7b930b6 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -292,7 +292,10 @@ struct fuse_req {
/** Link on fi->writepages */
struct list_head writepages_entry;

- /** Request completion callback */
+ /** Request completion callback. This function is called from
+ the kernel context of the FUSE server if the request isn't
+ being aborted. If the request is being aborted, it's
+ called from the kernel context of the aborting process. */
void (*end)(struct fuse_conn *, struct fuse_req *);

/** Request is stolen from fuse_file->reserved_req */
--
1.6.0.2

