Subject: [PATCH 6/8] blk-mq: remove REQ_ATOM_COMPLETE usages from blk-mq
After the recent updates to use generation-number and state-based
synchronization, blk-mq no longer depends on REQ_ATOM_COMPLETE except
to avoid firing the same timeout multiple times.

Remove all REQ_ATOM_COMPLETE usages and use a new rq_flags flag,
RQF_MQ_TIMEOUT_EXPIRED, to avoid firing the same timeout multiple
times. This also removes atomic bitops from the hot paths.
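
For illustration only, a standalone userspace sketch of the idea
(struct fake_rq, rq_timed_out() and rq_add_timer() are made-up names,
not blk-mq API): the timeout path is serialized per request, so a
plain flag set before invoking the driver timeout handler and cleared
when the timer is re-armed is enough to keep one expiry from being
handled twice, without an atomic test_and_set_bit.

/*
 * Standalone sketch, not kernel code: fake_rq, rq_timed_out() and
 * rq_add_timer() are illustrative stand-ins.
 */
#include <stdio.h>

#define RQF_TIMEOUT_EXPIRED	(1u << 0)	/* mirrors RQF_MQ_TIMEOUT_EXPIRED */

struct fake_rq {
	unsigned int rq_flags;
	unsigned long deadline;
};

/* Called by the (single) timeout worker when @rq looks expired. */
static void rq_timed_out(struct fake_rq *rq)
{
	if (rq->rq_flags & RQF_TIMEOUT_EXPIRED)
		return;				/* already fired for this expiry */
	rq->rq_flags |= RQF_TIMEOUT_EXPIRED;
	printf("timeout fired once\n");		/* driver ->timeout() would run here */
}

/* Re-arming the timer opens a new expiry window, so clear the flag. */
static void rq_add_timer(struct fake_rq *rq, unsigned long new_deadline)
{
	rq->deadline = new_deadline;
	rq->rq_flags &= ~RQF_TIMEOUT_EXPIRED;
}

int main(void)
{
	struct fake_rq rq = { 0, 100 };

	rq_timed_out(&rq);	/* fires */
	rq_timed_out(&rq);	/* suppressed: flag already set */
	rq_add_timer(&rq, 200);	/* BLK_EH_RESET_TIMER path: window reset */
	rq_timed_out(&rq);	/* fires for the new window */
	return 0;
}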

v2: Removed blk_clear_rq_complete() from blk_mq_rq_timed_out().

v3: Added RQF_MQ_TIMEOUT_EXPIRED flag.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: "jianchao.wang" <jianchao.w.wang@oracle.com>
---
block/blk-mq.c | 15 +++++++--------
block/blk-timeout.c | 1 +
include/linux/blkdev.h | 2 ++
3 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index b419746..3f297f0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -634,8 +634,7 @@ void blk_mq_complete_request(struct request *rq)
* hctx_lock() covers both issue and completion paths.
*/
hctx_lock(hctx, &srcu_idx);
- if (blk_mq_rq_aborted_gstate(rq) != rq->gstate &&
- !blk_mark_rq_complete(rq))
+ if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
__blk_mq_complete_request(rq);
hctx_unlock(hctx, srcu_idx);
}
@@ -685,8 +684,6 @@ void blk_mq_start_request(struct request *rq)
preempt_enable();

set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
- if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
- clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

if (q->dma_drain_size && blk_rq_bytes(rq)) {
/*
@@ -837,6 +834,8 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
return;

+ req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;
+
if (ops->timeout)
ret = ops->timeout(req, reserved);

@@ -852,7 +851,6 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
*/
blk_mq_rq_update_aborted_gstate(req, 0);
blk_add_timer(req);
- blk_clear_rq_complete(req);
break;
case BLK_EH_NOT_HANDLED:
break;
@@ -871,7 +869,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,

might_sleep();

- if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+ if ((rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) ||
+ !test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
return;

/* read coherent snapshots of @rq->state_gen and @rq->deadline */
@@ -906,8 +905,8 @@ static void blk_mq_terminate_expired(struct blk_mq_hw_ctx *hctx,
* now guaranteed to see @rq->aborted_gstate and yield. If
* @rq->aborted_gstate still matches @rq->gstate, @rq is ours.
*/
- if (READ_ONCE(rq->gstate) == rq->aborted_gstate &&
- !blk_mark_rq_complete(rq))
+ if (!(rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) &&
+ READ_ONCE(rq->gstate) == rq->aborted_gstate)
blk_mq_rq_timed_out(rq, reserved);
}

diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 4f04cd1..ebe9996 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -214,6 +214,7 @@ void blk_add_timer(struct request *req)
req->timeout = q->rq_timeout;

req->deadline = jiffies + req->timeout;
+ req->rq_flags &= ~RQF_MQ_TIMEOUT_EXPIRED;

/*
* Only the non-mq case needs to add the request to a protected list.
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ae563d0..007a7cf 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -125,6 +125,8 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
+/* timeout is expired */
+#define RQF_MQ_TIMEOUT_EXPIRED ((__force req_flags_t)(1 << 20))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
--
2.9.5