Subject: [PATCH V9 2/4] blk-mq: refactor the code of issue request directly
Merge blk_mq_try_issue_directly and __blk_mq_try_issue_directly
into a single interface for issuing requests directly. The merged
interface takes full ownership of the request: based on the return
value of .queue_rq and the 'bypass' parameter, it will insert the
request, end it, or do nothing. Callers therefore need no further
handling of their own, and their code can be cleaned up.

Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
---
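Note for reviewers: to make the new calling convention concrete, below is
a minimal standalone sketch of the decision the merged function makes
after dispatch. The sketch_status enum, handle_issue_result() helper and
printf placeholders are illustrative stand-ins, not kernel code, and it
simplifies away the 'force' insert path used for requests that cannot be
dispatched directly.

/*
 * Standalone sketch of the caller-side contract of the merged
 * blk_mq_try_issue_directly(): with bypass == false the request is
 * always consumed (inserted or ended) and OK is reported; with
 * bypass == true the raw status is handed back so the caller (e.g.
 * blk_mq_request_issue_directly()) deals with it itself.
 */
#include <stdbool.h>
#include <stdio.h>

enum sketch_status { STS_OK, STS_RESOURCE, STS_DEV_RESOURCE, STS_IOERR };

static enum sketch_status handle_issue_result(enum sketch_status ret, bool bypass)
{
        switch (ret) {
        case STS_OK:
                break;
        case STS_RESOURCE:
        case STS_DEV_RESOURCE:
                if (!bypass) {
                        /* stands in for blk_mq_sched_insert_request() */
                        printf("insert request via scheduler, report OK\n");
                        ret = STS_OK;
                }
                break;
        default:
                if (!bypass) {
                        /* stands in for blk_mq_end_request() */
                        printf("end request with error, report OK\n");
                        ret = STS_OK;
                }
                break;
        }
        return ret;
}

int main(void)
{
        /* Internal caller: the request is taken over, caller sees OK. */
        printf("bypass=false -> %d\n", handle_issue_result(STS_RESOURCE, false));
        /* Bypass caller keeps ownership and must handle STS_RESOURCE itself. */
        printf("bypass=true  -> %d\n", handle_issue_result(STS_RESOURCE, true));
        return 0;
}
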
block/blk-mq.c | 116 +++++++++++++++++++++++++++------------------------------
1 file changed, 54 insertions(+), 62 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 153af90..fe92e52 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1815,93 +1815,85 @@ static bool blk_rq_can_direct_dispatch(struct request *rq)
return req_op(rq) == REQ_OP_READ || req_op(rq) == REQ_OP_WRITE;
}

-static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie,
- bool bypass_insert, bool last)
+ bool bypass, bool last)
{
struct request_queue *q = rq->q;
bool run_queue = true;
+ blk_status_t ret = BLK_STS_RESOURCE;
+ int srcu_idx;
bool force = false;

+ if (!blk_rq_can_direct_dispatch(rq)) {
+ /*
+ * Insert request to hctx dispatch list for 'bypass == true'
+ * case, otherwise, the caller will fail forever.
+ */
+ if (bypass)
+ force = true;
+ goto out;
+ }
+
+ hctx_lock(hctx, &srcu_idx);
/*
- * RCU or SRCU read lock is needed before checking quiesced flag.
+ * hctx_lock is needed before checking quiesced flag.
*
- * When queue is stopped or quiesced, ignore 'bypass_insert' from
- * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
- * and avoid driver to try to dispatch again.
+ * When queue is stopped or quiesced, ignore 'bypass', insert
+ * and return BLK_STS_OK to caller, and avoid driver to try to
+ * dispatch again.
*/
- if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
+ if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
run_queue = false;
- bypass_insert = false;
- goto insert;
+ bypass = false;
+ goto out_unlock;
}

- if (q->elevator && !bypass_insert)
- goto insert;
-
- if (!blk_rq_can_direct_dispatch(rq)) {
- /*
- * For 'bypass_insert == true' case, insert request into hctx
- * dispatch list.
- */
- if (bypass_insert)
- force = true;
- goto insert;
- }
+ if (unlikely(q->elevator && !bypass))
+ goto out_unlock;

if (!blk_mq_get_dispatch_budget(hctx))
- goto insert;
+ goto out_unlock;

if (!blk_mq_get_driver_tag(rq)) {
blk_mq_put_dispatch_budget(hctx);
- goto insert;
+ goto out_unlock;
}

- return __blk_mq_issue_directly(hctx, rq, cookie, last);
-insert:
- if (force) {
- blk_mq_request_bypass_insert(rq, run_queue);
- return BLK_STS_OK;
- } else if (bypass_insert) {
- return BLK_STS_RESOURCE;
+ ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
+out_unlock:
+ hctx_unlock(hctx, srcu_idx);
+out:
+ switch (ret) {
+ case BLK_STS_OK:
+ break;
+ case BLK_STS_DEV_RESOURCE:
+ case BLK_STS_RESOURCE:
+ if (force) {
+ blk_mq_request_bypass_insert(rq, run_queue);
+ ret = BLK_STS_OK;
+ } else if (!bypass) {
+ blk_mq_sched_insert_request(rq, false, run_queue, false);
+ ret = BLK_STS_OK;
+ }
+ break;
+ default:
+ if (!bypass) {
+ blk_mq_end_request(rq, ret);
+ ret = BLK_STS_OK;
+ }
+ break;
}

- blk_mq_sched_insert_request(rq, false, run_queue, false);
- return BLK_STS_OK;
-}
-
-static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
- struct request *rq, blk_qc_t *cookie)
-{
- blk_status_t ret;
- int srcu_idx;
-
- might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
-
- hctx_lock(hctx, &srcu_idx);
-
- ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
- if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
- blk_mq_sched_insert_request(rq, false, true, false);
- else if (ret != BLK_STS_OK)
- blk_mq_end_request(rq, ret);
-
- hctx_unlock(hctx, srcu_idx);
+ return ret;
}

blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
{
- blk_status_t ret;
- int srcu_idx;
- blk_qc_t unused_cookie;
- struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+ blk_qc_t unused;

- hctx_lock(hctx, &srcu_idx);
- ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
- hctx_unlock(hctx, srcu_idx);
-
- return ret;
+ return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, last);
}

void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
@@ -2044,13 +2036,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
if (same_queue_rq) {
data.hctx = same_queue_rq->mq_hctx;
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
- &cookie);
+ &cookie, false, true);
}
} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
!data.hctx->dispatch_busy)) {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
- blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+ blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
} else {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
--
2.7.4