From: Corrado Zoccolo <czoccolo@gmail.com>
Subject: [PATCH] cfq-iosched: NCQ SSDs do not need read queue merging
Date: 2010-01-10
The performance of NCQ SSDs is not affected by the distance
between read requests, so there is no point in paying the
overhead of merging the queues that issue them.

Non-NCQ SSDs showed regressions in some corner cases, so they
are excluded from this change.

This patch intentionally leaves writes unaffected: it changes
the queued[] field to be indexed by READ/WRITE instead of
SYNC/ASYNC, and computes proximity only for queues that have
WRITE requests queued.

Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
---
block/cfq-iosched.c | 28 +++++++++++++++++-----------
1 files changed, 17 insertions(+), 11 deletions(-)
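
Note for reviewers: the commit message relies on the sync/async vs.
read/write distinction in CFQ. Per the comment in the code below,
every read is regarded as SYNC, while a write is SYNC only when
explicitly flagged, so queued[SYNC] cannot distinguish a read-only
queue from one holding sync writes. The following standalone
userspace sketch (not kernel code; struct toy_rq and the harness are
invented for illustration) shows why the READ/WRITE indexing makes
the "does this queue still need merging?" test possible:

/* Build: cc -o queued_model queued_model.c && ./queued_model */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { READ = 0, WRITE = 1 };	/* same values the kernel uses */

/* Toy request: a direction plus a "sync" hint (think O_DIRECT/O_SYNC). */
struct toy_rq {
	int dir;	/* READ or WRITE */
	bool sync_hint;
};

/* CFQ's rule: every read is sync; a write is sync only if hinted. */
static int rq_is_sync(const struct toy_rq *rq)
{
	return rq->dir == READ || rq->sync_hint;
}

static int rq_data_dir(const struct toy_rq *rq)
{
	return rq->dir;
}

int main(void)
{
	struct toy_rq rqs[] = {
		{ READ,  false },	/* buffered read */
		{ READ,  false },	/* buffered read */
		{ WRITE, true  },	/* sync write: sync, but still a write */
	};
	int by_sync[2] = { 0, 0 };	/* old indexing: [ASYNC][SYNC] */
	int by_dir[2]  = { 0, 0 };	/* new indexing: [READ][WRITE] */

	for (size_t i = 0; i < sizeof(rqs) / sizeof(rqs[0]); i++) {
		by_sync[rq_is_sync(&rqs[i])]++;
		by_dir[rq_data_dir(&rqs[i])]++;
	}

	/* Old scheme: the sync write is lumped in with the reads. */
	printf("SYNC/ASYNC: async=%d sync=%d\n", by_sync[0], by_sync[1]);
	/* New scheme: queued[WRITE] != 0 cleanly identifies a queue that
	 * still needs proximity merging even on an NCQ SSD. */
	printf("READ/WRITE: read=%d write=%d\n", by_dir[READ], by_dir[WRITE]);
	return 0;
}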

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 918c7fd..3b7c60e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -108,9 +108,9 @@ struct cfq_queue {
 	struct rb_root sort_list;
 	/* if fifo isn't expired, next request to serve */
 	struct request *next_rq;
-	/* requests queued in sort_list */
+	/* requests queued in sort_list, indexed by READ/WRITE */
 	int queued[2];
-	/* currently allocated requests */
+	/* currently allocated requests, indexed by READ/WRITE */
 	int allocated[2];
 	/* fifo list of requests in sort_list */
 	struct list_head fifo;
@@ -436,6 +436,10 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic,
 	cic->cfqq[is_sync] = cfqq;
 }
 
+static inline bool is_smart_ssd(struct cfq_data *cfqd)
+{
+	return blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag;
+}
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
@@ -1268,7 +1272,8 @@ static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		return;
 	if (!cfqq->next_rq)
 		return;
-
+	if (is_smart_ssd(cfqd) && !cfqq->queued[WRITE])
+		return;
 	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
 	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
 				      blk_rq_pos(cfqq->next_rq), &parent, &p);
@@ -1337,10 +1342,10 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static void cfq_del_rq_rb(struct request *rq)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
-	const int sync = rq_is_sync(rq);
+	const int rw = rq_data_dir(rq);
 
-	BUG_ON(!cfqq->queued[sync]);
-	cfqq->queued[sync]--;
+	BUG_ON(!cfqq->queued[rw]);
+	cfqq->queued[rw]--;
 
 	elv_rb_del(&cfqq->sort_list, rq);
 
@@ -1363,7 +1368,7 @@ static void cfq_add_rq_rb(struct request *rq)
 	struct cfq_data *cfqd = cfqq->cfqd;
 	struct request *__alias, *prev;
 
-	cfqq->queued[rq_is_sync(rq)]++;
+	cfqq->queued[rq_data_dir(rq)]++;
 
 	/*
 	 * looks a little odd, but the first insert might return an alias.
@@ -1393,7 +1398,7 @@ static void cfq_add_rq_rb(struct request *rq)
 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
 	elv_rb_del(&cfqq->sort_list, rq);
-	cfqq->queued[rq_is_sync(rq)]--;
+	cfqq->queued[rq_data_dir(rq)]--;
 	cfq_add_rq_rb(rq);
 }
 
@@ -1689,7 +1694,8 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 	struct cfq_queue *__cfqq;
 	sector_t sector = cfqd->last_position;
 
-	if (RB_EMPTY_ROOT(root))
+	if (RB_EMPTY_ROOT(root) ||
+	    (is_smart_ssd(cfqd) && !cur_cfqq->queued[WRITE]))
 		return NULL;
 
 	/*
@@ -1796,7 +1802,7 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)

 	/* We do for queues that were marked with idle window flag. */
 	if (cfq_cfqq_idle_window(cfqq) &&
-	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
+	   !is_smart_ssd(cfqd))
 		return true;
 
 	/*
@@ -1817,7 +1823,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	 * for devices that support queuing, otherwise we still have a problem
 	 * with sync vs async workloads.
 	 */
-	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
+	if (is_smart_ssd(cfqd))
 		return;
 
 	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
--
1.6.4.4
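
P.S. For completeness, here is a standalone sketch of the gating rule
the patch keys on (struct toy_cfqd and the harness are invented for
illustration; only the nonrot && hw_tag test mirrors the kernel code):
read queue merging is skipped only when the device is both
non-rotational and has exhibited NCQ-style hardware tagging, which is
why non-NCQ SSDs keep the old behaviour.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct cfq_data: just the two flags the test reads. */
struct toy_cfqd {
	bool nonrot;	/* what blk_queue_nonrot() would report: an SSD */
	bool hw_tag;	/* NCQ-style command queuing observed at runtime */
};

/* Mirrors the patch: only a queuing (NCQ) SSD counts as "smart". */
static bool is_smart_ssd(const struct toy_cfqd *cfqd)
{
	return cfqd->nonrot && cfqd->hw_tag;
}

int main(void)
{
	const struct toy_cfqd devs[] = {
		{ false, true  },	/* rotational disk with NCQ */
		{ true,  false },	/* non-NCQ SSD: regressed, so excluded */
		{ true,  true  },	/* NCQ SSD: skip read queue merging */
	};

	for (int i = 0; i < 3; i++)
		printf("nonrot=%d hw_tag=%d -> skip read merging: %s\n",
		       devs[i].nonrot, devs[i].hw_tag,
		       is_smart_ssd(&devs[i]) ? "yes" : "no");
	return 0;
}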

