Subject: [PATCH RFC V8 04/22] block, cfq: remove SSD-related logic
From: Arianna Avanzini <avanzini.arianna@gmail.com>

CFQ disables idling for SSD devices to achieve a higher throughput. As
with seeky queues (see the previous commit), BFQ makes idling decisions
for SSD devices in a more complex way, according to a unified strategy
for boosting throughput while at the same time preserving strong
throughput-distribution and latency guarantees. This commit therefore
removes the CFQ mechanism.
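
For reference, the logic being dropped amounts to the short-circuit
sketched below (assembled from the hunks in this patch, with the field
and helper names as they appear in cfq-iosched.c; this is an
illustration of the removed check, not new code):

	/*
	 * Check removed by this patch: on a non-rotational (SSD) queue
	 * that supports hardware tagged queuing, CFQ used to skip
	 * idling entirely (see cfq_should_idle() and
	 * cfq_arm_slice_timer() in the diff).
	 */
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
		return;	/* do not arm the idle timer on SSDs */

Likewise, the non-rotational branch of cfq_update_io_seektime(), which
recorded seekiness from the request size (n_sec < CFQQ_SECT_THR_NONROT)
rather than the seek distance, goes away; after this patch seekiness is
judged by sdist > CFQQ_SEEK_THR on all devices.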

Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
---
block/cfq-iosched.c | 18 ++----------------
1 file changed, 2 insertions(+), 16 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 870d1ba..fc53555 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -51,7 +51,6 @@ static const int cfq_hist_divisor = 4;

#define CFQQ_SEEK_THR (sector_t)(8 * 100)
#define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
-#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)

#define RQ_CIC(rq) icq_to_cic((rq)->elv.icq)
@@ -2695,8 +2694,7 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
return false;

/* We do for queues that were marked with idle window flag. */
- if (cfq_cfqq_idle_window(cfqq) &&
- !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
+ if (cfq_cfqq_idle_window(cfqq))
return true;

/*
@@ -2717,14 +2715,6 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
struct cfq_io_cq *cic;
unsigned long sl, group_idle = 0;

- /*
- * SSD device without seek penalty, disable idling. But only do so
- * for devices that support queuing, otherwise we still have a problem
- * with sync vs async workloads.
- */
- if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
- return;
-
WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
WARN_ON(cfq_cfqq_slice_new(cfqq));

@@ -3585,7 +3575,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct request *rq)
{
sector_t sdist = 0;
- sector_t n_sec = blk_rq_sectors(rq);
if (cfqq->last_request_pos) {
if (cfqq->last_request_pos < blk_rq_pos(rq))
sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
@@ -3594,10 +3583,7 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}

cfqq->seek_history <<= 1;
- if (blk_queue_nonrot(cfqd->queue))
- cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
- else
- cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
+ cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}

/*
--
1.9.1