From: Ulf Hansson <ulf.hansson@linaro.org>
Date: Tue, 28 Nov 2017
Subject: Re: [PATCH] block, bfq: remove batches of confusing ifdefs
On 28 November 2017 at 10:37, Paolo Valente <paolo.valente@linaro.org> wrote:
> Commit a33801e8b473 ("block, bfq: move debug blkio stats behind
> CONFIG_DEBUG_BLK_CGROUP") introduced two batches of confusing ifdefs:
> one reported in [1], plus a similar one in another function. This
> commit removes both batches, in the way suggested in [1].
>
> [1] https://www.spinics.net/lists/linux-block/msg20043.html
>
> Fixes: a33801e8b473 ("block, bfq: move debug blkio stats behind CONFIG_DEBUG_BLK_CGROUP")
> Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
> Tested-by: Luca Miccio <lucmiccio@gmail.com>
> Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
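
In essence, the whole patch applies one classic kernel pattern: define each
stats helper once under the relevant config options, provide an empty inline
stub otherwise, and keep the callers completely free of ifdefs. A minimal
stand-alone sketch of that pattern (the "foo" names and CONFIG_DEBUG_FOO_STATS
are made up for illustration, not code from the patch):

	/* hypothetical example, not from bfq */
	struct foo {
		unsigned long nr_events;
	};

	#ifdef CONFIG_DEBUG_FOO_STATS
	static inline void foo_update_stats(struct foo *f)
	{
		/* real bookkeeping, compiled in only when debugging is enabled */
		f->nr_events++;
	}
	#else
	static inline void foo_update_stats(struct foo *f) {}
	#endif

	static void foo_handle_event(struct foo *f)
	{
		/* the hot path stays ifdef-free; the empty stub costs nothing */
		foo_update_stats(f);
	}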

This is a good step of improvement (one may consider further steps, as
sketched below), so feel free to add:

Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
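
To expand on the "further steps" remark: wherever everything a helper's body
touches is visible in all configurations, even the remaining #if/#else pair
could go away in favor of the kernel's IS_ENABLED() macro, letting the
compiler discard the body. A sketch reusing the hypothetical names above
(this is presumably not directly possible for these bfq helpers, since the
stats code they call need not be compiled in at all):

	static inline void foo_update_stats(struct foo *f)
	{
		/* hypothetical example: compiles to nothing when the option
		 * is off, and no #else stub is needed; this only works
		 * because everything the body touches exists in all
		 * configurations
		 */
		if (!IS_ENABLED(CONFIG_DEBUG_FOO_STATS))
			return;
		f->nr_events++;
	}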

Kind regards
Uffe

> ---
> block/bfq-iosched.c | 127 +++++++++++++++++++++++++++++-----------------------
> 1 file changed, 72 insertions(+), 55 deletions(-)
>
> diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
> index bcb6d21..0153fea 100644
> --- a/block/bfq-iosched.c
> +++ b/block/bfq-iosched.c
> @@ -3689,35 +3689,16 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
>  	return rq;
>  }
>
> -static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
> -{
> -	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
> -	struct request *rq;
> -#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
> -	struct bfq_queue *in_serv_queue, *bfqq;
> -	bool waiting_rq, idle_timer_disabled;
> -#endif
> -
> -	spin_lock_irq(&bfqd->lock);
> -
>  #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
> -	in_serv_queue = bfqd->in_service_queue;
> -	waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
> -
> -	rq = __bfq_dispatch_request(hctx);
> -
> -	idle_timer_disabled =
> -		waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
> -
> -#else
> -	rq = __bfq_dispatch_request(hctx);
> -#endif
> -	spin_unlock_irq(&bfqd->lock);
> +static inline void bfq_update_dispatch_stats(struct request *rq,
> +					     spinlock_t *queue_lock,
> +					     struct bfq_queue *in_serv_queue,
> +					     bool idle_timer_disabled)
> +{
> +	struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
>
> -#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
> -	bfqq = rq ? RQ_BFQQ(rq) : NULL;
>  	if (!idle_timer_disabled && !bfqq)
> -		return rq;
> +		return;
>
>  	/*
>  	 * rq and bfqq are guaranteed to exist until this function
> @@ -3732,7 +3713,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
>  	 * In addition, the following queue lock guarantees that
>  	 * bfqq_group(bfqq) exists as well.
>  	 */
> -	spin_lock_irq(hctx->queue->queue_lock);
> +	spin_lock_irq(queue_lock);
>  	if (idle_timer_disabled)
>  		/*
>  		 * Since the idle timer has been disabled,
> @@ -3751,9 +3732,37 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
>  		bfqg_stats_set_start_empty_time(bfqg);
>  		bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
>  	}
> -	spin_unlock_irq(hctx->queue->queue_lock);
> +	spin_unlock_irq(queue_lock);
> +}
> +#else
> +static inline void bfq_update_dispatch_stats(struct request *rq,
> +					     spinlock_t *queue_lock,
> +					     struct bfq_queue *in_serv_queue,
> +					     bool idle_timer_disabled) {}
>  #endif
>
> +static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
> +{
> +	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
> +	struct request *rq;
> +	struct bfq_queue *in_serv_queue;
> +	bool waiting_rq, idle_timer_disabled;
> +
> +	spin_lock_irq(&bfqd->lock);
> +
> +	in_serv_queue = bfqd->in_service_queue;
> +	waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
> +
> +	rq = __bfq_dispatch_request(hctx);
> +
> +	idle_timer_disabled =
> +		waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
> +
> +	spin_unlock_irq(&bfqd->lock);
> +
> +	bfq_update_dispatch_stats(rq, hctx->queue->queue_lock, in_serv_queue,
> +				  idle_timer_disabled);
> +
>  	return rq;
>  }
>
> @@ -4276,16 +4285,46 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
>  	return idle_timer_disabled;
>  }
>
> +#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
> +static inline void bfq_update_insert_stats(struct bfq_queue *bfqq,
> +					   spinlock_t *queue_lock,
> +					   bool idle_timer_disabled,
> +					   unsigned int cmd_flags)
> +{
> +	if (!bfqq)
> +		return;
> +
> +	/*
> +	 * bfqq still exists, because it can disappear only after
> +	 * either it is merged with another queue, or the process it
> +	 * is associated with exits. But both actions must be taken by
> +	 * the same process currently executing this flow of
> +	 * instructions.
> +	 *
> +	 * In addition, the following queue lock guarantees that
> +	 * bfqq_group(bfqq) exists as well.
> +	 */
> +	spin_lock_irq(queue_lock);
> +	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
> +	if (idle_timer_disabled)
> +		bfqg_stats_update_idle_time(bfqq_group(bfqq));
> +	spin_unlock_irq(queue_lock);
> +}
> +#else
> +static inline void bfq_update_insert_stats(struct bfq_queue *bfqq,
> +					   spinlock_t *queue_lock,
> +					   bool idle_timer_disabled,
> +					   unsigned int cmd_flags) {}
> +#endif
> +
>  static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
>  			       bool at_head)
>  {
>  	struct request_queue *q = hctx->queue;
>  	struct bfq_data *bfqd = q->elevator->elevator_data;
> -#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
>  	struct bfq_queue *bfqq = RQ_BFQQ(rq);
>  	bool idle_timer_disabled = false;
>  	unsigned int cmd_flags;
> -#endif
>
>  	spin_lock_irq(&bfqd->lock);
>  	if (blk_mq_sched_try_insert_merge(q, rq)) {
> @@ -4304,7 +4343,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
>  		else
>  			list_add_tail(&rq->queuelist, &bfqd->dispatch);
>  	} else {
> -#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
>  		idle_timer_disabled = __bfq_insert_request(bfqd, rq);
>  		/*
>  		 * Update bfqq, because, if a queue merge has occurred
> @@ -4312,9 +4350,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
>  		 * redirected into a new queue.
>  		 */
>  		bfqq = RQ_BFQQ(rq);
> -#else
> -		__bfq_insert_request(bfqd, rq);
> -#endif
>
>  		if (rq_mergeable(rq)) {
>  			elv_rqhash_add(q, rq);
> @@ -4323,35 +4358,17 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
>  		}
>  	}
>
> -#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
>  	/*
>  	 * Cache cmd_flags before releasing scheduler lock, because rq
>  	 * may disappear afterwards (for example, because of a request
>  	 * merge).
>  	 */
>  	cmd_flags = rq->cmd_flags;
> -#endif
> +
>  	spin_unlock_irq(&bfqd->lock);
>
> -#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
> -	if (!bfqq)
> -		return;
> -	/*
> -	 * bfqq still exists, because it can disappear only after
> -	 * either it is merged with another queue, or the process it
> -	 * is associated with exits. But both actions must be taken by
> -	 * the same process currently executing this flow of
> -	 * instruction.
> -	 *
> -	 * In addition, the following queue lock guarantees that
> -	 * bfqq_group(bfqq) exists as well.
> -	 */
> -	spin_lock_irq(q->queue_lock);
> -	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
> -	if (idle_timer_disabled)
> -		bfqg_stats_update_idle_time(bfqq_group(bfqq));
> -	spin_unlock_irq(q->queue_lock);
> -#endif
> +	bfq_update_insert_stats(bfqq, q->queue_lock, idle_timer_disabled,
> +				cmd_flags);
>  }
>
>  static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
> --
> 2.10.0
>
