Subject: [PATCH 01/10] xen/blkfront: convert to blk-mq API
This patch converts the xen-blkfront driver to use the block multiqueue (blk-mq)
API, forcing a single hardware queue for now. (For context, a minimal sketch of
the blk-mq registration flow follows the diffstat below.)

Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
drivers/block/xen-blkfront.c | 118 ++++++++++++++++++++++++++++++++++++-------
1 file changed, 100 insertions(+), 18 deletions(-)
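
For readers not yet familiar with blk-mq, the following is a minimal sketch
(editor's illustration, not part of the patch) of the registration flow the
driver adopts in the diff below. The sketch_* names and the queue depth are
hypothetical; the ops table, field names and return codes match the blk-mq API
of this kernel generation as used in the patch itself:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/string.h>

/* Illustrative per-device state; a real driver embeds this in its own struct. */
struct sketch_dev {
	struct blk_mq_tag_set tag_set;
	struct request_queue *rq;
};

/* ->queue_rq() is called once per request on the chosen hardware queue. */
static int sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *qd)
{
	blk_mq_start_request(qd->rq);
	/* ... submit qd->rq to the hardware (or, for blkfront, the shared ring) ... */
	return BLK_MQ_RQ_QUEUE_OK;	/* or _BUSY to retry later, _ERROR to fail */
}

static struct blk_mq_ops sketch_mq_ops = {
	.queue_rq	= sketch_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* default CPU-to-queue mapping */
};

static int sketch_init_queue(struct sketch_dev *d)
{
	memset(&d->tag_set, 0, sizeof(d->tag_set));
	d->tag_set.ops		= &sketch_mq_ops;
	d->tag_set.nr_hw_queues	= 1;	/* single hw queue, as in this patch */
	d->tag_set.queue_depth	= 64;	/* illustrative; blkfront uses BLK_RING_SIZE */
	d->tag_set.numa_node	= NUMA_NO_NODE;
	d->tag_set.flags	= BLK_MQ_F_SHOULD_MERGE;
	d->tag_set.driver_data	= d;

	if (blk_mq_alloc_tag_set(&d->tag_set))
		return -ENOMEM;

	d->rq = blk_mq_init_queue(&d->tag_set);
	if (IS_ERR(d->rq)) {
		blk_mq_free_tag_set(&d->tag_set);
		return PTR_ERR(d->rq);
	}
	return 0;
}

Tear-down is the reverse order: blk_cleanup_queue() on the request queue, then
blk_mq_free_tag_set(), which is what the patch does in xlvbd_release_gendisk().
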

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2236c6f..13e6178 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -37,6 +37,7 @@

#include <linux/interrupt.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
@@ -133,6 +134,8 @@ struct blkfront_info
unsigned int feature_persistent:1;
unsigned int max_indirect_segments;
int is_ready;
+ struct blk_mq_tag_set tag_set;
+ int feature_multiqueue;
};

static unsigned int nr_minors;
@@ -651,6 +654,42 @@ wait:
flush_requests(info);
}

+static int blk_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *qd)
+{
+ struct blkfront_info *info = qd->rq->rq_disk->private_data;
+ int ret = BLK_MQ_RQ_QUEUE_OK;
+
+ blk_mq_start_request(qd->rq);
+ spin_lock_irq(&info->io_lock);
+ if (RING_FULL(&info->ring)) {
+ blk_mq_stop_hw_queue(hctx);
+ ret = BLK_MQ_RQ_QUEUE_BUSY;
+ goto out;
+ }
+
+ if (blkif_request_flush_invalid(qd->rq, info)) {
+ ret = BLK_MQ_RQ_QUEUE_ERROR;
+ goto out;
+ }
+
+ if (blkif_queue_request(qd->rq)) {
+ blk_mq_stop_hw_queue(hctx);
+ ret = BLK_MQ_RQ_QUEUE_BUSY;
+ goto out;
+ }
+
+ flush_requests(info);
+out:
+ spin_unlock_irq(&info->io_lock);
+ return ret;
+}
+
+static struct blk_mq_ops blkfront_mq_ops = {
+ .queue_rq = blk_mq_queue_rq,
+ .map_queue = blk_mq_map_queue,
+};
+
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
unsigned int physical_sector_size,
unsigned int segments)
@@ -658,9 +697,28 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
struct request_queue *rq;
struct blkfront_info *info = gd->private_data;

- rq = blk_init_queue(do_blkif_request, &info->io_lock);
- if (rq == NULL)
- return -1;
+ if (info->feature_multiqueue) {
+ memset(&info->tag_set, 0, sizeof(info->tag_set));
+ info->tag_set.ops = &blkfront_mq_ops;
+ info->tag_set.nr_hw_queues = 1;
+ info->tag_set.queue_depth = BLK_RING_SIZE;
+ info->tag_set.numa_node = NUMA_NO_NODE;
+ info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+ info->tag_set.cmd_size = 0;
+ info->tag_set.driver_data = info;
+
+ if (blk_mq_alloc_tag_set(&info->tag_set))
+ return -1;
+ rq = blk_mq_init_queue(&info->tag_set);
+ if (IS_ERR(rq)) {
+ blk_mq_free_tag_set(&info->tag_set);
+ return -1;
+ }
+ } else {
+ rq = blk_init_queue(do_blkif_request, &info->io_lock);
+ if (rq == NULL)
+ return -1;
+ }

queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

@@ -896,7 +954,10 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
spin_lock_irqsave(&info->io_lock, flags);

/* No more blkif_request(). */
- blk_stop_queue(info->rq);
+ if (info->feature_multiqueue)
+ blk_mq_stop_hw_queues(info->rq);
+ else
+ blk_stop_queue(info->rq);

/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
@@ -912,6 +973,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
xlbd_release_minors(minor, nr_minors);

blk_cleanup_queue(info->rq);
+ blk_mq_free_tag_set(&info->tag_set);
info->rq = NULL;

put_disk(info->gd);
@@ -921,10 +983,14 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
static void kick_pending_request_queues(struct blkfront_info *info)
{
if (!RING_FULL(&info->ring)) {
- /* Re-enable calldowns. */
- blk_start_queue(info->rq);
- /* Kick things off immediately. */
- do_blkif_request(info->rq);
+ if (info->feature_multiqueue) {
+ blk_mq_start_stopped_hw_queues(info->rq, true);
+ } else {
+ /* Re-enable calldowns. */
+ blk_start_queue(info->rq);
+ /* Kick things off immediately. */
+ do_blkif_request(info->rq);
+ }
}
}

@@ -949,8 +1015,12 @@ static void blkif_free(struct blkfront_info *info, int suspend)
info->connected = suspend ?
BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
/* No more blkif_request(). */
- if (info->rq)
- blk_stop_queue(info->rq);
+ if (info->rq) {
+ if (info->feature_multiqueue)
+ blk_mq_stop_hw_queues(info->rq);
+ else
+ blk_stop_queue(info->rq);
+ }

/* Remove all persistent grants */
if (!list_empty(&info->grants)) {
@@ -1175,37 +1245,40 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
continue;
}

- error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+ error = req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
switch (bret->operation) {
case BLKIF_OP_DISCARD:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- error = -EOPNOTSUPP;
+ error = req->errors = -EOPNOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
}
- __blk_end_request_all(req, error);
+ if (info->feature_multiqueue)
+ blk_mq_complete_request(req);
+ else
+ __blk_end_request_all(req, error);
break;
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- error = -EOPNOTSUPP;
+ error = req->errors = -EOPNOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
info->shadow[id].req.u.rw.nr_segments == 0)) {
printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- error = -EOPNOTSUPP;
+ error = req->errors = -EOPNOTSUPP;
}
if (unlikely(error)) {
if (error == -EOPNOTSUPP)
- error = 0;
+ error = req->errors = 0;
info->feature_flush = 0;
xlvbd_flush(info);
}
@@ -1216,7 +1289,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
"request: %x\n", bret->status);

- __blk_end_request_all(req, error);
+ if (info->feature_multiqueue)
+ blk_mq_complete_request(req);
+ else
+ __blk_end_request_all(req, error);
break;
default:
BUG();
@@ -1552,8 +1628,13 @@ static int blkif_recover(struct blkfront_info *info)
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
BUG_ON(req->nr_phys_segments > segs);
- blk_requeue_request(info->rq, req);
+ if (info->feature_multiqueue)
+ blk_mq_requeue_request(req);
+ else
+ blk_requeue_request(info->rq, req);
}
+ if (info->feature_multiqueue)
+ blk_mq_kick_requeue_list(info->rq);
spin_unlock_irq(&info->io_lock);

while ((bio = bio_list_pop(&bio_list)) != NULL) {
@@ -1873,6 +1954,7 @@ static void blkfront_connect(struct blkfront_info *info)
return;
}

+ info->feature_multiqueue = 1;
err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
physical_sector_size);
if (err) {
--
1.8.3.1

