Skip to content

Commit

Permalink
blk-mq: remove ->map_queue
Browse files Browse the repository at this point in the history
All drivers use the default, so provide an inline version of it.  If we
ever need another queue mapping we can add an optional method back,
although supporting it will also require major changes to the queue setup
code.

This provides better code generation, and better debuggability as well.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
  • Loading branch information
Christoph Hellwig authored and axboe committed Sep 15, 2016
1 parent bdd17e7 commit 7d7e0f9
Show file tree
Hide file tree
Showing 18 changed files with 25 additions and 65 deletions.
6 changes: 3 additions & 3 deletions block/blk-flush.c
Original file line number Diff line number Diff line change
Expand Up @@ -232,7 +232,7 @@ static void flush_end_io(struct request *flush_rq, int error)

/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);
hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
flush_rq->tag = -1;
}
Expand Down Expand Up @@ -325,7 +325,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
flush_rq->tag = first_rq->tag;
fq->orig_rq = first_rq;

hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);
hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
}

Expand Down Expand Up @@ -358,7 +358,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
unsigned long flags;
struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

hctx = q->mq_ops->map_queue(q, ctx->cpu);
hctx = blk_mq_map_queue(q, ctx->cpu);

/*
* After populating an empty queue, kick it to avoid stall. Read
Expand Down
5 changes: 2 additions & 3 deletions block/blk-mq-tag.c
Original file line number Diff line number Diff line change
Expand Up @@ -301,8 +301,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
io_schedule();

data->ctx = blk_mq_get_ctx(data->q);
data->hctx = data->q->mq_ops->map_queue(data->q,
data->ctx->cpu);
data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
if (data->flags & BLK_MQ_REQ_RESERVED) {
bt = &data->hctx->tags->breserved_tags;
} else {
Expand Down Expand Up @@ -726,7 +725,7 @@ u32 blk_mq_unique_tag(struct request *rq)
int hwq = 0;

if (q->mq_ops) {
hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
hwq = hctx->queue_num;
}

Expand Down
40 changes: 11 additions & 29 deletions block/blk-mq.c
Original file line number Diff line number Diff line change
Expand Up @@ -245,7 +245,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
return ERR_PTR(ret);

ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);

rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
Expand All @@ -254,7 +254,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
blk_mq_put_ctx(ctx);

ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
ctx = alloc_data.ctx;
Expand Down Expand Up @@ -338,11 +338,7 @@ EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);

void blk_mq_free_request(struct request *rq)
{
struct blk_mq_hw_ctx *hctx;
struct request_queue *q = rq->q;

hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
blk_mq_free_hctx_request(hctx, rq);
blk_mq_free_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

Expand Down Expand Up @@ -1074,9 +1070,7 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx;

hctx = q->mq_ops->map_queue(q, ctx->cpu);
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

spin_lock(&ctx->lock);
__blk_mq_insert_request(hctx, rq, at_head);
Expand All @@ -1093,12 +1087,10 @@ static void blk_mq_insert_requests(struct request_queue *q,
bool from_schedule)

{
struct blk_mq_hw_ctx *hctx;
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

trace_block_unplug(q, depth, !from_schedule);

hctx = q->mq_ops->map_queue(q, ctx->cpu);

/*
* preemption doesn't flush plug list, so it's possible ctx->cpu is
* offline now
Expand Down Expand Up @@ -1232,7 +1224,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,

blk_queue_enter_live(q);
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
hctx = blk_mq_map_queue(q, ctx->cpu);

if (rw_is_sync(bio_op(bio), bio->bi_opf))
op_flags |= REQ_SYNC;
Expand All @@ -1246,7 +1238,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
trace_block_sleeprq(q, bio, op);

ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
ctx = alloc_data.ctx;
Expand All @@ -1263,8 +1255,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
{
int ret;
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
rq->mq_ctx->cpu);
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
struct blk_mq_queue_data bd = {
.rq = rq,
.list = NULL,
Expand Down Expand Up @@ -1468,15 +1459,6 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
return cookie;
}

/*
* Default mapping to a software queue, since we use one per CPU.
*/
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
{
return q->queue_hw_ctx[q->mq_map[cpu]];
}
EXPORT_SYMBOL(blk_mq_map_queue);

static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags, unsigned int hctx_idx)
{
Expand Down Expand Up @@ -1810,7 +1792,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
if (!cpu_online(i))
continue;

hctx = q->mq_ops->map_queue(q, i);
hctx = blk_mq_map_queue(q, i);

/*
* Set local node, IFF we have more than one hw queue. If
Expand Down Expand Up @@ -1848,7 +1830,7 @@ static void blk_mq_map_swqueue(struct request_queue *q,
continue;

ctx = per_cpu_ptr(q->queue_ctx, i);
hctx = q->mq_ops->map_queue(q, i);
hctx = blk_mq_map_queue(q, i);

cpumask_set_cpu(i, hctx->cpumask);
ctx->index_hw = hctx->nr_ctx;
Expand Down Expand Up @@ -2313,7 +2295,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
return -EINVAL;

if (!set->ops->queue_rq || !set->ops->map_queue)
if (!set->ops->queue_rq)
return -EINVAL;

if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
Expand Down
6 changes: 6 additions & 0 deletions block/blk-mq.h
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,12 @@ extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
const struct cpumask *online_mask);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
int cpu)
{
return q->queue_hw_ctx[q->mq_map[cpu]];
}

/*
* sysfs helpers
*/
Expand Down
11 changes: 3 additions & 8 deletions block/blk.h
Original file line number Diff line number Diff line change
Expand Up @@ -39,14 +39,9 @@ extern struct ida blk_queue_ida;
static inline struct blk_flush_queue *blk_get_flush_queue(
struct request_queue *q, struct blk_mq_ctx *ctx)
{
struct blk_mq_hw_ctx *hctx;

if (!q->mq_ops)
return q->fq;

hctx = q->mq_ops->map_queue(q, ctx->cpu);

return hctx->fq;
if (q->mq_ops)
return blk_mq_map_queue(q, ctx->cpu)->fq;
return q->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
Expand Down
1 change: 0 additions & 1 deletion drivers/block/loop.c
Original file line number Diff line number Diff line change
Expand Up @@ -1703,7 +1703,6 @@ static int loop_init_request(void *data, struct request *rq,

static struct blk_mq_ops loop_mq_ops = {
.queue_rq = loop_queue_rq,
.map_queue = blk_mq_map_queue,
.init_request = loop_init_request,
};

Expand Down
1 change: 0 additions & 1 deletion drivers/block/mtip32xx/mtip32xx.c
Original file line number Diff line number Diff line change
Expand Up @@ -3895,7 +3895,6 @@ static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,

static struct blk_mq_ops mtip_mq_ops = {
.queue_rq = mtip_queue_rq,
.map_queue = blk_mq_map_queue,
.init_request = mtip_init_cmd,
.exit_request = mtip_free_cmd,
.complete = mtip_softirq_done_fn,
Expand Down
1 change: 0 additions & 1 deletion drivers/block/null_blk.c
Original file line number Diff line number Diff line change
Expand Up @@ -393,7 +393,6 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,

static struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.map_queue = blk_mq_map_queue,
.init_hctx = null_init_hctx,
.complete = null_softirq_done_fn,
};
Expand Down
1 change: 0 additions & 1 deletion drivers/block/rbd.c
Original file line number Diff line number Diff line change
Expand Up @@ -3621,7 +3621,6 @@ static int rbd_init_request(void *data, struct request *rq,

static struct blk_mq_ops rbd_mq_ops = {
.queue_rq = rbd_queue_rq,
.map_queue = blk_mq_map_queue,
.init_request = rbd_init_request,
};

Expand Down
1 change: 0 additions & 1 deletion drivers/block/virtio_blk.c
Original file line number Diff line number Diff line change
Expand Up @@ -542,7 +542,6 @@ static int virtblk_init_request(void *data, struct request *rq,

static struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.map_queue = blk_mq_map_queue,
.complete = virtblk_request_done,
.init_request = virtblk_init_request,
};
Expand Down
1 change: 0 additions & 1 deletion drivers/block/xen-blkfront.c
Original file line number Diff line number Diff line change
Expand Up @@ -909,7 +909,6 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,

static struct blk_mq_ops blkfront_mq_ops = {
.queue_rq = blkif_queue_rq,
.map_queue = blk_mq_map_queue,
};

static void blkif_set_queue_limits(struct blkfront_info *info)
Expand Down
1 change: 0 additions & 1 deletion drivers/md/dm-rq.c
Original file line number Diff line number Diff line change
Expand Up @@ -908,7 +908,6 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,

static struct blk_mq_ops dm_mq_ops = {
.queue_rq = dm_mq_queue_rq,
.map_queue = blk_mq_map_queue,
.complete = dm_softirq_done,
.init_request = dm_mq_init_request,
};
Expand Down
1 change: 0 additions & 1 deletion drivers/mtd/ubi/block.c
Original file line number Diff line number Diff line change
Expand Up @@ -351,7 +351,6 @@ static int ubiblock_init_request(void *data, struct request *req,
static struct blk_mq_ops ubiblock_mq_ops = {
.queue_rq = ubiblock_queue_rq,
.init_request = ubiblock_init_request,
.map_queue = blk_mq_map_queue,
};

static DEFINE_IDR(ubiblock_minor_idr);
Expand Down
2 changes: 0 additions & 2 deletions drivers/nvme/host/pci.c
Original file line number Diff line number Diff line change
Expand Up @@ -1131,7 +1131,6 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
static struct blk_mq_ops nvme_mq_admin_ops = {
.queue_rq = nvme_queue_rq,
.complete = nvme_complete_rq,
.map_queue = blk_mq_map_queue,
.init_hctx = nvme_admin_init_hctx,
.exit_hctx = nvme_admin_exit_hctx,
.init_request = nvme_admin_init_request,
Expand All @@ -1141,7 +1140,6 @@ static struct blk_mq_ops nvme_mq_admin_ops = {
static struct blk_mq_ops nvme_mq_ops = {
.queue_rq = nvme_queue_rq,
.complete = nvme_complete_rq,
.map_queue = blk_mq_map_queue,
.init_hctx = nvme_init_hctx,
.init_request = nvme_init_request,
.timeout = nvme_timeout,
Expand Down
2 changes: 0 additions & 2 deletions drivers/nvme/host/rdma.c
Original file line number Diff line number Diff line change
Expand Up @@ -1531,7 +1531,6 @@ static void nvme_rdma_complete_rq(struct request *rq)
static struct blk_mq_ops nvme_rdma_mq_ops = {
.queue_rq = nvme_rdma_queue_rq,
.complete = nvme_rdma_complete_rq,
.map_queue = blk_mq_map_queue,
.init_request = nvme_rdma_init_request,
.exit_request = nvme_rdma_exit_request,
.reinit_request = nvme_rdma_reinit_request,
Expand All @@ -1543,7 +1542,6 @@ static struct blk_mq_ops nvme_rdma_mq_ops = {
static struct blk_mq_ops nvme_rdma_admin_mq_ops = {
.queue_rq = nvme_rdma_queue_rq,
.complete = nvme_rdma_complete_rq,
.map_queue = blk_mq_map_queue,
.init_request = nvme_rdma_init_admin_request,
.exit_request = nvme_rdma_exit_admin_request,
.reinit_request = nvme_rdma_reinit_request,
Expand Down
2 changes: 0 additions & 2 deletions drivers/nvme/target/loop.c
Original file line number Diff line number Diff line change
Expand Up @@ -273,7 +273,6 @@ static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static struct blk_mq_ops nvme_loop_mq_ops = {
.queue_rq = nvme_loop_queue_rq,
.complete = nvme_loop_complete_rq,
.map_queue = blk_mq_map_queue,
.init_request = nvme_loop_init_request,
.init_hctx = nvme_loop_init_hctx,
.timeout = nvme_loop_timeout,
Expand All @@ -282,7 +281,6 @@ static struct blk_mq_ops nvme_loop_mq_ops = {
static struct blk_mq_ops nvme_loop_admin_mq_ops = {
.queue_rq = nvme_loop_queue_rq,
.complete = nvme_loop_complete_rq,
.map_queue = blk_mq_map_queue,
.init_request = nvme_loop_init_admin_request,
.init_hctx = nvme_loop_init_admin_hctx,
.timeout = nvme_loop_timeout,
Expand Down
1 change: 0 additions & 1 deletion drivers/scsi/scsi_lib.c
Original file line number Diff line number Diff line change
Expand Up @@ -2077,7 +2077,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
}

static struct blk_mq_ops scsi_mq_ops = {
.map_queue = blk_mq_map_queue,
.queue_rq = scsi_queue_rq,
.complete = scsi_softirq_done,
.timeout = scsi_timeout,
Expand Down
7 changes: 0 additions & 7 deletions include/linux/blk-mq.h
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,6 @@ struct blk_mq_queue_data {
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
Expand All @@ -113,11 +112,6 @@ struct blk_mq_ops {
*/
queue_rq_fn *queue_rq;

/*
* Map to specific hardware queue
*/
map_queue_fn *map_queue;

/*
* Called on request timeout
*/
Expand Down Expand Up @@ -223,7 +217,6 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
Expand Down

0 comments on commit 7d7e0f9

Please sign in to comment.