block: better op and flags encoding
Now that the common flags no longer need to overflow the range of a
32-bit type, we can encode them the same way for both the bio and
request fields.  This additionally allows us to place the operation
first (and make some room for more ops while we're at it) and to stop
having to shift the operation values around.

In addition, this allows passing around only one value in the block
layer instead of two (and eventually also in the file systems, but we
can do that later), which cleans up a lot of code.

Last but not least, this allows shrinking the cmd_flags field in
struct request to 32 bits.  Various functions passing this value could
also be updated, but I'd like to avoid that churn for now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
Christoph Hellwig authored and axboe committed Oct 28, 2016
1 parent e806402 commit ef295ec
Showing 23 changed files with 148 additions and 221 deletions.
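
The blk_types.h hunk that actually defines the new layout is not part of this excerpt, so here is a rough sketch of the encoding the commit message describes. The macro and helper names below (REQ_OP_BITS, REQ_OP_MASK, REQ_FLAG_BITS, op_is_sync) all appear in the hunks that follow, but the exact bit widths, the flag position, and the op_is_sync() body are assumptions for illustration only:

#include <linux/types.h>	/* kernel bool type */

/*
 * Illustrative sketch, not the verbatim blk_types.h change.  The
 * operation occupies the low REQ_OP_BITS of a 32-bit value and the
 * common flags sit directly above it, so a single unsigned int can
 * carry both for bios (bi_opf) and requests (cmd_flags).
 */
#define REQ_OP_BITS	8			/* assumed width */
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_op {
	REQ_OP_READ	= 0,
	REQ_OP_WRITE	= 1,
	/* ... further ops fit below (1 << REQ_OP_BITS) ... */
};

/* flag bits are numbered starting right above the op field */
#define __REQ_SYNC	REQ_OP_BITS		/* position assumed for illustration */
#define REQ_SYNC	(1U << __REQ_SYNC)

/* extracting the op no longer needs any shifting */
#define bio_op(bio)	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req)	((req)->cmd_flags & REQ_OP_MASK)

/*
 * With op and flags in one word, helpers such as op_is_sync() (used in
 * the hunks below in place of rw_is_sync(op, flags)) take a single
 * combined value.  A plausible implementation:
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ || (op & REQ_SYNC);
}

With a layout like this, assignments such as "rq->cmd_flags = op;" in __get_request() and calls such as "get_request(q, bio->bi_opf, bio, GFP_NOIO)" in blk_queue_bio() can pass the combined value straight through, which is what most of the hunks below amount to.
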
4 changes: 2 additions & 2 deletions Documentation/block/biodoc.txt
@@ -553,8 +553,8 @@ struct request {
struct request_list *rl;
}

See the rq_flag_bits definitions for an explanation of the various flags
available. Some bits are used by the block layer or i/o scheduler.
See the req_ops and req_flag_bits definitions for an explanation of the various
flags available. Some bits are used by the block layer or i/o scheduler.

The behaviour of the various sector counts are almost the same as before,
except that since we have multi-segment bios, current_nr_sectors refers
60 changes: 20 additions & 40 deletions block/blk-core.c
@@ -1056,8 +1056,7 @@ static struct io_context *rq_ioc(struct bio *bio)
/**
* __get_request - get a free request
* @rl: request list to allocate from
* @op: REQ_OP_READ/REQ_OP_WRITE
* @op_flags: rq_flag_bits
* @op: operation and flags
* @bio: bio to allocate request for (can be %NULL)
* @gfp_mask: allocation mask
*
@@ -1068,23 +1067,22 @@ static struct io_context *rq_ioc(struct bio *bio)
* Returns ERR_PTR on failure, with @q->queue_lock held.
* Returns request pointer on success, with @q->queue_lock *not held*.
*/
static struct request *__get_request(struct request_list *rl, int op,
int op_flags, struct bio *bio,
gfp_t gfp_mask)
static struct request *__get_request(struct request_list *rl, unsigned int op,
struct bio *bio, gfp_t gfp_mask)
{
struct request_queue *q = rl->q;
struct request *rq;
struct elevator_type *et = q->elevator->type;
struct io_context *ioc = rq_ioc(bio);
struct io_cq *icq = NULL;
const bool is_sync = rw_is_sync(op, op_flags) != 0;
const bool is_sync = op_is_sync(op);
int may_queue;
req_flags_t rq_flags = RQF_ALLOCED;

if (unlikely(blk_queue_dying(q)))
return ERR_PTR(-ENODEV);

may_queue = elv_may_queue(q, op, op_flags);
may_queue = elv_may_queue(q, op);
if (may_queue == ELV_MQUEUE_NO)
goto rq_starved;

@@ -1154,7 +1152,7 @@ static struct request *__get_request(struct request_list *rl, int op,

blk_rq_init(q, rq);
blk_rq_set_rl(rq, rl);
req_set_op_attrs(rq, op, op_flags);
rq->cmd_flags = op;
rq->rq_flags = rq_flags;

/* init elvpriv */
@@ -1232,8 +1230,7 @@ static struct request *__get_request(struct request_list *rl, int op,
/**
* get_request - get a free request
* @q: request_queue to allocate request from
* @op: REQ_OP_READ/REQ_OP_WRITE
* @op_flags: rq_flag_bits
* @op: operation and flags
* @bio: bio to allocate request for (can be %NULL)
* @gfp_mask: allocation mask
*
@@ -1244,18 +1241,17 @@ static struct request *__get_request(struct request_list *rl, int op,
* Returns ERR_PTR on failure, with @q->queue_lock held.
* Returns request pointer on success, with @q->queue_lock *not held*.
*/
static struct request *get_request(struct request_queue *q, int op,
int op_flags, struct bio *bio,
gfp_t gfp_mask)
static struct request *get_request(struct request_queue *q, unsigned int op,
struct bio *bio, gfp_t gfp_mask)
{
const bool is_sync = rw_is_sync(op, op_flags) != 0;
const bool is_sync = op_is_sync(op);
DEFINE_WAIT(wait);
struct request_list *rl;
struct request *rq;

rl = blk_get_rl(q, bio); /* transferred to @rq on success */
retry:
rq = __get_request(rl, op, op_flags, bio, gfp_mask);
rq = __get_request(rl, op, bio, gfp_mask);
if (!IS_ERR(rq))
return rq;

@@ -1297,7 +1293,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
create_io_context(gfp_mask, q->node);

spin_lock_irq(q->queue_lock);
rq = get_request(q, rw, 0, NULL, gfp_mask);
rq = get_request(q, rw, NULL, gfp_mask);
if (IS_ERR(rq)) {
spin_unlock_irq(q->queue_lock);
return rq;
@@ -1446,7 +1442,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
*/
if (rq_flags & RQF_ALLOCED) {
struct request_list *rl = blk_rq_rl(req);
bool sync = rw_is_sync(req_op(req), req->cmd_flags);
bool sync = op_is_sync(req->cmd_flags);

BUG_ON(!list_empty(&req->queuelist));
BUG_ON(ELV_ON_HASH(req));
@@ -1652,8 +1648,6 @@ unsigned int blk_plug_queued_count(struct request_queue *q)
void init_request_from_bio(struct request *req, struct bio *bio)
{
req->cmd_type = REQ_TYPE_FS;

req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;

@@ -1665,9 +1659,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)

static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
const bool sync = !!(bio->bi_opf & REQ_SYNC);
struct blk_plug *plug;
int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
int el_ret, where = ELEVATOR_INSERT_SORT;
struct request *req;
unsigned int request_count = 0;

@@ -1722,24 +1715,11 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
}

get_rq:
/*
* This sync check and mask will be re-done in init_request_from_bio(),
* but we need to set it earlier to expose the sync flag to the
* rq allocator and io schedulers.
*/
if (sync)
rw_flags |= REQ_SYNC;

/*
* Add in META/PRIO flags, if set, before we get to the IO scheduler
*/
rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));

/*
* Grab a free request. This is might sleep but can not fail.
* Returns with the queue unlocked.
*/
req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
if (IS_ERR(req)) {
bio->bi_error = PTR_ERR(req);
bio_endio(bio);
@@ -2946,8 +2926,6 @@ EXPORT_SYMBOL_GPL(__blk_end_request_err);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
req_set_op(rq, bio_op(bio));

if (bio_has_data(bio))
rq->nr_phys_segments = bio_phys_segments(q, bio);

@@ -3031,8 +3009,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
req_set_op_attrs(dst, req_op(src),
(src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE);
dst->cmd_flags = src->cmd_flags | REQ_NOMERGE;
dst->cmd_type = src->cmd_type;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
@@ -3537,8 +3514,11 @@ EXPORT_SYMBOL(blk_set_runtime_active);

int __init blk_dev_init(void)
{
BUILD_BUG_ON(__REQ_NR_BITS > 8 *
BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
FIELD_SIZEOF(struct request, cmd_flags));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
FIELD_SIZEOF(struct bio, bi_opf));

/* used for unplugging and affects IO latency/throughput - HIGHPRI */
kblockd_workqueue = alloc_workqueue("kblockd",
2 changes: 1 addition & 1 deletion block/blk-flush.c
@@ -330,7 +330,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
}

flush_rq->cmd_type = REQ_TYPE_FS;
req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH);
flush_rq->cmd_flags = REQ_OP_FLUSH | WRITE_FLUSH;
flush_rq->rq_flags |= RQF_FLUSH_SEQ;
flush_rq->rq_disk = first_rq->rq_disk;
flush_rq->end_io = flush_end_io;
2 changes: 1 addition & 1 deletion block/blk-lib.c
@@ -29,7 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = *biop;
unsigned int granularity;
enum req_op op;
unsigned int op;
int alignment;
sector_t bs_mask;

2 changes: 2 additions & 0 deletions block/blk-map.c
@@ -16,6 +16,8 @@
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
if (!rq->bio) {
rq->cmd_flags &= REQ_OP_MASK;
rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
blk_rq_bio_prep(rq->q, rq, bio);
} else {
if (!ll_back_merge_fn(rq->q, rq, bio))
28 changes: 11 additions & 17 deletions block/blk-mq.c
@@ -139,14 +139,13 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
EXPORT_SYMBOL(blk_mq_can_queue);

static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
struct request *rq, int op,
unsigned int op_flags)
struct request *rq, unsigned int op)
{
INIT_LIST_HEAD(&rq->queuelist);
/* csd/requeue_work/fifo_time is initialized before use */
rq->q = q;
rq->mq_ctx = ctx;
req_set_op_attrs(rq, op, op_flags);
rq->cmd_flags = op;
if (blk_queue_io_stat(q))
rq->rq_flags |= RQF_IO_STAT;
/* do not touch atomic flags, it needs atomic ops against the timer */
@@ -183,11 +182,11 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
rq->end_io_data = NULL;
rq->next_rq = NULL;

ctx->rq_dispatched[rw_is_sync(op, op_flags)]++;
ctx->rq_dispatched[op_is_sync(op)]++;
}

static struct request *
__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
__blk_mq_alloc_request(struct blk_mq_alloc_data *data, unsigned int op)
{
struct request *rq;
unsigned int tag;
@@ -202,7 +201,7 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
}

rq->tag = tag;
blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
return rq;
}

@@ -225,7 +224,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
ctx = blk_mq_get_ctx(q);
hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
rq = __blk_mq_alloc_request(&alloc_data, rw);
blk_mq_put_ctx(ctx);

if (!rq) {
@@ -277,7 +276,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));

blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
rq = __blk_mq_alloc_request(&alloc_data, rw);
if (!rq) {
ret = -EWOULDBLOCK;
goto out_queue_exit;
@@ -1196,19 +1195,14 @@ static struct request *blk_mq_map_request(struct request_queue *q,
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct request *rq;
int op = bio_data_dir(bio);
int op_flags = 0;

blk_queue_enter_live(q);
ctx = blk_mq_get_ctx(q);
hctx = blk_mq_map_queue(q, ctx->cpu);

if (rw_is_sync(bio_op(bio), bio->bi_opf))
op_flags |= REQ_SYNC;

trace_block_getrq(q, bio, op);
trace_block_getrq(q, bio, bio->bi_opf);
blk_mq_set_alloc_data(data, q, 0, ctx, hctx);
rq = __blk_mq_alloc_request(data, op, op_flags);
rq = __blk_mq_alloc_request(data, bio->bi_opf);

data->hctx->queued++;
return rq;
@@ -1256,7 +1250,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
*/
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
struct blk_mq_alloc_data data;
struct request *rq;
Expand Down Expand Up @@ -1350,7 +1344,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
*/
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
struct blk_plug *plug;
unsigned int request_count = 0;