Skip to content

Commit

Permalink
block: blk-mq: support draining mq queue
Browse files Browse the repository at this point in the history
blk_mq_drain_queue() is introduced so that we can drain
mq queue inside blk_cleanup_queue().

Also don't accept new requests any more if queue is marked
as dying.

Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
  • Loading branch information
ming1 authored and axboe committed Dec 31, 2013
1 parent b28bc9b commit 43a5e4e
Show file tree
Hide file tree
Showing 4 changed files with 40 additions and 18 deletions.
10 changes: 8 additions & 2 deletions block/blk-core.c
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@

#include "blk.h"
#include "blk-cgroup.h"
#include "blk-mq.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
Expand Down Expand Up @@ -497,8 +498,13 @@ void blk_cleanup_queue(struct request_queue *q)
* Drain all requests queued before DYING marking. Set DEAD flag to
* prevent that q->request_fn() gets invoked after draining finished.
*/
spin_lock_irq(lock);
__blk_drain_queue(q, true);
if (q->mq_ops) {
blk_mq_drain_queue(q);
spin_lock_irq(lock);
} else {
spin_lock_irq(lock);
__blk_drain_queue(q, true);
}
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);

Expand Down
4 changes: 4 additions & 0 deletions block/blk-exec.c
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,10 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
rq->rq_disk = bd_disk;
rq->end_io = done;

/*
 * don't check the dying flag for MQ because the request won't
 * be reused after the dying flag is set
 */
if (q->mq_ops) {
blk_mq_insert_request(q, rq, true);
return;
Expand Down
43 changes: 27 additions & 16 deletions block/blk-mq.c
Original file line number Diff line number Diff line change
Expand Up @@ -106,10 +106,13 @@ static int blk_mq_queue_enter(struct request_queue *q)

spin_lock_irq(q->queue_lock);
ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
!blk_queue_bypass(q), *q->queue_lock);
!blk_queue_bypass(q) || blk_queue_dying(q),
*q->queue_lock);
/* inc usage with lock hold to avoid freeze_queue runs here */
if (!ret)
if (!ret && !blk_queue_dying(q))
__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
else if (blk_queue_dying(q))
ret = -ENODEV;
spin_unlock_irq(q->queue_lock);

return ret;
Expand All @@ -120,6 +123,22 @@ static void blk_mq_queue_exit(struct request_queue *q)
__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
}

/*
 * Wait until no request is in flight on @q, as tracked by the per-cpu
 * usage counter. While requests remain, kick the hardware queues so
 * outstanding work completes, then back off briefly and re-check.
 */
static void __blk_mq_drain_queue(struct request_queue *q)
{
	for (;;) {
		s64 in_flight;

		/* queue_lock serializes the counter sum against enter/exit */
		spin_lock_irq(q->queue_lock);
		in_flight = percpu_counter_sum(&q->mq_usage_counter);
		spin_unlock_irq(q->queue_lock);

		if (!in_flight)
			return;

		/* push pending requests through, then poll again */
		blk_mq_run_queues(q, false);
		msleep(10);
	}
}

/*
* Guarantee no request is in use, so we can change any data structure of
* the queue afterward.
Expand All @@ -133,21 +152,13 @@ static void blk_mq_freeze_queue(struct request_queue *q)
queue_flag_set(QUEUE_FLAG_BYPASS, q);
spin_unlock_irq(q->queue_lock);

if (!drain)
return;

while (true) {
s64 count;

spin_lock_irq(q->queue_lock);
count = percpu_counter_sum(&q->mq_usage_counter);
spin_unlock_irq(q->queue_lock);
if (drain)
__blk_mq_drain_queue(q);
}

if (count == 0)
break;
blk_mq_run_queues(q, false);
msleep(10);
}
/*
 * Drain all in-flight requests from a multiqueue queue; exported entry
 * point (declared in blk-mq.h) so blk_cleanup_queue() can drain mq
 * queues before marking them dead.
 */
void blk_mq_drain_queue(struct request_queue *q)
{
	__blk_mq_drain_queue(q);
}

static void blk_mq_unfreeze_queue(struct request_queue *q)
Expand Down
1 change: 1 addition & 0 deletions block/blk-mq.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ void blk_mq_complete_request(struct request *rq, int error);
void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
void blk_mq_drain_queue(struct request_queue *q);

/*
* CPU hotplug helpers
Expand Down

0 comments on commit 43a5e4e

Please sign in to comment.