Merge branch 'for-3.8/core' of git://git.kernel.dk/linux-block
Pull block layer core updates from Jens Axboe:
 "Here are the core block IO bits for 3.8.  The branch contains:

   - The final version of the surprise device removal fixups from Bart.

   - Don't hide EFI partitions under advanced partition types.  It's
     fairly widespread these days.  This is especially dangerous for
     systems that have both msdos and efi partition tables, where you
     want to keep them in sync.

   - Cleanup of using -1 instead of the proper NUMA_NO_NODE

   - Export control of the bdi flusher thread CPU mask and default to
     using the home node (if known), from Jeff (a brief usage sketch
     follows the shortlog below).

   - Export unplug tracepoint for MD.

   - Core improvements from Shaohua.  Reinstate the recursive merge, as
     the original bug has been fixed.  Add plugging for discard and also
     fix a problem handling non-power-of-2 discard limits (see the
     alignment sketch right after this message).

  There's a trivial merge in block/blk-exec.c due to a fix that went
  into 3.7-rc later than -rc4, which this branch is based on."
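
The non-power-of-two discard fix called out above boils down to arithmetic:
padding an offset out to the next granularity boundary can no longer use the
(granularity - 1) bitmask trick, which is only correct when the granularity is
a power of two. Below is a minimal standalone sketch of that arithmetic; the
helper names are illustrative, not the kernel's, and the remainder-based form
is only meant to show, in spirit, the kind of change the "discard granularity
might not be power of 2" patch makes.

/*
 * Illustrative sketch only (not kernel code): sectors of padding needed to
 * reach the next discard-granularity boundary from a given offset.
 */
#include <assert.h>

typedef unsigned long long sector_t;

/* Bitmask form: correct only when granularity is a power of two. */
static sector_t pad_pow2(sector_t offset, sector_t granularity)
{
        return (granularity - (offset & (granularity - 1))) &
               (granularity - 1);
}

/* Remainder form: works for any granularity. */
static sector_t pad_any(sector_t offset, sector_t granularity)
{
        sector_t rem = offset % granularity;

        return rem ? granularity - rem : 0;
}

int main(void)
{
        /* Power-of-two granularity (8): both forms agree, 6 sectors of pad. */
        assert(pad_pow2(10, 8) == 6 && pad_any(10, 8) == 6);
        /* Non-power-of-two granularity (24): only the remainder form is right. */
        assert(pad_any(10, 24) == 14);
        assert(pad_pow2(10, 24) == 22); /* bitmask math is off here */
        return 0;
}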

* 'for-3.8/core' of git://git.kernel.dk/linux-block:
  block: export block_unplug tracepoint
  block: add plug for blkdev_issue_discard
  block: discard granularity might not be power of 2
  deadline: Allow 0ms deadline latency, increase the read speed
  partitions: enable EFI/GPT support by default
  bsg: Remove unused function bsg_goose_queue()
  block: Make blk_cleanup_queue() wait until request_fn finished
  block: Avoid scheduling delayed work on a dead queue
  block: Avoid that request_fn is invoked on a dead queue
  block: Let blk_drain_queue() caller obtain the queue lock
  block: Rename queue dead flag
  bdi: add a user-tunable cpu_list for the bdi flusher threads
  block: use NUMA_NO_NODE instead of -1
  block: recursive merge requests
  block CFQ: avoid moving request to different queue
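
The bdi flusher CPU mask mentioned in the pull message is exposed as a sysfs
tunable by the "bdi: add a user-tunable cpu_list" patch above. The sketch
below is a minimal, illustrative way to poke it from userspace; the exact
attribute path ("/sys/class/bdi/8:0/cpu_list") and the bdi name are
assumptions for this example, not something this commit page spells out.

/*
 * Sketch only: restrict a bdi's flusher thread to CPUs 0-3 by writing a CPU
 * list to its sysfs attribute. The path below is an assumption; substitute
 * the bdi (major:minor) you actually care about.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/class/bdi/8:0/cpu_list", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fprintf(f, "0-3\n") < 0)
                perror("fprintf");
        fclose(f);
        return 0;
}
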
torvalds committed Dec 17, 2012
2 parents 3c2e81e + cbae8d4 commit 60da5bf
Showing 19 changed files with 224 additions and 99 deletions.
2 changes: 1 addition & 1 deletion block/blk-cgroup.c
@@ -231,7 +231,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
* we shouldn't allow anything to go through for a bypassing queue.
*/
if (unlikely(blk_queue_bypass(q)))
return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
127 changes: 76 additions & 51 deletions block/blk-core.c
@@ -40,6 +40,7 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

@@ -219,12 +220,13 @@ static void blk_delay_work(struct work_struct *work)
* Description:
* Sometimes queueing needs to be postponed for a little while, to allow
* resources to come back. This function will make sure that queueing is
* restarted around the specified time.
* restarted around the specified time. Queue lock must be held.
*/
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
queue_delayed_work(kblockd_workqueue, &q->delay_work,
msecs_to_jiffies(msecs));
if (likely(!blk_queue_dead(q)))
queue_delayed_work(kblockd_workqueue, &q->delay_work,
msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);

@@ -292,6 +294,34 @@ void blk_sync_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_sync_queue);

/**
* __blk_run_queue_uncond - run a queue whether or not it has been stopped
* @q: The queue to run
*
* Description:
* Invoke request handling on a queue if there are any pending requests.
* May be used to restart request handling after a request has completed.
* This variant runs the queue whether or not the queue has been
* stopped. Must be called with the queue lock held and interrupts
* disabled. See also @blk_run_queue.
*/
inline void __blk_run_queue_uncond(struct request_queue *q)
{
if (unlikely(blk_queue_dead(q)))
return;

/*
* Some request_fn implementations, e.g. scsi_request_fn(), unlock
* the queue lock internally. As a result multiple threads may be
* running such a request function concurrently. Keep track of the
* number of active request_fn invocations such that blk_drain_queue()
* can wait until all these request_fn calls have finished.
*/
q->request_fn_active++;
q->request_fn(q);
q->request_fn_active--;
}

/**
* __blk_run_queue - run a single device queue
* @q: The queue to run
@@ -305,7 +335,7 @@ void __blk_run_queue(struct request_queue *q)
if (unlikely(blk_queue_stopped(q)))
return;

q->request_fn(q);
__blk_run_queue_uncond(q);
}
EXPORT_SYMBOL(__blk_run_queue);

@@ -315,11 +345,11 @@ EXPORT_SYMBOL(__blk_run_queue);
*
* Description:
* Tells kblockd to perform the equivalent of @blk_run_queue on behalf
* of us.
* of us. The caller must hold the queue lock.
*/
void blk_run_queue_async(struct request_queue *q)
{
if (likely(!blk_queue_stopped(q)))
if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
EXPORT_SYMBOL(blk_run_queue_async);
@@ -349,23 +379,25 @@ void blk_put_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_put_queue);

/**
* blk_drain_queue - drain requests from request_queue
* __blk_drain_queue - drain requests from request_queue
* @q: queue to drain
* @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
*
* Drain requests from @q. If @drain_all is set, all requests are drained.
* If not, only ELVPRIV requests are drained. The caller is responsible
* for ensuring that no new requests which need to be drained are queued.
*/
void blk_drain_queue(struct request_queue *q, bool drain_all)
static void __blk_drain_queue(struct request_queue *q, bool drain_all)
__releases(q->queue_lock)
__acquires(q->queue_lock)
{
int i;

lockdep_assert_held(q->queue_lock);

while (true) {
bool drain = false;

spin_lock_irq(q->queue_lock);

/*
* The caller might be trying to drain @q before its
* elevator is initialized.
@@ -386,6 +418,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
__blk_run_queue(q);

drain |= q->nr_rqs_elvpriv;
drain |= q->request_fn_active;

/*
* Unfortunately, requests are queued at and tracked from
@@ -401,11 +434,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
}
}

spin_unlock_irq(q->queue_lock);

if (!drain)
break;

spin_unlock_irq(q->queue_lock);

msleep(10);

spin_lock_irq(q->queue_lock);
}

/*
@@ -416,13 +452,9 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (q->request_fn) {
struct request_list *rl;

spin_lock_irq(q->queue_lock);

blk_queue_for_each_rl(rl, q)
for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
wake_up_all(&rl->wait[i]);

spin_unlock_irq(q->queue_lock);
}
}

@@ -446,7 +478,10 @@ void blk_queue_bypass_start(struct request_queue *q)
spin_unlock_irq(q->queue_lock);

if (drain) {
blk_drain_queue(q, false);
spin_lock_irq(q->queue_lock);
__blk_drain_queue(q, false);
spin_unlock_irq(q->queue_lock);

/* ensure blk_queue_bypass() is %true inside RCU read lock */
synchronize_rcu();
}
@@ -473,20 +508,20 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
*
* Mark @q DEAD, drain all pending requests, destroy and put it. All
* future requests will be failed immediately with -ENODEV.
* Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
* put it. All future requests will be failed immediately with -ENODEV.
*/
void blk_cleanup_queue(struct request_queue *q)
{
spinlock_t *lock = q->queue_lock;

/* mark @q DEAD, no new request or merges will be allowed afterwards */
/* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
spin_lock_irq(lock);

/*
* Dead queue is permanently in bypass mode till released. Note
* A dying queue is permanently in bypass mode till released. Note
* that, unlike blk_queue_bypass_start(), we aren't performing
* synchronize_rcu() after entering bypass mode to avoid the delay
* as some drivers create and destroy a lot of queues while
@@ -499,12 +534,18 @@ void blk_cleanup_queue(struct request_queue *q)

queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
queue_flag_set(QUEUE_FLAG_DEAD, q);
queue_flag_set(QUEUE_FLAG_DYING, q);
spin_unlock_irq(lock);
mutex_unlock(&q->sysfs_lock);

/* drain all requests queued before DEAD marking */
blk_drain_queue(q, true);
/*
* Drain all requests queued before DYING marking. Set DEAD flag to
* prevent that q->request_fn() gets invoked after draining finished.
*/
spin_lock_irq(lock);
__blk_drain_queue(q, true);
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);

/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
@@ -549,7 +590,7 @@ void blk_exit_rl(struct request_list *rl)

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
return blk_alloc_queue_node(gfp_mask, -1);
return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);

@@ -660,7 +701,7 @@ EXPORT_SYMBOL(blk_alloc_queue_node);

struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
return blk_init_queue_node(rfn, lock, -1);
return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_init_queue);

@@ -716,7 +757,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);

bool blk_get_queue(struct request_queue *q)
{
if (likely(!blk_queue_dead(q))) {
if (likely(!blk_queue_dying(q))) {
__blk_get_queue(q);
return true;
}
@@ -870,7 +911,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
const bool is_sync = rw_is_sync(rw_flags) != 0;
int may_queue;

if (unlikely(blk_queue_dead(q)))
if (unlikely(blk_queue_dying(q)))
return NULL;

may_queue = elv_may_queue(q, rw_flags);
@@ -1050,7 +1091,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
if (rq)
return rq;

if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
blk_put_rl(rl);
return NULL;
}
@@ -1910,7 +1951,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
return -EIO;

spin_lock_irqsave(q->queue_lock, flags);
if (unlikely(blk_queue_dead(q))) {
if (unlikely(blk_queue_dying(q))) {
spin_unlock_irqrestore(q->queue_lock, flags);
return -ENODEV;
}
@@ -2884,27 +2925,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
{
trace_block_unplug(q, depth, !from_schedule);

/*
* Don't mess with dead queue.
*/
if (unlikely(blk_queue_dead(q))) {
spin_unlock(q->queue_lock);
return;
}

/*
* If we are punting this to kblockd, then we can safely drop
* the queue_lock before waking kblockd (which needs to take
* this lock).
*/
if (from_schedule) {
spin_unlock(q->queue_lock);
if (from_schedule)
blk_run_queue_async(q);
} else {
else
__blk_run_queue(q);
spin_unlock(q->queue_lock);
}

spin_unlock(q->queue_lock);
}

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@@ -2996,7 +3021,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
/*
* Short-circuit if @q is dead
*/
if (unlikely(blk_queue_dead(q))) {
if (unlikely(blk_queue_dying(q))) {
__blk_end_request_all(rq, -ENODEV);
continue;
}
4 changes: 2 additions & 2 deletions block/blk-exec.c
@@ -66,7 +66,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,

spin_lock_irq(q->queue_lock);

if (unlikely(blk_queue_dead(q))) {
if (unlikely(blk_queue_dying(q))) {
rq->errors = -ENXIO;
if (rq->end_io)
rq->end_io(rq, rq->errors);
@@ -78,7 +78,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
__blk_run_queue(q);
/* the queue is stopped so it won't be run */
if (is_pm_resume)
q->request_fn(q);
__blk_run_queue_uncond(q);
spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);