blkio: Core implementation of throttle policy
o Actual implementation of the throttling policy in the block layer. For now
  it implements READ and WRITE bytes-per-second throttling; IOPS throttling
  comes in later patches.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
rhvgoyal authored and Jens Axboe committed Sep 16, 2010
1 parent 4c9eefa commit e43473b
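
To make the policy concrete before diving into the diff, here is a minimal
userspace model of the bytes-per-second decision this patch implements.
Everything below (struct group, may_dispatch, the single fixed slice) is an
illustrative sketch, not the kernel code: block/blk-throttle.c keeps this
state per cgroup and per device, renews time slices, and arms a timer to
dispatch bios that had to wait.

/*
 * Minimal userspace model of the bytes-per-second decision. The names
 * (group, may_dispatch, slice_start) are illustrative, not the kernel's.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct group {
	uint64_t bps;              /* allowed bytes per second */
	uint64_t bytes_dispatched; /* bytes dispatched in this slice */
	double   slice_start;      /* slice start time, in seconds */
};

/* Can a bio of bio_bytes go out at time now, or must it be queued? */
static bool may_dispatch(struct group *g, uint64_t bio_bytes, double now)
{
	double elapsed = now - g->slice_start;
	/* Bytes the bps limit has "earned" for the elapsed time. */
	uint64_t allowed = (uint64_t)(g->bps * elapsed);

	if (g->bytes_dispatched + bio_bytes <= allowed) {
		g->bytes_dispatched += bio_bytes;
		return true;
	}
	return false; /* queue the bio; retry once enough time passes */
}

int main(void)
{
	struct group g = { .bps = 1024 * 1024 }; /* 1 MiB/s */

	printf("%d\n", may_dispatch(&g, 256 * 1024, 0.5)); /* 1: within budget */
	printf("%d\n", may_dispatch(&g, 512 * 1024, 0.5)); /* 0: must wait */
	printf("%d\n", may_dispatch(&g, 512 * 1024, 1.0)); /* 1: budget accrued */
	return 0;
}

When may_dispatch fails, the real implementation computes how long until the
bio's budget becomes available and schedules a dispatch from a workqueue;
that is what the kblockd_schedule_delayed_work() helper added at the bottom
of this patch is for.
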
Showing 7 changed files with 979 additions and 3 deletions.
12 changes: 12 additions & 0 deletions block/Kconfig
@@ -77,6 +77,18 @@ config BLK_DEV_INTEGRITY
 	T10/SCSI Data Integrity Field or the T13/ATA External Path
 	Protection. If in doubt, say N.
 
+config BLK_DEV_THROTTLING
+	bool "Block layer bio throttling support"
+	depends on BLK_CGROUP=y && EXPERIMENTAL
+	default n
+	---help---
+	Block layer bio throttling support. It can be used to limit
+	the IO rate to a device. IO rate policies are per cgroup and
+	one needs to mount and use blkio cgroup controller for creating
+	cgroups and specifying per device IO rate policies.
+
+	See Documentation/cgroups/blkio-controller.txt for more information.
+
 endif # BLOCK
 
 config BLOCK_COMPAT
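
With the option enabled, limits are configured per cgroup and per device via
the blkio controller. As a usage sketch (the control-file names are added by
the interface patches elsewhere in this series, not by this one): writing
"8:16 1048576" to blkio.throttle.read_bps_device inside a cgroup directory
caps that group's reads on device 8:16 at 1 MiB/s; see the documentation file
referenced above for the authoritative interface.
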
1 change: 1 addition & 0 deletions block/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 
 obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
 obj-$(CONFIG_BLK_CGROUP)	+= blk-cgroup.o
+obj-$(CONFIG_BLK_DEV_THROTTLING)	+= blk-throttle.o
 obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
24 changes: 24 additions & 0 deletions block/blk-core.c
@@ -382,6 +382,7 @@ void blk_sync_queue(struct request_queue *q)
 	del_timer_sync(&q->unplug_timer);
 	del_timer_sync(&q->timeout);
 	cancel_work_sync(&q->unplug_work);
+	throtl_shutdown_timer_wq(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -459,6 +460,8 @@ void blk_cleanup_queue(struct request_queue *q)
 	if (q->elevator)
 		elevator_exit(q->elevator);
 
+	blk_throtl_exit(q);
+
 	blk_put_queue(q);
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
@@ -515,6 +518,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 		return NULL;
 	}
 
+	if (blk_throtl_init(q)) {
+		kmem_cache_free(blk_requestq_cachep, q);
+		return NULL;
+	}
+
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
 	init_timer(&q->unplug_timer);
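
Taken together, the three hooks above give throttling the same lifecycle as
the queue itself: blk_throtl_init() when the queue is allocated,
throtl_shutdown_timer_wq() when pending work is synced, and blk_throtl_exit()
at cleanup; if init fails, the partially built queue is freed and the
allocation fails.
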
@@ -1522,6 +1530,15 @@ static inline void __generic_make_request(struct bio *bio)
 			goto end_io;
 		}
 
+		blk_throtl_bio(q, &bio);
+
+		/*
+		 * If bio is NULL, it has been throttled and will be
+		 * submitted later.
+		 */
+		if (!bio)
+			break;
+
 		trace_block_bio_queue(q, bio);
 
 		ret = q->make_request_fn(q, bio);
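
The hook's contract deserves a note: blk_throtl_bio() takes the bio by
reference, and when the submitting group is over its limit it queues the bio
internally and sets the caller's pointer to NULL, so __generic_make_request()
breaks out instead of handing the bio to ->make_request_fn(). A self-contained
sketch of that calling convention, with a toy struct bio and limit check
standing in for the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins: the real struct bio and limit bookkeeping live in the
 * kernel; only the pointer-to-pointer convention is the point here. */
struct bio { unsigned int bytes; };

static bool over_limit(const struct bio *bio)
{
	return bio->bytes > 4096;	/* toy policy for the sketch */
}

static void queue_for_later(struct bio *bio)
{
	printf("queued %u bytes for later dispatch\n", bio->bytes);
}

/* Like blk_throtl_bio(): NULL out the caller's bio once it is queued. */
static void throtl_bio_model(struct bio **biop)
{
	if (over_limit(*biop)) {
		queue_for_later(*biop);
		*biop = NULL;
	}
}

int main(void)
{
	struct bio big = { 8192 };
	struct bio *bio = &big;

	throtl_bio_model(&bio);
	if (!bio)
		printf("bio throttled; the submit path stops here\n");
	return 0;
}
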
@@ -2580,6 +2597,13 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
+int kblockd_schedule_delayed_work(struct request_queue *q,
+				  struct delayed_work *dwork, unsigned long delay)
+{
+	return queue_delayed_work(kblockd_workqueue, dwork, delay);
+}
+EXPORT_SYMBOL(kblockd_schedule_delayed_work);
+
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
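
Finally, the reason for the new export: once a bio has been throttled,
someone must dispatch it later, when enough budget has accrued, and
blk-throttle does that from delayed work running on the kblockd workqueue.
A toy userspace model of the pattern (the real
kblockd_schedule_delayed_work() returns immediately and kblockd runs the
callback later; this stand-in simply waits in place, and all names here are
illustrative):

#include <stdio.h>
#include <unistd.h>

/* Toy model of delayed work: run a callback after a delay. */
struct delayed_work {
	void (*func)(void);
};

static void dispatch_queued_bios(void)
{
	printf("dispatching bios whose byte budget is now available\n");
}

static void schedule_delayed_work_model(struct delayed_work *w,
					unsigned int delay_ms)
{
	usleep(delay_ms * 1000);	/* the kernel would not block here */
	w->func();
}

int main(void)
{
	struct delayed_work w = { .func = dispatch_queued_bios };

	/* e.g. wait until the next queued bio fits its bps budget */
	schedule_delayed_work_model(&w, 100);
	return 0;
}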