author     Linus Torvalds <torvalds@linux-foundation.org>  2011-11-05 01:06:58 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-11-05 01:06:58 +0100
commit     b4fdcb02f1e39c27058a885905bd0277370ba441 (patch)
tree       fd4cfd1994f21f44afe5e7904681fb5ac09f81b8  /block/blk-throttle.c
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/pkl/squashfs-next (diff)
parent     block: don't call blk_drain_queue() if elevator is not up (diff)
Merge branch 'for-3.2/core' of git://git.kernel.dk/linux-block
* 'for-3.2/core' of git://git.kernel.dk/linux-block: (29 commits)
  block: don't call blk_drain_queue() if elevator is not up
  blk-throttle: use queue_is_locked() instead of lockdep_is_held()
  blk-throttle: Take blkcg->lock while traversing blkcg->policy_list
  blk-throttle: Free up policy node associated with deleted rule
  block: warn if tag is greater than real_max_depth.
  block: make gendisk hold a reference to its queue
  blk-flush: move the queue kick into
  blk-flush: fix invalid BUG_ON in blk_insert_flush
  block: Remove the control of complete cpu from bio.
  block: fix a typo in the blk-cgroup.h file
  block: initialize the bounce pool if high memory may be added later
  block: fix request_queue lifetime handling by making blk_queue_cleanup() properly shutdown
  block: drop @tsk from attempt_plug_merge() and explain sync rules
  block: make get_request[_wait]() fail if queue is dead
  block: reorganize throtl_get_tg() and blk_throtl_bio()
  block: reorganize queue draining
  block: drop unnecessary blk_get/put_queue() in scsi_cmd_ioctl() and blk_get_tg()
  block: pass around REQ_* flags instead of broken down booleans during request alloc/free
  block: move blk_throtl prototypes to block/blk.h
  block: fix genhd refcounting in blkio_policy_parse_and_set()
  ...

Fix up trivial conflicts due to "mddev_t" -> "struct mddev" conversion and
making the request functions be of type "void" instead of "int" in
 - drivers/md/{faulty.c,linear.c,md.c,md.h,multipath.c,raid0.c,raid1.c,raid10.c,raid5.c}
 - drivers/staging/zram/zram_drv.c
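Two of the changes above reshape the throttling entry point visible in this diff: blk_throtl_bio() now takes the bio directly and returns a bool ("was the bio throttled?") instead of an int error code, and a dead queue simply means the bio is not throttled. The sketch below shows how a submission-path caller might consume that return value; the helper name and surrounding control flow are illustrative assumptions, not verbatim blk-core.c.

/*
 * Illustrative caller of the new interface (assumed helper name; not the
 * exact blk-core.c submission path).
 */
static bool submit_checks_sketch(struct request_queue *q, struct bio *bio)
{
	/*
	 * A true return means blk-throttle queued the bio and will issue it
	 * later from its dispatch timer or blk_throtl_drain(); the caller
	 * must not touch the bio again.  A false return means the bio was
	 * not throttled (including the dead-queue case) and submission
	 * continues normally.
	 */
	if (blk_throtl_bio(q, bio))
		return false;	/* ownership handed to blk-throttle */

	return true;		/* keep issuing the bio */
}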
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--  block/blk-throttle.c | 106
1 file changed, 62 insertions(+), 44 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a19f58c6fc3a..4553245d9317 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -10,6 +10,7 @@
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
+#include "blk.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;
@@ -302,16 +303,16 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
return tg;
}
-/*
- * This function returns with queue lock unlocked in case of error, like
- * request queue is no more
- */
static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
{
struct throtl_grp *tg = NULL, *__tg = NULL;
struct blkio_cgroup *blkcg;
struct request_queue *q = td->queue;
+ /* no throttling for dead queue */
+ if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+ return NULL;
+
rcu_read_lock();
blkcg = task_blkio_cgroup(current);
tg = throtl_find_tg(td, blkcg);
@@ -323,32 +324,22 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
/*
* Need to allocate a group. Allocation of group also needs allocation
* of per cpu stats which in-turn takes a mutex() and can block. Hence
- * we need to drop rcu lock and queue_lock before we call alloc
- *
- * Take the request queue reference to make sure queue does not
- * go away once we return from allocation.
+ * we need to drop rcu lock and queue_lock before we call alloc.
*/
- blk_get_queue(q);
rcu_read_unlock();
spin_unlock_irq(q->queue_lock);
tg = throtl_alloc_tg(td);
- /*
- * We might have slept in group allocation. Make sure queue is not
- * dead
- */
- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
- blk_put_queue(q);
- if (tg)
- kfree(tg);
-
- return ERR_PTR(-ENODEV);
- }
- blk_put_queue(q);
/* Group allocated and queue is still alive. take the lock */
spin_lock_irq(q->queue_lock);
+ /* Make sure @q is still alive */
+ if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+ kfree(tg);
+ return NULL;
+ }
+
/*
* Initialize the new group. After sleeping, read the blkcg again.
*/
@@ -1014,11 +1005,6 @@ static void throtl_release_tgs(struct throtl_data *td)
}
}
-static void throtl_td_free(struct throtl_data *td)
-{
- kfree(td);
-}
-
/*
* Blk cgroup controller notification saying that blkio_group object is being
* delinked as associated cgroup object is going away. That also means that
@@ -1123,17 +1109,17 @@ static struct blkio_policy_type blkio_policy_throtl = {
.plid = BLKIO_POLICY_THROTL,
};
-int blk_throtl_bio(struct request_queue *q, struct bio **biop)
+bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
struct throtl_data *td = q->td;
struct throtl_grp *tg;
- struct bio *bio = *biop;
bool rw = bio_data_dir(bio), update_disptime = true;
struct blkio_cgroup *blkcg;
+ bool throttled = false;
if (bio->bi_rw & REQ_THROTTLED) {
bio->bi_rw &= ~REQ_THROTTLED;
- return 0;
+ goto out;
}
/*
@@ -1152,7 +1138,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
rw, rw_is_sync(bio->bi_rw));
rcu_read_unlock();
- return 0;
+ goto out;
}
}
rcu_read_unlock();
@@ -1161,18 +1147,10 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
* Either group has not been allocated yet or it is not an unlimited
* IO group
*/
-
spin_lock_irq(q->queue_lock);
tg = throtl_get_tg(td);
-
- if (IS_ERR(tg)) {
- if (PTR_ERR(tg) == -ENODEV) {
- /*
- * Queue is gone. No queue lock held here.
- */
- return -ENODEV;
- }
- }
+ if (unlikely(!tg))
+ goto out_unlock;
if (tg->nr_queued[rw]) {
/*
@@ -1200,7 +1178,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
* So keep on trimming slice even if bio is not queued.
*/
throtl_trim_slice(td, tg, rw);
- goto out;
+ goto out_unlock;
}
queue_bio:
@@ -1212,16 +1190,52 @@ queue_bio:
tg->nr_queued[READ], tg->nr_queued[WRITE]);
throtl_add_bio_tg(q->td, tg, bio);
- *biop = NULL;
+ throttled = true;
if (update_disptime) {
tg_update_disptime(td, tg);
throtl_schedule_next_dispatch(td);
}
+out_unlock:
+ spin_unlock_irq(q->queue_lock);
out:
+ return throttled;
+}
+
+/**
+ * blk_throtl_drain - drain throttled bios
+ * @q: request_queue to drain throttled bios for
+ *
+ * Dispatch all currently throttled bios on @q through ->make_request_fn().
+ */
+void blk_throtl_drain(struct request_queue *q)
+ __releases(q->queue_lock) __acquires(q->queue_lock)
+{
+ struct throtl_data *td = q->td;
+ struct throtl_rb_root *st = &td->tg_service_tree;
+ struct throtl_grp *tg;
+ struct bio_list bl;
+ struct bio *bio;
+
+ WARN_ON_ONCE(!queue_is_locked(q));
+
+ bio_list_init(&bl);
+
+ while ((tg = throtl_rb_first(st))) {
+ throtl_dequeue_tg(td, tg);
+
+ while ((bio = bio_list_peek(&tg->bio_lists[READ])))
+ tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+ while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+ tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
+ }
spin_unlock_irq(q->queue_lock);
- return 0;
+
+ while ((bio = bio_list_pop(&bl)))
+ generic_make_request(bio);
+
+ spin_lock_irq(q->queue_lock);
}
int blk_throtl_init(struct request_queue *q)
@@ -1296,7 +1310,11 @@ void blk_throtl_exit(struct request_queue *q)
* it.
*/
throtl_shutdown_wq(q);
- throtl_td_free(td);
+}
+
+void blk_throtl_release(struct request_queue *q)
+{
+ kfree(q->td);
}
static int __init throtl_init(void)
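The tail of the diff splits teardown into two steps: blk_throtl_drain() flushes already-throttled bios back through generic_make_request() while the queue is being shut down, and the new blk_throtl_release() performs the final kfree(q->td) once the queue object itself goes away. A minimal sketch of that ordering follows; the wrapper function is hypothetical and only illustrates the intended call sequence and locking, not the actual blk-core.c/blk-sysfs.c call sites.

/*
 * Hypothetical teardown wrapper illustrating the intended ordering of the
 * new hooks (the real callers live in the queue cleanup/release paths).
 */
static void queue_teardown_sketch(struct request_queue *q)
{
	/*
	 * blk_throtl_drain() expects queue_lock held; it drops and retakes
	 * the lock internally while reissuing the queued bios.
	 */
	spin_lock_irq(q->queue_lock);
	blk_throtl_drain(q);
	spin_unlock_irq(q->queue_lock);

	/*
	 * Shut down the throttle work, then free q->td last, once nothing
	 * can submit new bios against this queue.
	 */
	blk_throtl_exit(q);
	blk_throtl_release(q);
}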