Diffstat (limited to 'block')
-rw-r--r--block/Kconfig.iosched4
-rw-r--r--block/blk-cgroup.c2109
-rw-r--r--block/blk-cgroup.h647
-rw-r--r--block/blk-core.c281
-rw-r--r--block/blk-ioc.c126
-rw-r--r--block/blk-sysfs.c6
-rw-r--r--block/blk-throttle.c695
-rw-r--r--block/blk.h32
-rw-r--r--block/cfq-iosched.c1072
-rw-r--r--block/cfq.h115
-rw-r--r--block/deadline-iosched.c8
-rw-r--r--block/elevator.c121
-rw-r--r--block/noop-iosched.c8
13 files changed, 2307 insertions, 2917 deletions
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 3199b76f795d..421bef9c4c48 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -23,8 +23,6 @@ config IOSCHED_DEADLINE
config IOSCHED_CFQ
tristate "CFQ I/O scheduler"
- # If BLK_CGROUP is a module, CFQ has to be built as module.
- depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y
default y
---help---
The CFQ I/O scheduler tries to distribute bandwidth equally
@@ -34,8 +32,6 @@ config IOSCHED_CFQ
This is the default I/O scheduler.
- Note: If BLK_CGROUP=m, then CFQ can be built only as module.
-
config CFQ_GROUP_IOSCHED
bool "CFQ Group Scheduling support"
depends on IOSCHED_CFQ && BLK_CGROUP
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index ea84a23d5e68..02cf6335e9bd 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -11,1679 +11,906 @@
* Nauman Rafique <nauman@google.com>
*/
#include <linux/ioprio.h>
-#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
-#include "blk-cgroup.h"
#include <linux/genhd.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include "blk-cgroup.h"
+#include "blk.h"
#define MAX_KEY_LEN 100
-static DEFINE_SPINLOCK(blkio_list_lock);
-static LIST_HEAD(blkio_list);
+static DEFINE_MUTEX(blkcg_pol_mutex);
-struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
-EXPORT_SYMBOL_GPL(blkio_root_cgroup);
+struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
+EXPORT_SYMBOL_GPL(blkcg_root);
-static struct cgroup_subsys_state *blkiocg_create(struct cgroup *);
-static int blkiocg_can_attach(struct cgroup *, struct cgroup_taskset *);
-static void blkiocg_attach(struct cgroup *, struct cgroup_taskset *);
-static void blkiocg_destroy(struct cgroup *);
-static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
+static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
-/* for encoding cft->private value on file */
-#define BLKIOFILE_PRIVATE(x, val) (((x) << 16) | (val))
-/* What policy owns the file, proportional or throttle */
-#define BLKIOFILE_POLICY(val) (((val) >> 16) & 0xffff)
-#define BLKIOFILE_ATTR(val) ((val) & 0xffff)
-
-struct cgroup_subsys blkio_subsys = {
- .name = "blkio",
- .create = blkiocg_create,
- .can_attach = blkiocg_can_attach,
- .attach = blkiocg_attach,
- .destroy = blkiocg_destroy,
- .populate = blkiocg_populate,
-#ifdef CONFIG_BLK_CGROUP
- /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
- .subsys_id = blkio_subsys_id,
-#endif
- .use_id = 1,
- .module = THIS_MODULE,
-};
-EXPORT_SYMBOL_GPL(blkio_subsys);
-
-static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
- struct blkio_policy_node *pn)
+struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
- list_add(&pn->node, &blkcg->policy_list);
+ return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
+ struct blkcg, css);
}
+EXPORT_SYMBOL_GPL(cgroup_to_blkcg);
-static inline bool cftype_blkg_same_policy(struct cftype *cft,
- struct blkio_group *blkg)
+static struct blkcg *task_blkcg(struct task_struct *tsk)
{
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
-
- if (blkg->plid == plid)
- return 1;
-
- return 0;
+ return container_of(task_subsys_state(tsk, blkio_subsys_id),
+ struct blkcg, css);
}
-/* Determines if policy node matches cgroup file being accessed */
-static inline bool pn_matches_cftype(struct cftype *cft,
- struct blkio_policy_node *pn)
+struct blkcg *bio_blkcg(struct bio *bio)
{
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int fileid = BLKIOFILE_ATTR(cft->private);
-
- return (plid == pn->plid && fileid == pn->fileid);
+ if (bio && bio->bi_css)
+ return container_of(bio->bi_css, struct blkcg, css);
+ return task_blkcg(current);
}
+EXPORT_SYMBOL_GPL(bio_blkcg);
-/* Must be called with blkcg->lock held */
-static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
+static bool blkcg_policy_enabled(struct request_queue *q,
+ const struct blkcg_policy *pol)
{
- list_del(&pn->node);
+ return pol && test_bit(pol->plid, q->blkcg_pols);
}
-/* Must be called with blkcg->lock held */
-static struct blkio_policy_node *
-blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
- enum blkio_policy_id plid, int fileid)
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkcg_gq *blkg)
{
- struct blkio_policy_node *pn;
-
- list_for_each_entry(pn, &blkcg->policy_list, node) {
- if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
- return pn;
- }
+ int i;
- return NULL;
-}
+ if (!blkg)
+ return;
-struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
-{
- return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
- struct blkio_cgroup, css);
-}
-EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+ struct blkg_policy_data *pd = blkg->pd[i];
-struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
-{
- return container_of(task_subsys_state(tsk, blkio_subsys_id),
- struct blkio_cgroup, css);
-}
-EXPORT_SYMBOL_GPL(task_blkio_cgroup);
+ if (!pd)
+ continue;
-static inline void
-blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
-{
- struct blkio_policy_type *blkiop;
+ if (pol && pol->pd_exit_fn)
+ pol->pd_exit_fn(blkg);
- list_for_each_entry(blkiop, &blkio_list, list) {
- /* If this policy does not own the blkg, do not send updates */
- if (blkiop->plid != blkg->plid)
- continue;
- if (blkiop->ops.blkio_update_group_weight_fn)
- blkiop->ops.blkio_update_group_weight_fn(blkg->key,
- blkg, weight);
+ kfree(pd);
}
+
+ kfree(blkg);
}
-static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
- int fileid)
+/**
+ * blkg_alloc - allocate a blkg
+ * @blkcg: block cgroup the new blkg is associated with
+ * @q: request_queue the new blkg is associated with
+ *
+ * Allocate a new blkg associating @blkcg and @q.
+ */
+static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
{
- struct blkio_policy_type *blkiop;
-
- list_for_each_entry(blkiop, &blkio_list, list) {
-
- /* If this policy does not own the blkg, do not send updates */
- if (blkiop->plid != blkg->plid)
- continue;
+ struct blkcg_gq *blkg;
+ int i;
- if (fileid == BLKIO_THROTL_read_bps_device
- && blkiop->ops.blkio_update_group_read_bps_fn)
- blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
- blkg, bps);
+ /* alloc and init base part */
+ blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
+ if (!blkg)
+ return NULL;
- if (fileid == BLKIO_THROTL_write_bps_device
- && blkiop->ops.blkio_update_group_write_bps_fn)
- blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
- blkg, bps);
- }
-}
-
-static inline void blkio_update_group_iops(struct blkio_group *blkg,
- unsigned int iops, int fileid)
-{
- struct blkio_policy_type *blkiop;
+ blkg->q = q;
+ INIT_LIST_HEAD(&blkg->q_node);
+ blkg->blkcg = blkcg;
+ blkg->refcnt = 1;
- list_for_each_entry(blkiop, &blkio_list, list) {
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+ struct blkg_policy_data *pd;
- /* If this policy does not own the blkg, do not send updates */
- if (blkiop->plid != blkg->plid)
+ if (!blkcg_policy_enabled(q, pol))
continue;
- if (fileid == BLKIO_THROTL_read_iops_device
- && blkiop->ops.blkio_update_group_read_iops_fn)
- blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
- blkg, iops);
+ /* alloc per-policy data and attach it to blkg */
+ pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node);
+ if (!pd) {
+ blkg_free(blkg);
+ return NULL;
+ }
- if (fileid == BLKIO_THROTL_write_iops_device
- && blkiop->ops.blkio_update_group_write_iops_fn)
- blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
- blkg,iops);
+ blkg->pd[i] = pd;
+ pd->blkg = blkg;
}
-}
-/*
- * Add to the appropriate stat variable depending on the request type.
- * This should be called with the blkg->stats_lock held.
- */
-static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
- bool sync)
-{
- if (direction)
- stat[BLKIO_STAT_WRITE] += add;
- else
- stat[BLKIO_STAT_READ] += add;
- if (sync)
- stat[BLKIO_STAT_SYNC] += add;
- else
- stat[BLKIO_STAT_ASYNC] += add;
-}
+ /* invoke per-policy init */
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
-/*
- * Decrements the appropriate stat variable if non-zero depending on the
- * request type. Panics on value being zero.
- * This should be called with the blkg->stats_lock held.
- */
-static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
-{
- if (direction) {
- BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
- stat[BLKIO_STAT_WRITE]--;
- } else {
- BUG_ON(stat[BLKIO_STAT_READ] == 0);
- stat[BLKIO_STAT_READ]--;
+ if (blkcg_policy_enabled(blkg->q, pol))
+ pol->pd_init_fn(blkg);
}
- if (sync) {
- BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
- stat[BLKIO_STAT_SYNC]--;
- } else {
- BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
- stat[BLKIO_STAT_ASYNC]--;
- }
-}
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
- struct blkio_group *curr_blkg)
-{
- if (blkio_blkg_waiting(&blkg->stats))
- return;
- if (blkg == curr_blkg)
- return;
- blkg->stats.start_group_wait_time = sched_clock();
- blkio_mark_blkg_waiting(&blkg->stats);
+ return blkg;
}
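
As a point of reference, a policy in this series consumes pd_size/pd[] by embedding struct blkg_policy_data as the first member of its private struct, so blkg->pd[plid] and the policy struct alias each other. A minimal sketch with hypothetical foo_* names (blkg_stat/blkg_rwstat are the counter types used by the prfill helpers further down):

	/* hypothetical per-blkg policy data; pd must be the first member */
	struct foo_group {
		struct blkg_policy_data	pd;
		struct blkg_stat	time;		/* made-up counters */
		struct blkg_rwstat	serviced;
		u64			limit;		/* made-up per-device knob */
	};

	static inline struct foo_group *pd_to_foo(struct blkg_policy_data *pd)
	{
		return pd ? container_of(pd, struct foo_group, pd) : NULL;
	}
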
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
+static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
+ struct request_queue *q)
{
- unsigned long long now;
+ struct blkcg_gq *blkg;
- if (!blkio_blkg_waiting(stats))
- return;
+ blkg = rcu_dereference(blkcg->blkg_hint);
+ if (blkg && blkg->q == q)
+ return blkg;
+
+ /*
+ * Hint didn't match. Look up from the radix tree. Note that we
+ * may not be holding queue_lock and thus are not sure whether
+ * @blkg from blkg_tree has already been removed or not, so we
+	 * can't update the hint to the lookup result. Leave it to the caller.
+ */
+ blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
+ if (blkg && blkg->q == q)
+ return blkg;
- now = sched_clock();
- if (time_after64(now, stats->start_group_wait_time))
- stats->group_wait_time += now - stats->start_group_wait_time;
- blkio_clear_blkg_waiting(stats);
+ return NULL;
}
-/* This should be called with the blkg->stats_lock held. */
-static void blkio_end_empty_time(struct blkio_group_stats *stats)
+/**
+ * blkg_lookup - lookup blkg for the specified blkcg - q pair
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for the @blkcg - @q pair. This function should be called
+ * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
+ * - see blk_queue_bypass_start() for details.
+ */
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
- unsigned long long now;
-
- if (!blkio_blkg_empty(stats))
- return;
+ WARN_ON_ONCE(!rcu_read_lock_held());
- now = sched_clock();
- if (time_after64(now, stats->start_empty_time))
- stats->empty_time += now - stats->start_empty_time;
- blkio_clear_blkg_empty(stats);
+ if (unlikely(blk_queue_bypass(q)))
+ return NULL;
+ return __blkg_lookup(blkcg, q);
}
+EXPORT_SYMBOL_GPL(blkg_lookup);
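
A quick usage sketch, assuming a caller that already has a blkcg reference at hand (e.g. via bio_blkcg() above):

	/* sketch: test for an existing blkg; NULL also while @q is bypassing */
	static bool foo_blkg_exists(struct blkcg *blkcg, struct request_queue *q)
	{
		bool ret;

		rcu_read_lock();
		ret = blkg_lookup(blkcg, q) != NULL;
		rcu_read_unlock();
		return ret;
	}
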
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
+ __releases(q->queue_lock) __acquires(q->queue_lock)
{
- unsigned long flags;
+ struct blkcg_gq *blkg;
+ int ret;
- spin_lock_irqsave(&blkg->stats_lock, flags);
- BUG_ON(blkio_blkg_idling(&blkg->stats));
- blkg->stats.start_idle_time = sched_clock();
- blkio_mark_blkg_idling(&blkg->stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ lockdep_assert_held(q->queue_lock);
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
- unsigned long flags;
- unsigned long long now;
- struct blkio_group_stats *stats;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
- if (blkio_blkg_idling(stats)) {
- now = sched_clock();
- if (time_after64(now, stats->start_idle_time))
- stats->idle_time += now - stats->start_idle_time;
- blkio_clear_blkg_idling(stats);
+ /* lookup and update hint on success, see __blkg_lookup() for details */
+ blkg = __blkg_lookup(blkcg, q);
+ if (blkg) {
+ rcu_assign_pointer(blkcg->blkg_hint, blkg);
+ return blkg;
}
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
-{
- unsigned long flags;
- struct blkio_group_stats *stats;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
- stats->avg_queue_size_sum +=
- stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
- stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
- stats->avg_queue_size_samples++;
- blkio_update_group_wait_time(stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
+ /* blkg holds a reference to blkcg */
+ if (!css_tryget(&blkcg->css))
+ return ERR_PTR(-EINVAL);
-void blkiocg_set_start_empty_time(struct blkio_group *blkg)
-{
- unsigned long flags;
- struct blkio_group_stats *stats;
+ /* allocate */
+ ret = -ENOMEM;
+ blkg = blkg_alloc(blkcg, q);
+ if (unlikely(!blkg))
+ goto err_put;
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
+ /* insert */
+ ret = radix_tree_preload(GFP_ATOMIC);
+ if (ret)
+ goto err_free;
- if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
- stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
- return;
+ spin_lock(&blkcg->lock);
+ ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
+ if (likely(!ret)) {
+ hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
+ list_add(&blkg->q_node, &q->blkg_list);
}
+ spin_unlock(&blkcg->lock);
- /*
- * group is already marked empty. This can happen if cfqq got new
- * request in parent group and moved to this group while being added
- * to service tree. Just ignore the event and move on.
- */
- if(blkio_blkg_empty(stats)) {
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
- return;
- }
+ radix_tree_preload_end();
- stats->start_empty_time = sched_clock();
- blkio_mark_blkg_empty(stats);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ if (!ret)
+ return blkg;
+err_free:
+ blkg_free(blkg);
+err_put:
+ css_put(&blkcg->css);
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue)
-{
- blkg->stats.dequeue += dequeue;
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
-#else
-static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
- struct blkio_group *curr_blkg) {}
-static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
-#endif
-
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction,
- bool sync)
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
- sync);
- blkio_end_empty_time(&blkg->stats);
- blkio_set_start_group_wait_time(blkg, curr_blkg);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
+ /*
+ * This could be the first entry point of blkcg implementation and
+ * we shouldn't allow anything to go through for a bypassing queue.
+ */
+ if (unlikely(blk_queue_bypass(q)))
+ return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+ return __blkg_lookup_create(blkcg, q);
}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
+EXPORT_SYMBOL_GPL(blkg_lookup_create);
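
Callers on the request path are expected to hold both the RCU read lock and the queue lock, roughly as in this hedged sketch (mirroring how blkg_conf_prep() below drives the helper):

	/* sketch: find-or-create the blkg for @bio's cgroup on @q */
	static struct blkcg_gq *foo_get_blkg(struct request_queue *q,
					     struct bio *bio)
	{
		struct blkcg_gq *blkg;

		rcu_read_lock();
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(bio_blkcg(bio), q);
		if (IS_ERR(blkg))
			blkg = NULL;	/* -EBUSY while bypassing, -EINVAL if dead */
		spin_unlock_irq(q->queue_lock);
		rcu_read_unlock();
		return blkg;
	}
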
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync)
+static void blkg_destroy(struct blkcg_gq *blkg)
{
- unsigned long flags;
+ struct request_queue *q = blkg->q;
+ struct blkcg *blkcg = blkg->blkcg;
- spin_lock_irqsave(&blkg->stats_lock, flags);
- blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
- direction, sync);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
+ lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&blkcg->lock);
-void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
- unsigned long unaccounted_time)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- blkg->stats.time += time;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg->stats.unaccounted_time += unaccounted_time;
-#endif
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
+ /* Something wrong if we are trying to remove same group twice */
+ WARN_ON_ONCE(list_empty(&blkg->q_node));
+ WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
-/*
- * should be called under rcu read lock or queue lock to make sure blkg pointer
- * is valid.
- */
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync)
-{
- struct blkio_group_stats_cpu *stats_cpu;
- unsigned long flags;
+ radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
+ list_del_init(&blkg->q_node);
+ hlist_del_init_rcu(&blkg->blkcg_node);
/*
- * Disabling interrupts to provide mutual exclusion between two
- * writes on same cpu. It probably is not needed for 64bit. Not
- * optimizing that case yet.
+ * Both setting lookup hint to and clearing it from @blkg are done
+ * under queue_lock. If it's not pointing to @blkg now, it never
+ * will. Hint assignment itself can race safely.
*/
- local_irq_save(flags);
-
- stats_cpu = this_cpu_ptr(blkg->stats_cpu);
-
- u64_stats_update_begin(&stats_cpu->syncp);
- stats_cpu->sectors += bytes >> 9;
- blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
- 1, direction, sync);
- blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
- bytes, direction, sync);
- u64_stats_update_end(&stats_cpu->syncp);
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
-
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
- uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
-{
- struct blkio_group_stats *stats;
- unsigned long flags;
- unsigned long long now = sched_clock();
-
- spin_lock_irqsave(&blkg->stats_lock, flags);
- stats = &blkg->stats;
- if (time_after64(now, io_start_time))
- blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
- now - io_start_time, direction, sync);
- if (time_after64(io_start_time, start_time))
- blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
- io_start_time - start_time, direction, sync);
- spin_unlock_irqrestore(&blkg->stats_lock, flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
-
-/* Merged stats are per cpu. */
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
- bool sync)
-{
- struct blkio_group_stats_cpu *stats_cpu;
- unsigned long flags;
+ if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
+ rcu_assign_pointer(blkcg->blkg_hint, NULL);
/*
- * Disabling interrupts to provide mutual exclusion between two
- * writes on same cpu. It probably is not needed for 64bit. Not
- * optimizing that case yet.
+ * Put the reference taken at the time of creation so that when all
+ * queues are gone, group can be destroyed.
*/
- local_irq_save(flags);
-
- stats_cpu = this_cpu_ptr(blkg->stats_cpu);
-
- u64_stats_update_begin(&stats_cpu->syncp);
- blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
- direction, sync);
- u64_stats_update_end(&stats_cpu->syncp);
- local_irq_restore(flags);
+ blkg_put(blkg);
}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
-/*
- * This function allocates the per cpu stats for blkio_group. Should be called
- * from sleepable context as alloc_per_cpu() requires that.
+/**
+ * blkg_destroy_all - destroy all blkgs associated with a request_queue
+ * @q: request_queue of interest
+ *
+ * Destroy all blkgs associated with @q.
*/
-int blkio_alloc_blkg_stats(struct blkio_group *blkg)
+static void blkg_destroy_all(struct request_queue *q)
{
- /* Allocate memory for per cpu stats */
- blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
- if (!blkg->stats_cpu)
- return -ENOMEM;
- return 0;
-}
-EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
+ struct blkcg_gq *blkg, *n;
-void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev,
- enum blkio_policy_id plid)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- spin_lock_init(&blkg->stats_lock);
- rcu_assign_pointer(blkg->key, key);
- blkg->blkcg_id = css_id(&blkcg->css);
- hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
- blkg->plid = plid;
- spin_unlock_irqrestore(&blkcg->lock, flags);
- /* Need to take css reference ? */
- cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
- blkg->dev = dev;
-}
-EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
-
-static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- hlist_del_init_rcu(&blkg->blkcg_node);
- blkg->blkcg_id = 0;
-}
+ lockdep_assert_held(q->queue_lock);
-/*
- * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
- * indicating that blk_group was unhashed by the time we got to it.
- */
-int blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- struct blkio_cgroup *blkcg;
- unsigned long flags;
- struct cgroup_subsys_state *css;
- int ret = 1;
+ list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+ struct blkcg *blkcg = blkg->blkcg;
- rcu_read_lock();
- css = css_lookup(&blkio_subsys, blkg->blkcg_id);
- if (css) {
- blkcg = container_of(css, struct blkio_cgroup, css);
- spin_lock_irqsave(&blkcg->lock, flags);
- if (!hlist_unhashed(&blkg->blkcg_node)) {
- __blkiocg_del_blkio_group(blkg);
- ret = 0;
- }
- spin_unlock_irqrestore(&blkcg->lock, flags);
+ spin_lock(&blkcg->lock);
+ blkg_destroy(blkg);
+ spin_unlock(&blkcg->lock);
}
-
- rcu_read_unlock();
- return ret;
}
-EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
-/* called under rcu_read_lock(). */
-struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
+static void blkg_rcu_free(struct rcu_head *rcu_head)
{
- struct blkio_group *blkg;
- struct hlist_node *n;
- void *__key;
-
- hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
- __key = blkg->key;
- if (__key == key)
- return blkg;
- }
-
- return NULL;
+ blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}
-EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
-static void blkio_reset_stats_cpu(struct blkio_group *blkg)
+void __blkg_release(struct blkcg_gq *blkg)
{
- struct blkio_group_stats_cpu *stats_cpu;
- int i, j, k;
+ /* release the extra blkcg reference this blkg has been holding */
+ css_put(&blkg->blkcg->css);
+
/*
- * Note: On 64 bit arch this should not be an issue. This has the
- * possibility of returning some inconsistent value on 32bit arch
- * as 64bit update on 32bit is non atomic. Taking care of this
- * corner case makes code very complicated, like sending IPIs to
- * cpus, taking care of stats of offline cpus etc.
+ * A group is freed in rcu manner. But having an rcu lock does not
+ * mean that one can access all the fields of blkg and assume these
+ * are valid. For example, don't try to follow throtl_data and
+ * request queue links.
*
- * reset stats is anyway more of a debug feature and this sounds a
- * corner case. So I am not complicating the code yet until and
- * unless this becomes a real issue.
+	 * Having a reference to blkg under an rcu allows access to only
+	 * values local to groups like group stats and group rate limits.
*/
- for_each_possible_cpu(i) {
- stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
- stats_cpu->sectors = 0;
- for(j = 0; j < BLKIO_STAT_CPU_NR; j++)
- for (k = 0; k < BLKIO_STAT_TOTAL; k++)
- stats_cpu->stat_arr_cpu[j][k] = 0;
- }
+ call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
+EXPORT_SYMBOL_GPL(__blkg_release);
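
__blkg_release() runs from the final blkg_put(). A sketch of pinning a group across a sleeping section, assuming the blkg_get()/blkg_put() helpers from blk-cgroup.h (both expect the queue lock to be held):

	/* sketch: keep @blkg alive while the queue lock is dropped */
	static void foo_slow_work(struct blkcg_gq *blkg, struct request_queue *q)
	{
		blkg_get(blkg);			/* caller already holds a ref */
		spin_unlock_irq(q->queue_lock);

		msleep(1);			/* stand-in for blocking work */

		spin_lock_irq(q->queue_lock);
		blkg_put(blkg);		/* last put defers freeing to RCU */
	}
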
-static int
-blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
+static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
+ u64 val)
{
- struct blkio_cgroup *blkcg;
- struct blkio_group *blkg;
- struct blkio_group_stats *stats;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+ struct blkcg_gq *blkg;
struct hlist_node *n;
- uint64_t queued[BLKIO_STAT_TOTAL];
int i;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- bool idling, waiting, empty;
- unsigned long long now = sched_clock();
-#endif
- blkcg = cgroup_to_blkio_cgroup(cgroup);
+ mutex_lock(&blkcg_pol_mutex);
spin_lock_irq(&blkcg->lock);
+
+ /*
+ * Note that stat reset is racy - it doesn't synchronize against
+ * stat updates. This is a debug feature which shouldn't exist
+ * anyway. If you get hit by a race, retry.
+ */
hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
- spin_lock(&blkg->stats_lock);
- stats = &blkg->stats;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- idling = blkio_blkg_idling(stats);
- waiting = blkio_blkg_waiting(stats);
- empty = blkio_blkg_empty(stats);
-#endif
- for (i = 0; i < BLKIO_STAT_TOTAL; i++)
- queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
- memset(stats, 0, sizeof(struct blkio_group_stats));
- for (i = 0; i < BLKIO_STAT_TOTAL; i++)
- stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- if (idling) {
- blkio_mark_blkg_idling(stats);
- stats->start_idle_time = now;
- }
- if (waiting) {
- blkio_mark_blkg_waiting(stats);
- stats->start_group_wait_time = now;
- }
- if (empty) {
- blkio_mark_blkg_empty(stats);
- stats->start_empty_time = now;
- }
-#endif
- spin_unlock(&blkg->stats_lock);
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
- /* Reset Per cpu stats which don't take blkg->stats_lock */
- blkio_reset_stats_cpu(blkg);
+ if (blkcg_policy_enabled(blkg->q, pol) &&
+ pol->pd_reset_stats_fn)
+ pol->pd_reset_stats_fn(blkg);
+ }
}
spin_unlock_irq(&blkcg->lock);
+ mutex_unlock(&blkcg_pol_mutex);
return 0;
}
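
The per-policy hook called above might look like this sketch (foo_* hypothetical; blkg_to_pd(), blkg_stat_reset() and blkg_rwstat_reset() assumed from blk-cgroup.h of this series):

	static void foo_pd_reset_stats(struct blkcg_gq *blkg)
	{
		struct foo_group *fg = pd_to_foo(blkg_to_pd(blkg, &blkcg_policy_foo));

		/* racy by design, as noted above */
		blkg_stat_reset(&fg->time);
		blkg_rwstat_reset(&fg->serviced);
	}
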
-static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
- int chars_left, bool diskname_only)
+static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
- snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
- chars_left -= strlen(str);
- if (chars_left <= 0) {
- printk(KERN_WARNING
- "Possibly incorrect cgroup stat display format");
- return;
- }
- if (diskname_only)
- return;
- switch (type) {
- case BLKIO_STAT_READ:
- strlcat(str, " Read", chars_left);
- break;
- case BLKIO_STAT_WRITE:
- strlcat(str, " Write", chars_left);
- break;
- case BLKIO_STAT_SYNC:
- strlcat(str, " Sync", chars_left);
- break;
- case BLKIO_STAT_ASYNC:
- strlcat(str, " Async", chars_left);
- break;
- case BLKIO_STAT_TOTAL:
- strlcat(str, " Total", chars_left);
- break;
- default:
- strlcat(str, " Invalid", chars_left);
- }
+ /* some drivers (floppy) instantiate a queue w/o disk registered */
+ if (blkg->q->backing_dev_info.dev)
+ return dev_name(blkg->q->backing_dev_info.dev);
+ return NULL;
}
-static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
- struct cgroup_map_cb *cb, dev_t dev)
+/**
+ * blkcg_print_blkgs - helper for printing per-blkg data
+ * @sf: seq_file to print to
+ * @blkcg: blkcg of interest
+ * @prfill: fill function to print out a blkg
+ * @pol: policy in question
+ * @data: data to be passed to @prfill
+ * @show_total: to print out sum of prfill return values or not
+ *
+ * This function invokes @prfill on each blkg of @blkcg if pd for the
+ * policy specified by @pol exists. @prfill is invoked with @sf, the
+ * policy data and @data. If @show_total is %true, the sum of the return
+ * values from @prfill is printed with "Total" label at the end.
+ *
+ * This is to be used to construct print functions for the
+ * cftype->read_seq_string method.
+ */
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+ u64 (*prfill)(struct seq_file *,
+ struct blkg_policy_data *, int),
+ const struct blkcg_policy *pol, int data,
+ bool show_total)
{
- blkio_get_key_name(0, dev, str, chars_left, true);
- cb->fill(cb, str, val);
- return val;
-}
-
+ struct blkcg_gq *blkg;
+ struct hlist_node *n;
+ u64 total = 0;
-static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
- enum stat_type_cpu type, enum stat_sub_type sub_type)
-{
- int cpu;
- struct blkio_group_stats_cpu *stats_cpu;
- u64 val = 0, tval;
-
- for_each_possible_cpu(cpu) {
- unsigned int start;
- stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
-
- do {
- start = u64_stats_fetch_begin(&stats_cpu->syncp);
- if (type == BLKIO_STAT_CPU_SECTORS)
- tval = stats_cpu->sectors;
- else
- tval = stats_cpu->stat_arr_cpu[type][sub_type];
- } while(u64_stats_fetch_retry(&stats_cpu->syncp, start));
-
- val += tval;
- }
+ spin_lock_irq(&blkcg->lock);
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
+ if (blkcg_policy_enabled(blkg->q, pol))
+ total += prfill(sf, blkg->pd[pol->plid], data);
+ spin_unlock_irq(&blkcg->lock);
- return val;
+ if (show_total)
+ seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
+EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
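
A typical read_seq_string handler built on this helper, patterned after the cfq conversion elsewhere in this patch (foo_* names hypothetical):

	static int foo_print_time(struct cgroup *cgrp, struct cftype *cft,
				  struct seq_file *sf)
	{
		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

		blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_foo,
				  offsetof(struct foo_group, time), false);
		return 0;
	}
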
-static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
- struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
+/**
+ * __blkg_prfill_u64 - prfill helper for a single u64 value
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @v: value to print
+ *
+ * Print @v to @sf for the device associated with @pd.
+ */
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
- uint64_t disk_total, val;
- char key_str[MAX_KEY_LEN];
- enum stat_sub_type sub_type;
-
- if (type == BLKIO_STAT_CPU_SECTORS) {
- val = blkio_read_stat_cpu(blkg, type, 0);
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
- }
+ const char *dname = blkg_dev_name(pd->blkg);
- for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
- sub_type++) {
- blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
- val = blkio_read_stat_cpu(blkg, type, sub_type);
- cb->fill(cb, key_str, val);
- }
+ if (!dname)
+ return 0;
- disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
- blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
-
- blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
- cb->fill(cb, key_str, disk_total);
- return disk_total;
+ seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
+ return v;
}
+EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
-/* This should be called with blkg->stats_lock held */
-static uint64_t blkio_get_stat(struct blkio_group *blkg,
- struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
-{
- uint64_t disk_total;
- char key_str[MAX_KEY_LEN];
- enum stat_sub_type sub_type;
-
- if (type == BLKIO_STAT_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.time, cb, dev);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- if (type == BLKIO_STAT_UNACCOUNTED_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.unaccounted_time, cb, dev);
- if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
- uint64_t sum = blkg->stats.avg_queue_size_sum;
- uint64_t samples = blkg->stats.avg_queue_size_samples;
- if (samples)
- do_div(sum, samples);
- else
- sum = 0;
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
- }
- if (type == BLKIO_STAT_GROUP_WAIT_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.group_wait_time, cb, dev);
- if (type == BLKIO_STAT_IDLE_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.idle_time, cb, dev);
- if (type == BLKIO_STAT_EMPTY_TIME)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.empty_time, cb, dev);
- if (type == BLKIO_STAT_DEQUEUE)
- return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
- blkg->stats.dequeue, cb, dev);
-#endif
-
- for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
- sub_type++) {
- blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
- cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
- }
- disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
- blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
- blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
- cb->fill(cb, key_str, disk_total);
- return disk_total;
-}
-
-static int blkio_policy_parse_and_set(char *buf,
- struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
-{
- struct gendisk *disk = NULL;
- char *s[4], *p, *major_s = NULL, *minor_s = NULL;
- unsigned long major, minor;
- int i = 0, ret = -EINVAL;
- int part;
- dev_t dev;
- u64 temp;
-
- memset(s, 0, sizeof(s));
-
- while ((p = strsep(&buf, " ")) != NULL) {
- if (!*p)
- continue;
-
- s[i++] = p;
-
- /* Prevent from inputing too many things */
- if (i == 3)
- break;
- }
-
- if (i != 2)
- goto out;
-
- p = strsep(&s[0], ":");
- if (p != NULL)
- major_s = p;
- else
- goto out;
-
- minor_s = s[0];
- if (!minor_s)
- goto out;
-
- if (strict_strtoul(major_s, 10, &major))
- goto out;
-
- if (strict_strtoul(minor_s, 10, &minor))
- goto out;
-
- dev = MKDEV(major, minor);
+/**
+ * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @rwstat: rwstat to print
+ *
+ * Print @rwstat to @sf for the device associated with @pd.
+ */
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat *rwstat)
+{
+ static const char *rwstr[] = {
+ [BLKG_RWSTAT_READ] = "Read",
+ [BLKG_RWSTAT_WRITE] = "Write",
+ [BLKG_RWSTAT_SYNC] = "Sync",
+ [BLKG_RWSTAT_ASYNC] = "Async",
+ };
+ const char *dname = blkg_dev_name(pd->blkg);
+ u64 v;
+ int i;
- if (strict_strtoull(s[1], 10, &temp))
- goto out;
+ if (!dname)
+ return 0;
- /* For rule removal, do not check for device presence. */
- if (temp) {
- disk = get_gendisk(dev, &part);
- if (!disk || part) {
- ret = -ENODEV;
- goto out;
- }
- }
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
+ (unsigned long long)rwstat->cnt[i]);
- newpn->dev = dev;
-
- switch (plid) {
- case BLKIO_POLICY_PROP:
- if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
- temp > BLKIO_WEIGHT_MAX)
- goto out;
-
- newpn->plid = plid;
- newpn->fileid = fileid;
- newpn->val.weight = temp;
- break;
- case BLKIO_POLICY_THROTL:
- switch(fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- newpn->plid = plid;
- newpn->fileid = fileid;
- newpn->val.bps = temp;
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- if (temp > THROTL_IOPS_MAX)
- goto out;
-
- newpn->plid = plid;
- newpn->fileid = fileid;
- newpn->val.iops = (unsigned int)temp;
- break;
- }
- break;
- default:
- BUG();
- }
- ret = 0;
-out:
- put_disk(disk);
- return ret;
+ v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
+ seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
+ return v;
}
-unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
- dev_t dev)
+/**
+ * blkg_prfill_stat - prfill callback for blkg_stat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_stat in @pd
+ *
+ * prfill callback for printing a blkg_stat.
+ */
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
- struct blkio_policy_node *pn;
- unsigned long flags;
- unsigned int weight;
-
- spin_lock_irqsave(&blkcg->lock, flags);
-
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
- BLKIO_PROP_weight_device);
- if (pn)
- weight = pn->val.weight;
- else
- weight = blkcg->weight;
-
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return weight;
+ return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
-EXPORT_SYMBOL_GPL(blkcg_get_weight);
+EXPORT_SYMBOL_GPL(blkg_prfill_stat);
-uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
+/**
+ * blkg_prfill_rwstat - prfill callback for blkg_rwstat
+ * @sf: seq_file to print to
+ * @pd: policy private data of interest
+ * @off: offset to the blkg_rwstat in @pd
+ *
+ * prfill callback for printing a blkg_rwstat.
+ */
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
{
- struct blkio_policy_node *pn;
- unsigned long flags;
- uint64_t bps = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_bps_device);
- if (pn)
- bps = pn->val.bps;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return bps;
-}
+ struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
-uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- uint64_t bps = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_bps_device);
- if (pn)
- bps = pn->val.bps;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return bps;
+ return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
+EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
-unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
+/**
+ * blkg_conf_prep - parse and prepare for per-blkg config update
+ * @blkcg: target block cgroup
+ * @pol: target policy
+ * @input: input string
+ * @ctx: blkg_conf_ctx to be filled
+ *
+ * Parse per-blkg config update from @input and initialize @ctx with the
+ * result. @ctx->blkg points to the blkg to be updated and @ctx->v to the
+ * new value. This function returns with the RCU read lock and queue lock
+ * held and must be paired with blkg_conf_finish().
+ */
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ const char *input, struct blkg_conf_ctx *ctx)
+ __acquires(rcu) __acquires(disk->queue->queue_lock)
{
- struct blkio_policy_node *pn;
- unsigned long flags;
- unsigned int iops = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_iops_device);
- if (pn)
- iops = pn->val.iops;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return iops;
-}
+ struct gendisk *disk;
+ struct blkcg_gq *blkg;
+ unsigned int major, minor;
+ unsigned long long v;
+ int part, ret;
-unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
-{
- struct blkio_policy_node *pn;
- unsigned long flags;
- unsigned int iops = -1;
-
- spin_lock_irqsave(&blkcg->lock, flags);
- pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_iops_device);
- if (pn)
- iops = pn->val.iops;
- spin_unlock_irqrestore(&blkcg->lock, flags);
-
- return iops;
-}
+ if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
+ return -EINVAL;
-/* Checks whether user asked for deleting a policy rule */
-static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
-{
- switch(pn->plid) {
- case BLKIO_POLICY_PROP:
- if (pn->val.weight == 0)
- return 1;
- break;
- case BLKIO_POLICY_THROTL:
- switch(pn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- if (pn->val.bps == 0)
- return 1;
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- if (pn->val.iops == 0)
- return 1;
- }
- break;
- default:
- BUG();
- }
+ disk = get_gendisk(MKDEV(major, minor), &part);
+ if (!disk || part)
+ return -EINVAL;
- return 0;
-}
+ rcu_read_lock();
+ spin_lock_irq(disk->queue->queue_lock);
-static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
- struct blkio_policy_node *newpn)
-{
- switch(oldpn->plid) {
- case BLKIO_POLICY_PROP:
- oldpn->val.weight = newpn->val.weight;
- break;
- case BLKIO_POLICY_THROTL:
- switch(newpn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- oldpn->val.bps = newpn->val.bps;
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- oldpn->val.iops = newpn->val.iops;
+ if (blkcg_policy_enabled(disk->queue, pol))
+ blkg = blkg_lookup_create(blkcg, disk->queue);
+ else
+ blkg = ERR_PTR(-EINVAL);
+
+ if (IS_ERR(blkg)) {
+ ret = PTR_ERR(blkg);
+ rcu_read_unlock();
+ spin_unlock_irq(disk->queue->queue_lock);
+ put_disk(disk);
+ /*
+	 * If the queue was bypassing, we should retry. Do so after a
+	 * short msleep(). It isn't strictly necessary but the queue
+	 * can be bypassing for some time and it's always nice to
+	 * avoid busy looping.
+ */
+ if (ret == -EBUSY) {
+ msleep(10);
+ ret = restart_syscall();
}
- break;
- default:
- BUG();
+ return ret;
}
+
+ ctx->disk = disk;
+ ctx->blkg = blkg;
+ ctx->v = v;
+ return 0;
}
+EXPORT_SYMBOL_GPL(blkg_conf_prep);
-/*
- * Some rules/values in blkg have changed. Propagate those to respective
- * policies.
+/**
+ * blkg_conf_finish - finish up per-blkg config update
+ * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
+ *
+ * Finish up after per-blkg config update. This function must be paired
+ * with blkg_conf_prep().
*/
-static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, struct blkio_policy_node *pn)
+void blkg_conf_finish(struct blkg_conf_ctx *ctx)
+ __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
- unsigned int weight, iops;
- u64 bps;
-
- switch(pn->plid) {
- case BLKIO_POLICY_PROP:
- weight = pn->val.weight ? pn->val.weight :
- blkcg->weight;
- blkio_update_group_weight(blkg, weight);
- break;
- case BLKIO_POLICY_THROTL:
- switch(pn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- bps = pn->val.bps ? pn->val.bps : (-1);
- blkio_update_group_bps(blkg, bps, pn->fileid);
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- iops = pn->val.iops ? pn->val.iops : (-1);
- blkio_update_group_iops(blkg, iops, pn->fileid);
- break;
- }
- break;
- default:
- BUG();
- }
+ spin_unlock_irq(ctx->disk->queue->queue_lock);
+ rcu_read_unlock();
+ put_disk(ctx->disk);
}
+EXPORT_SYMBOL_GPL(blkg_conf_finish);
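
Putting the pair to work, a per-device configuration writer would follow this shape (a sketch; foo_* hypothetical, blkg_to_pd() assumed from blk-cgroup.h):

	static int foo_set_limit(struct cgroup *cgrp, struct cftype *cft,
				 const char *buf)
	{
		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
		struct blkg_conf_ctx ctx;
		struct foo_group *fg;
		int ret;

		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
		if (ret)
			return ret;	/* may already be restart_syscall() */

		/* RCU and queue locks are held between prep and finish */
		fg = pd_to_foo(blkg_to_pd(ctx.blkg, &blkcg_policy_foo));
		fg->limit = ctx.v;

		blkg_conf_finish(&ctx);
		return 0;
	}
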
-/*
- * A policy node rule has been updated. Propagate this update to all the
- * block groups which might be affected by this update.
+struct cftype blkcg_files[] = {
+ {
+ .name = "reset_stats",
+ .write_u64 = blkcg_reset_stats,
+ },
+ { } /* terminate */
+};
+
+/**
+ * blkcg_pre_destroy - cgroup pre_destroy callback
+ * @cgroup: cgroup of interest
+ *
+ * This function is called when @cgroup is about to go away and is
+ * responsible for shooting down all blkgs associated with @cgroup. blkgs
+ * should be removed while holding both q and blkcg locks. As the blkcg
+ * lock is nested inside the q lock, this function performs reverse double
+ * lock dancing.
+ *
+ * This is the blkcg counterpart of ioc_release_fn().
*/
-static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
- struct blkio_policy_node *pn)
+static int blkcg_pre_destroy(struct cgroup *cgroup)
{
- struct blkio_group *blkg;
- struct hlist_node *n;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
- spin_lock(&blkio_list_lock);
spin_lock_irq(&blkcg->lock);
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
- if (pn->dev != blkg->dev || pn->plid != blkg->plid)
- continue;
- blkio_update_blkg_policy(blkcg, blkg, pn);
+ while (!hlist_empty(&blkcg->blkg_list)) {
+ struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
+ struct blkcg_gq, blkcg_node);
+ struct request_queue *q = blkg->q;
+
+ if (spin_trylock(q->queue_lock)) {
+ blkg_destroy(blkg);
+ spin_unlock(q->queue_lock);
+ } else {
+ spin_unlock_irq(&blkcg->lock);
+ cpu_relax();
+ spin_lock_irq(&blkcg->lock);
+ }
}
spin_unlock_irq(&blkcg->lock);
- spin_unlock(&blkio_list_lock);
+ return 0;
}
-static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
- const char *buffer)
+static void blkcg_destroy(struct cgroup *cgroup)
{
- int ret = 0;
- char *buf;
- struct blkio_policy_node *newpn, *pn;
- struct blkio_cgroup *blkcg;
- int keep_newpn = 0;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int fileid = BLKIOFILE_ATTR(cft->private);
-
- buf = kstrdup(buffer, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
- if (!newpn) {
- ret = -ENOMEM;
- goto free_buf;
- }
+ struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
- ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
- if (ret)
- goto free_newpn;
-
- blkcg = cgroup_to_blkio_cgroup(cgrp);
-
- spin_lock_irq(&blkcg->lock);
+ if (blkcg != &blkcg_root)
+ kfree(blkcg);
+}
- pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
- if (!pn) {
- if (!blkio_delete_rule_command(newpn)) {
- blkio_policy_insert_node(blkcg, newpn);
- keep_newpn = 1;
- }
- spin_unlock_irq(&blkcg->lock);
- goto update_io_group;
- }
+static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
+{
+ static atomic64_t id_seq = ATOMIC64_INIT(0);
+ struct blkcg *blkcg;
+ struct cgroup *parent = cgroup->parent;
- if (blkio_delete_rule_command(newpn)) {
- blkio_policy_delete_node(pn);
- kfree(pn);
- spin_unlock_irq(&blkcg->lock);
- goto update_io_group;
+ if (!parent) {
+ blkcg = &blkcg_root;
+ goto done;
}
- spin_unlock_irq(&blkcg->lock);
- blkio_update_policy_rule(pn, newpn);
+ blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
+ if (!blkcg)
+ return ERR_PTR(-ENOMEM);
-update_io_group:
- blkio_update_policy_node_blkg(blkcg, newpn);
+ blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
+ blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
+done:
+ spin_lock_init(&blkcg->lock);
+ INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
+ INIT_HLIST_HEAD(&blkcg->blkg_list);
-free_newpn:
- if (!keep_newpn)
- kfree(newpn);
-free_buf:
- kfree(buf);
- return ret;
+ return &blkcg->css;
}
-static void
-blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
+/**
+ * blkcg_init_queue - initialize blkcg part of request queue
+ * @q: request_queue to initialize
+ *
+ * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
+ * part of new request_queue @q.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int blkcg_init_queue(struct request_queue *q)
{
- switch(pn->plid) {
- case BLKIO_POLICY_PROP:
- if (pn->fileid == BLKIO_PROP_weight_device)
- seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
- MINOR(pn->dev), pn->val.weight);
- break;
- case BLKIO_POLICY_THROTL:
- switch(pn->fileid) {
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
- MINOR(pn->dev), pn->val.bps);
- break;
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
- MINOR(pn->dev), pn->val.iops);
- break;
- }
- break;
- default:
- BUG();
- }
-}
+ might_sleep();
-/* cgroup files which read their data from policy nodes end up here */
-static void blkio_read_policy_node_files(struct cftype *cft,
- struct blkio_cgroup *blkcg, struct seq_file *m)
-{
- struct blkio_policy_node *pn;
-
- if (!list_empty(&blkcg->policy_list)) {
- spin_lock_irq(&blkcg->lock);
- list_for_each_entry(pn, &blkcg->policy_list, node) {
- if (!pn_matches_cftype(cft, pn))
- continue;
- blkio_print_policy_node(m, pn);
- }
- spin_unlock_irq(&blkcg->lock);
- }
+ return blk_throtl_init(q);
}
-static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *m)
+/**
+ * blkcg_drain_queue - drain blkcg part of request_queue
+ * @q: request_queue to drain
+ *
+ * Called from blk_drain_queue(). Responsible for draining blkcg part.
+ */
+void blkcg_drain_queue(struct request_queue *q)
{
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
-
- blkcg = cgroup_to_blkio_cgroup(cgrp);
-
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_weight_device:
- blkio_read_policy_node_files(cft, blkcg, m);
- return 0;
- default:
- BUG();
- }
- break;
- case BLKIO_POLICY_THROTL:
- switch(name){
- case BLKIO_THROTL_read_bps_device:
- case BLKIO_THROTL_write_bps_device:
- case BLKIO_THROTL_read_iops_device:
- case BLKIO_THROTL_write_iops_device:
- blkio_read_policy_node_files(cft, blkcg, m);
- return 0;
- default:
- BUG();
- }
- break;
- default:
- BUG();
- }
+ lockdep_assert_held(q->queue_lock);
- return 0;
+ blk_throtl_drain(q);
}
-static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
- struct cftype *cft, struct cgroup_map_cb *cb,
- enum stat_type type, bool show_total, bool pcpu)
+/**
+ * blkcg_exit_queue - exit and release blkcg part of request_queue
+ * @q: request_queue being released
+ *
+ * Called from blk_release_queue(). Responsible for exiting blkcg part.
+ */
+void blkcg_exit_queue(struct request_queue *q)
{
- struct blkio_group *blkg;
- struct hlist_node *n;
- uint64_t cgroup_total = 0;
+ spin_lock_irq(q->queue_lock);
+ blkg_destroy_all(q);
+ spin_unlock_irq(q->queue_lock);
- rcu_read_lock();
- hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
- if (blkg->dev) {
- if (!cftype_blkg_same_policy(cft, blkg))
- continue;
- if (pcpu)
- cgroup_total += blkio_get_stat_cpu(blkg, cb,
- blkg->dev, type);
- else {
- spin_lock_irq(&blkg->stats_lock);
- cgroup_total += blkio_get_stat(blkg, cb,
- blkg->dev, type);
- spin_unlock_irq(&blkg->stats_lock);
- }
- }
- }
- if (show_total)
- cb->fill(cb, "Total", cgroup_total);
- rcu_read_unlock();
- return 0;
+ blk_throtl_exit(q);
}
-/* All map kind of cgroup file get serviced by this function */
-static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
- struct cgroup_map_cb *cb)
+/*
+ * We cannot support shared io contexts, as we have no means to support
+ * two tasks with the same ioc in two different groups without major rework
+ * of the main cic data structures. For now we allow a task to change
+ * its cgroup only if it's the only owner of its ioc.
+ */
+static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
-
- blkcg = cgroup_to_blkio_cgroup(cgrp);
-
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_TIME, 0, 0);
- case BLKIO_PROP_sectors:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SECTORS, 0, 1);
- case BLKIO_PROP_io_service_bytes:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
- case BLKIO_PROP_io_serviced:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICED, 1, 1);
- case BLKIO_PROP_io_service_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_SERVICE_TIME, 1, 0);
- case BLKIO_PROP_io_wait_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_WAIT_TIME, 1, 0);
- case BLKIO_PROP_io_merged:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_MERGED, 1, 1);
- case BLKIO_PROP_io_queued:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_QUEUED, 1, 0);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- case BLKIO_PROP_unaccounted_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
- case BLKIO_PROP_dequeue:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_DEQUEUE, 0, 0);
- case BLKIO_PROP_avg_queue_size:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
- case BLKIO_PROP_group_wait_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
- case BLKIO_PROP_idle_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_IDLE_TIME, 0, 0);
- case BLKIO_PROP_empty_time:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_EMPTY_TIME, 0, 0);
-#endif
- default:
- BUG();
- }
- break;
- case BLKIO_POLICY_THROTL:
- switch(name){
- case BLKIO_THROTL_io_service_bytes:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
- case BLKIO_THROTL_io_serviced:
- return blkio_read_blkg_stats(blkcg, cft, cb,
- BLKIO_STAT_CPU_SERVICED, 1, 1);
- default:
- BUG();
- }
- break;
- default:
- BUG();
- }
+ struct task_struct *task;
+ struct io_context *ioc;
+ int ret = 0;
- return 0;
+ /* task_lock() is needed to avoid races with exit_io_context() */
+ cgroup_taskset_for_each(task, cgrp, tset) {
+ task_lock(task);
+ ioc = task->io_context;
+ if (ioc && atomic_read(&ioc->nr_tasks) > 1)
+ ret = -EINVAL;
+ task_unlock(task);
+ if (ret)
+ break;
+ }
+ return ret;
}
-static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
-{
- struct blkio_group *blkg;
- struct hlist_node *n;
- struct blkio_policy_node *pn;
-
- if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
- return -EINVAL;
+struct cgroup_subsys blkio_subsys = {
+ .name = "blkio",
+ .create = blkcg_create,
+ .can_attach = blkcg_can_attach,
+ .pre_destroy = blkcg_pre_destroy,
+ .destroy = blkcg_destroy,
+ .subsys_id = blkio_subsys_id,
+ .base_cftypes = blkcg_files,
+ .module = THIS_MODULE,
+};
+EXPORT_SYMBOL_GPL(blkio_subsys);
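
For completeness, a sketch of how a policy in this series describes itself to the blkcg core (blkcg_policy_register() is assumed from the rest of this series; the foo_* callbacks are hypothetical):

	static struct blkcg_policy blkcg_policy_foo = {
		.pd_size		= sizeof(struct foo_group),
		.cftypes		= foo_files,	/* per-policy cgroup files */
		.pd_init_fn		= foo_pd_init,
		.pd_exit_fn		= foo_pd_exit,
		.pd_reset_stats_fn	= foo_pd_reset_stats,
	};

	static int __init foo_init(void)
	{
		return blkcg_policy_register(&blkcg_policy_foo);
	}
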
- spin_lock(&blkio_list_lock);
- spin_lock_irq(&blkcg->lock);
- blkcg->weight = (unsigned int)val;
+/**
+ * blkcg_activate_policy - activate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to activate
+ *
+ * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
+ * bypass mode to populate its blkgs with policy_data for @pol.
+ *
+ * Activation happens with @q bypassed, so nobody would be accessing blkgs
+ * from the IO path. Update of each blkg is protected by both queue and blkcg
+ * locks so that holding either lock and testing blkcg_policy_enabled() is
+ * always enough for dereferencing policy data.
+ *
+ * The caller is responsible for synchronizing [de]activations and policy
+ * [un]registrations. Returns 0 on success, -errno on failure.
+ */
+int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol)
+{
+ LIST_HEAD(pds);
+ struct blkcg_gq *blkg;
+ struct blkg_policy_data *pd, *n;
+ int cnt = 0, ret;
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
- pn = blkio_policy_search_node(blkcg, blkg->dev,
- BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
- if (pn)
- continue;
+ if (blkcg_policy_enabled(q, pol))
+ return 0;
- blkio_update_group_weight(blkg, blkcg->weight);
- }
- spin_unlock_irq(&blkcg->lock);
- spin_unlock(&blkio_list_lock);
- return 0;
-}
+ blk_queue_bypass_start(q);
-static u64 blkiocg_file_read_u64 (struct cgroup *cgrp, struct cftype *cft) {
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
+ /* make sure the root blkg exists and count the existing blkgs */
+ spin_lock_irq(q->queue_lock);
- blkcg = cgroup_to_blkio_cgroup(cgrp);
+ rcu_read_lock();
+ blkg = __blkg_lookup_create(&blkcg_root, q);
+ rcu_read_unlock();
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_weight:
- return (u64)blkcg->weight;
- }
- break;
- default:
- BUG();
+ if (IS_ERR(blkg)) {
+ ret = PTR_ERR(blkg);
+ goto out_unlock;
}
- return 0;
-}
+ q->root_blkg = blkg;
-static int
-blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
-{
- struct blkio_cgroup *blkcg;
- enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
- int name = BLKIOFILE_ATTR(cft->private);
+ list_for_each_entry(blkg, &q->blkg_list, q_node)
+ cnt++;
- blkcg = cgroup_to_blkio_cgroup(cgrp);
+ spin_unlock_irq(q->queue_lock);
- switch(plid) {
- case BLKIO_POLICY_PROP:
- switch(name) {
- case BLKIO_PROP_weight:
- return blkio_weight_write(blkcg, val);
+ /* allocate policy_data for all existing blkgs */
+ while (cnt--) {
+ pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out_free;
}
- break;
- default:
- BUG();
+ list_add_tail(&pd->alloc_node, &pds);
}
- return 0;
-}
-
-struct cftype blkio_files[] = {
- {
- .name = "weight_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_weight_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
- {
- .name = "weight",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_weight),
- .read_u64 = blkiocg_file_read_u64,
- .write_u64 = blkiocg_file_write_u64,
- },
- {
- .name = "time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "sectors",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_sectors),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_service_bytes",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_service_bytes),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_serviced",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_serviced),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_service_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_service_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_wait_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_wait_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_merged",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_merged),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "io_queued",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_io_queued),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "reset_stats",
- .write_u64 = blkiocg_reset_stats,
- },
-#ifdef CONFIG_BLK_DEV_THROTTLING
- {
- .name = "throttle.read_bps_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_bps_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
+ /*
+ * Install the allocated pds. With @q bypassing, no new blkg
+ * should have been created while the queue lock was dropped.
+ */
+ spin_lock_irq(q->queue_lock);
- {
- .name = "throttle.write_bps_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_bps_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ if (WARN_ON(list_empty(&pds))) {
+ /* umm... this shouldn't happen, just abort */
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
+ list_del_init(&pd->alloc_node);
- {
- .name = "throttle.read_iops_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_read_iops_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
+ /* grab blkcg lock too while installing @pd on @blkg */
+ spin_lock(&blkg->blkcg->lock);
- {
- .name = "throttle.write_iops_device",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_write_iops_device),
- .read_seq_string = blkiocg_file_read,
- .write_string = blkiocg_file_write,
- .max_write_len = 256,
- },
- {
- .name = "throttle.io_service_bytes",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_io_service_bytes),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "throttle.io_serviced",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
- BLKIO_THROTL_io_serviced),
- .read_map = blkiocg_file_read_map,
- },
-#endif /* CONFIG_BLK_DEV_THROTTLING */
+ blkg->pd[pol->plid] = pd;
+ pd->blkg = blkg;
+ pol->pd_init_fn(blkg);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- {
- .name = "avg_queue_size",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_avg_queue_size),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "group_wait_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_group_wait_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "idle_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_idle_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "empty_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_empty_time),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "dequeue",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_dequeue),
- .read_map = blkiocg_file_read_map,
- },
- {
- .name = "unaccounted_time",
- .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
- BLKIO_PROP_unaccounted_time),
- .read_map = blkiocg_file_read_map,
- },
-#endif
-};
+ spin_unlock(&blkg->blkcg->lock);
+ }
-static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
-{
- return cgroup_add_files(cgroup, subsys, blkio_files,
- ARRAY_SIZE(blkio_files));
+ __set_bit(pol->plid, q->blkcg_pols);
+ ret = 0;
+out_unlock:
+ spin_unlock_irq(q->queue_lock);
+out_free:
+ blk_queue_bypass_end(q);
+ list_for_each_entry_safe(pd, n, &pds, alloc_node)
+ kfree(pd);
+ return ret;
}
+EXPORT_SYMBOL_GPL(blkcg_activate_policy);
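
For illustration, a minimal sketch of how a policy would activate itself on a
queue with the API above; the "foo" policy, its blkcg_policy_foo instance and
foo_init_queue() are hypothetical:

	static int foo_init_queue(struct request_queue *q)
	{
		int ret;

		/* populate every existing blkg on @q with foo's pd */
		ret = blkcg_activate_policy(q, &blkcg_policy_foo);
		if (ret)
			return ret;

		/* blkg_to_pd(blkg, &blkcg_policy_foo) is valid from here on */
		return 0;
	}
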
-static void blkiocg_destroy(struct cgroup *cgroup)
+/**
+ * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
+ * @q: request_queue of interest
+ * @pol: blkcg policy to deactivate
+ *
+ * Deactivate @pol on @q. Follows the same synchronization rules as
+ * blkcg_activate_policy().
+ */
+void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol)
{
- struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
- unsigned long flags;
- struct blkio_group *blkg;
- void *key;
- struct blkio_policy_type *blkiop;
- struct blkio_policy_node *pn, *pntmp;
+ struct blkcg_gq *blkg;
- rcu_read_lock();
- do {
- spin_lock_irqsave(&blkcg->lock, flags);
+ if (!blkcg_policy_enabled(q, pol))
+ return;
- if (hlist_empty(&blkcg->blkg_list)) {
- spin_unlock_irqrestore(&blkcg->lock, flags);
- break;
- }
+ blk_queue_bypass_start(q);
+ spin_lock_irq(q->queue_lock);
- blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
- blkcg_node);
- key = rcu_dereference(blkg->key);
- __blkiocg_del_blkio_group(blkg);
+ __clear_bit(pol->plid, q->blkcg_pols);
- spin_unlock_irqrestore(&blkcg->lock, flags);
+ /* if no policy is left, no need for blkgs - shoot them down */
+ if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
+ blkg_destroy_all(q);
- /*
- * This blkio_group is being unlinked as associated cgroup is
- * going away. Let all the IO controlling policies know about
- * this event.
- */
- spin_lock(&blkio_list_lock);
- list_for_each_entry(blkiop, &blkio_list, list) {
- if (blkiop->plid != blkg->plid)
- continue;
- blkiop->ops.blkio_unlink_group_fn(key, blkg);
- }
- spin_unlock(&blkio_list_lock);
- } while (1);
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
+ /* grab blkcg lock too while removing @pd from @blkg */
+ spin_lock(&blkg->blkcg->lock);
- list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
- blkio_policy_delete_node(pn);
- kfree(pn);
- }
+ if (pol->pd_exit_fn)
+ pol->pd_exit_fn(blkg);
- free_css_id(&blkio_subsys, &blkcg->css);
- rcu_read_unlock();
- if (blkcg != &blkio_root_cgroup)
- kfree(blkcg);
-}
-
-static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
-{
- struct blkio_cgroup *blkcg;
- struct cgroup *parent = cgroup->parent;
+ kfree(blkg->pd[pol->plid]);
+ blkg->pd[pol->plid] = NULL;
- if (!parent) {
- blkcg = &blkio_root_cgroup;
- goto done;
+ spin_unlock(&blkg->blkcg->lock);
}
- blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
- if (!blkcg)
- return ERR_PTR(-ENOMEM);
-
- blkcg->weight = BLKIO_WEIGHT_DEFAULT;
-done:
- spin_lock_init(&blkcg->lock);
- INIT_HLIST_HEAD(&blkcg->blkg_list);
-
- INIT_LIST_HEAD(&blkcg->policy_list);
- return &blkcg->css;
+ spin_unlock_irq(q->queue_lock);
+ blk_queue_bypass_end(q);
}
+EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
-/*
- * We cannot support shared io contexts, as we have no mean to support
- * two tasks with the same ioc in two different groups without major rework
- * of the main cic data structures. For now we allow a task to change
- * its cgroup only if it's the only owner of its ioc.
+/**
+ * blkcg_policy_register - register a blkcg policy
+ * @pol: blkcg policy to register
+ *
+ * Register @pol with blkcg core. Might sleep and @pol may be modified on
+ * successful registration. Returns 0 on success and -errno on failure.
*/
-static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+int blkcg_policy_register(struct blkcg_policy *pol)
{
- struct task_struct *task;
- struct io_context *ioc;
- int ret = 0;
+ int i, ret;
- /* task_lock() is needed to avoid races with exit_io_context() */
- cgroup_taskset_for_each(task, cgrp, tset) {
- task_lock(task);
- ioc = task->io_context;
- if (ioc && atomic_read(&ioc->nr_tasks) > 1)
- ret = -EINVAL;
- task_unlock(task);
- if (ret)
+ if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
+ return -EINVAL;
+
+ mutex_lock(&blkcg_pol_mutex);
+
+ /* find an empty slot */
+ ret = -ENOSPC;
+ for (i = 0; i < BLKCG_MAX_POLS; i++)
+ if (!blkcg_policy[i])
break;
- }
- return ret;
-}
+ if (i >= BLKCG_MAX_POLS)
+ goto out_unlock;
-static void blkiocg_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
-{
- struct task_struct *task;
- struct io_context *ioc;
+ /* register and update blkgs */
+ pol->plid = i;
+ blkcg_policy[i] = pol;
- cgroup_taskset_for_each(task, cgrp, tset) {
- /* we don't lose anything even if ioc allocation fails */
- ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
- if (ioc) {
- ioc_cgroup_changed(ioc);
- put_io_context(ioc);
- }
- }
+ /* everything is in place, add intf files for the new policy */
+ if (pol->cftypes)
+ WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
+ ret = 0;
+out_unlock:
+ mutex_unlock(&blkcg_pol_mutex);
+ return ret;
}
+EXPORT_SYMBOL_GPL(blkcg_policy_register);
-void blkio_policy_register(struct blkio_policy_type *blkiop)
+/**
+ * blkcg_policy_unregister - unregister a blkcg policy
+ * @pol: blkcg policy to unregister
+ *
+ * Undo blkcg_policy_register(@pol). Might sleep.
+ */
+void blkcg_policy_unregister(struct blkcg_policy *pol)
{
- spin_lock(&blkio_list_lock);
- list_add_tail(&blkiop->list, &blkio_list);
- spin_unlock(&blkio_list_lock);
-}
-EXPORT_SYMBOL_GPL(blkio_policy_register);
+ mutex_lock(&blkcg_pol_mutex);
-void blkio_policy_unregister(struct blkio_policy_type *blkiop)
-{
- spin_lock(&blkio_list_lock);
- list_del_init(&blkiop->list);
- spin_unlock(&blkio_list_lock);
-}
-EXPORT_SYMBOL_GPL(blkio_policy_unregister);
+ if (WARN_ON(blkcg_policy[pol->plid] != pol))
+ goto out_unlock;
-static int __init init_cgroup_blkio(void)
-{
- return cgroup_load_subsys(&blkio_subsys);
-}
+ /* kill the intf files first */
+ if (pol->cftypes)
+ cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
-static void __exit exit_cgroup_blkio(void)
-{
- cgroup_unload_subsys(&blkio_subsys);
+ /* unregister and update blkgs */
+ blkcg_policy[pol->plid] = NULL;
+out_unlock:
+ mutex_unlock(&blkcg_pol_mutex);
}
-
-module_init(init_cgroup_blkio);
-module_exit(exit_cgroup_blkio);
-MODULE_LICENSE("GPL");
+EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
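
A sketch of the expected register/unregister pairing in a policy module;
everything named foo is hypothetical, but the calls mirror how the throttle
policy later in this patch uses the API:

	static int __init foo_init(void)
	{
		/* grabs a free plid slot and adds pol->cftypes intf files */
		return blkcg_policy_register(&blkcg_policy_foo);
	}

	static void __exit foo_exit(void)
	{
		/* removes the intf files and releases the plid slot */
		blkcg_policy_unregister(&blkcg_policy_foo);
	}

	module_init(foo_init);
	module_exit(foo_exit);
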
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 6f3ace7e792f..8ac457ce7783 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -15,350 +15,371 @@
#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
-
-enum blkio_policy_id {
- BLKIO_POLICY_PROP = 0, /* Proportional Bandwidth division */
- BLKIO_POLICY_THROTL, /* Throttling */
-};
+#include <linux/seq_file.h>
+#include <linux/radix-tree.h>
/* Max limits for throttle policy */
#define THROTL_IOPS_MAX UINT_MAX
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-
-#ifndef CONFIG_BLK_CGROUP
-/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */
-extern struct cgroup_subsys blkio_subsys;
-#define blkio_subsys_id blkio_subsys.subsys_id
-#endif
-
-enum stat_type {
- /* Total time spent (in ns) between request dispatch to the driver and
- * request completion for IOs doen by this cgroup. This may not be
- * accurate when NCQ is turned on. */
- BLKIO_STAT_SERVICE_TIME = 0,
- /* Total time spent waiting in scheduler queue in ns */
- BLKIO_STAT_WAIT_TIME,
- /* Number of IOs queued up */
- BLKIO_STAT_QUEUED,
- /* All the single valued stats go below this */
- BLKIO_STAT_TIME,
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- /* Time not charged to this cgroup */
- BLKIO_STAT_UNACCOUNTED_TIME,
- BLKIO_STAT_AVG_QUEUE_SIZE,
- BLKIO_STAT_IDLE_TIME,
- BLKIO_STAT_EMPTY_TIME,
- BLKIO_STAT_GROUP_WAIT_TIME,
- BLKIO_STAT_DEQUEUE
-#endif
-};
+/* CFQ specific, out here for blkcg->cfq_weight */
+#define CFQ_WEIGHT_MIN 10
+#define CFQ_WEIGHT_MAX 1000
+#define CFQ_WEIGHT_DEFAULT 500
-/* Per cpu stats */
-enum stat_type_cpu {
- BLKIO_STAT_CPU_SECTORS,
- /* Total bytes transferred */
- BLKIO_STAT_CPU_SERVICE_BYTES,
- /* Total IOs serviced, post merge */
- BLKIO_STAT_CPU_SERVICED,
- /* Number of IOs merged */
- BLKIO_STAT_CPU_MERGED,
- BLKIO_STAT_CPU_NR
-};
+#ifdef CONFIG_BLK_CGROUP
-enum stat_sub_type {
- BLKIO_STAT_READ = 0,
- BLKIO_STAT_WRITE,
- BLKIO_STAT_SYNC,
- BLKIO_STAT_ASYNC,
- BLKIO_STAT_TOTAL
-};
+enum blkg_rwstat_type {
+ BLKG_RWSTAT_READ,
+ BLKG_RWSTAT_WRITE,
+ BLKG_RWSTAT_SYNC,
+ BLKG_RWSTAT_ASYNC,
-/* blkg state flags */
-enum blkg_state_flags {
- BLKG_waiting = 0,
- BLKG_idling,
- BLKG_empty,
+ BLKG_RWSTAT_NR,
+ BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};
-/* cgroup files owned by proportional weight policy */
-enum blkcg_file_name_prop {
- BLKIO_PROP_weight = 1,
- BLKIO_PROP_weight_device,
- BLKIO_PROP_io_service_bytes,
- BLKIO_PROP_io_serviced,
- BLKIO_PROP_time,
- BLKIO_PROP_sectors,
- BLKIO_PROP_unaccounted_time,
- BLKIO_PROP_io_service_time,
- BLKIO_PROP_io_wait_time,
- BLKIO_PROP_io_merged,
- BLKIO_PROP_io_queued,
- BLKIO_PROP_avg_queue_size,
- BLKIO_PROP_group_wait_time,
- BLKIO_PROP_idle_time,
- BLKIO_PROP_empty_time,
- BLKIO_PROP_dequeue,
-};
+struct blkcg_gq;
-/* cgroup files owned by throttle policy */
-enum blkcg_file_name_throtl {
- BLKIO_THROTL_read_bps_device,
- BLKIO_THROTL_write_bps_device,
- BLKIO_THROTL_read_iops_device,
- BLKIO_THROTL_write_iops_device,
- BLKIO_THROTL_io_service_bytes,
- BLKIO_THROTL_io_serviced,
-};
+struct blkcg {
+ struct cgroup_subsys_state css;
+ spinlock_t lock;
-struct blkio_cgroup {
- struct cgroup_subsys_state css;
- unsigned int weight;
- spinlock_t lock;
- struct hlist_head blkg_list;
- struct list_head policy_list; /* list of blkio_policy_node */
-};
+ struct radix_tree_root blkg_tree;
+ struct blkcg_gq *blkg_hint;
+ struct hlist_head blkg_list;
+
+ /* for policies to test whether associated blkcg has changed */
+ uint64_t id;
-struct blkio_group_stats {
- /* total disk time and nr sectors dispatched by this group */
- uint64_t time;
- uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- /* Time not charged to this cgroup */
- uint64_t unaccounted_time;
-
- /* Sum of number of IOs queued across all samples */
- uint64_t avg_queue_size_sum;
- /* Count of samples taken for average */
- uint64_t avg_queue_size_samples;
- /* How many times this group has been removed from service tree */
- unsigned long dequeue;
-
- /* Total time spent waiting for it to be assigned a timeslice. */
- uint64_t group_wait_time;
- uint64_t start_group_wait_time;
-
- /* Time spent idling for this blkio_group */
- uint64_t idle_time;
- uint64_t start_idle_time;
- /*
- * Total time when we have requests queued and do not contain the
- * current active queue.
- */
- uint64_t empty_time;
- uint64_t start_empty_time;
- uint16_t flags;
-#endif
+ /* TODO: per-policy storage in blkcg */
+ unsigned int cfq_weight; /* belongs to cfq */
};
-/* Per cpu blkio group stats */
-struct blkio_group_stats_cpu {
- uint64_t sectors;
- uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
- struct u64_stats_sync syncp;
+struct blkg_stat {
+ struct u64_stats_sync syncp;
+ uint64_t cnt;
};
-struct blkio_group {
- /* An rcu protected unique identifier for the group */
- void *key;
- struct hlist_node blkcg_node;
- unsigned short blkcg_id;
- /* Store cgroup path */
- char path[128];
- /* The device MKDEV(major, minor), this group has been created for */
- dev_t dev;
- /* policy which owns this blk group */
- enum blkio_policy_id plid;
-
- /* Need to serialize the stats in the case of reset/update */
- spinlock_t stats_lock;
- struct blkio_group_stats stats;
- /* Per cpu stats pointer */
- struct blkio_group_stats_cpu __percpu *stats_cpu;
+struct blkg_rwstat {
+ struct u64_stats_sync syncp;
+ uint64_t cnt[BLKG_RWSTAT_NR];
};
-struct blkio_policy_node {
- struct list_head node;
- dev_t dev;
- /* This node belongs to max bw policy or porportional weight policy */
- enum blkio_policy_id plid;
- /* cgroup file to which this rule belongs to */
- int fileid;
-
- union {
- unsigned int weight;
- /*
- * Rate read/write in terms of bytes per second
- * Whether this rate represents read or write is determined
- * by file type "fileid".
- */
- u64 bps;
- unsigned int iops;
- } val;
+/*
+ * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
+ * request_queue (q). This is used by blkcg policies which need to track
+ * information per blkcg - q pair.
+ *
+ * There can be multiple active blkcg policies and each has its private
+ * data on each blkg, the size of which is determined by
+ * blkcg_policy->pd_size. blkcg core allocates and frees such areas
+ * together with blkg and invokes pd_init/exit_fn() methods.
+ *
+ * Such private data must embed struct blkg_policy_data (pd) at the
+ * beginning, and pd_size can't be smaller than the size of pd.
+ */
+struct blkg_policy_data {
+ /* the blkg this per-policy data belongs to */
+ struct blkcg_gq *blkg;
+
+ /* used during policy activation */
+ struct list_head alloc_node;
};
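
In practice the embedding contract reads like this (compare throtl_grp in
blk-throttle.c below, which follows exactly this layout); foo_grp and its
field are hypothetical:

	struct foo_grp {
		/* must be the first member so pd_to_foo() can container_of() */
		struct blkg_policy_data pd;
		/* policy private fields follow */
		uint64_t bytes_dispatched;
	};

	static inline struct foo_grp *pd_to_foo(struct blkg_policy_data *pd)
	{
		return pd ? container_of(pd, struct foo_grp, pd) : NULL;
	}
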
-extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg,
- dev_t dev);
-extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
- dev_t dev);
-
-typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
-
-typedef void (blkio_update_group_weight_fn) (void *key,
- struct blkio_group *blkg, unsigned int weight);
-typedef void (blkio_update_group_read_bps_fn) (void * key,
- struct blkio_group *blkg, u64 read_bps);
-typedef void (blkio_update_group_write_bps_fn) (void *key,
- struct blkio_group *blkg, u64 write_bps);
-typedef void (blkio_update_group_read_iops_fn) (void *key,
- struct blkio_group *blkg, unsigned int read_iops);
-typedef void (blkio_update_group_write_iops_fn) (void *key,
- struct blkio_group *blkg, unsigned int write_iops);
-
-struct blkio_policy_ops {
- blkio_unlink_group_fn *blkio_unlink_group_fn;
- blkio_update_group_weight_fn *blkio_update_group_weight_fn;
- blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
- blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
- blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
- blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
+/* association between a blk cgroup and a request queue */
+struct blkcg_gq {
+ /* Pointer to the associated request_queue */
+ struct request_queue *q;
+ struct list_head q_node;
+ struct hlist_node blkcg_node;
+ struct blkcg *blkcg;
+ /* reference count */
+ int refcnt;
+
+ struct blkg_policy_data *pd[BLKCG_MAX_POLS];
+
+ struct rcu_head rcu_head;
};
-struct blkio_policy_type {
- struct list_head list;
- struct blkio_policy_ops ops;
- enum blkio_policy_id plid;
+typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
+typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);
+
+struct blkcg_policy {
+ int plid;
+ /* policy specific private data size */
+ size_t pd_size;
+ /* cgroup files for the policy */
+ struct cftype *cftypes;
+
+ /* operations */
+ blkcg_pol_init_pd_fn *pd_init_fn;
+ blkcg_pol_exit_pd_fn *pd_exit_fn;
+ blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
};
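
A hedged sketch of filling in the ops table; plid is left untouched because
blkcg_policy_register() assigns it, and all foo_* symbols are assumptions:

	static struct blkcg_policy blkcg_policy_foo = {
		.pd_size		= sizeof(struct foo_grp),
		.cftypes		= foo_files,	/* hypothetical cftype array */
		.pd_init_fn		= foo_pd_init,
		.pd_exit_fn		= foo_pd_exit,
		.pd_reset_stats_fn	= foo_pd_reset_stats,
	};
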
+extern struct blkcg blkcg_root;
+
+struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup);
+struct blkcg *bio_blkcg(struct bio *bio);
+struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q);
+int blkcg_init_queue(struct request_queue *q);
+void blkcg_drain_queue(struct request_queue *q);
+void blkcg_exit_queue(struct request_queue *q);
+
/* Blkio controller policy registration */
-extern void blkio_policy_register(struct blkio_policy_type *);
-extern void blkio_policy_unregister(struct blkio_policy_type *);
+int blkcg_policy_register(struct blkcg_policy *pol);
+void blkcg_policy_unregister(struct blkcg_policy *pol);
+int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol);
+void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol);
+
+void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
+ u64 (*prfill)(struct seq_file *,
+ struct blkg_policy_data *, int),
+ const struct blkcg_policy *pol, int data,
+ bool show_total);
+u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
+u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ const struct blkg_rwstat *rwstat);
+u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
+u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off);
+
+struct blkg_conf_ctx {
+ struct gendisk *disk;
+ struct blkcg_gq *blkg;
+ u64 v;
+};
+
+int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ const char *input, struct blkg_conf_ctx *ctx);
+void blkg_conf_finish(struct blkg_conf_ctx *ctx);
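
A sketch of the prep/finish bracket these two helpers are meant to form in a
per-device cgroup file write handler; the limit field and the foo symbols are
hypothetical:

	static int foo_set_limit(struct blkcg *blkcg, const char *input)
	{
		struct blkg_conf_ctx ctx;
		struct foo_grp *fg;
		int ret;

		/* parse "MAJ:MIN VAL", look up the blkg, lock the queue */
		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, input, &ctx);
		if (ret)
			return ret;

		fg = pd_to_foo(blkg_to_pd(ctx.blkg, &blkcg_policy_foo));
		fg->limit = ctx.v;		/* hypothetical field */

		blkg_conf_finish(&ctx);		/* unlock and put the disk */
		return 0;
	}
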
+
+
+/**
+ * blkg_to_pd - get policy private data
+ * @blkg: blkg of interest
+ * @pol: policy of interest
+ *
+ * Return pointer to private data associated with the @blkg-@pol pair.
+ */
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol)
+{
+ return blkg ? blkg->pd[pol->plid] : NULL;
+}
+
+/**
+ * pd_to_blkg - get blkg associated with policy private data
+ * @pd: policy private data of interest
+ *
+ * @pd is policy private data. Determine the blkg it's associated with.
+ */
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
+{
+ return pd ? pd->blkg : NULL;
+}
+
+/**
+ * blkg_path - format cgroup path of blkg
+ * @blkg: blkg of interest
+ * @buf: target buffer
+ * @buflen: target buffer length
+ *
+ * Format the path of the cgroup of @blkg into @buf.
+ */
+static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
+{
+ int ret;
+
+ rcu_read_lock();
+ ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
+ rcu_read_unlock();
+ if (ret)
+ strncpy(buf, "<unavailable>", buflen);
+ return ret;
+}
-static inline char *blkg_path(struct blkio_group *blkg)
+/**
+ * blkg_get - get a blkg reference
+ * @blkg: blkg to get
+ *
+ * The caller should be holding queue_lock and an existing reference.
+ */
+static inline void blkg_get(struct blkcg_gq *blkg)
{
- return blkg->path;
+ lockdep_assert_held(blkg->q->queue_lock);
+ WARN_ON_ONCE(!blkg->refcnt);
+ blkg->refcnt++;
}
-#else
+void __blkg_release(struct blkcg_gq *blkg);
-struct blkio_group {
+/**
+ * blkg_put - put a blkg reference
+ * @blkg: blkg to put
+ *
+ * The caller should be holding queue_lock.
+ */
+static inline void blkg_put(struct blkcg_gq *blkg)
+{
+ lockdep_assert_held(blkg->q->queue_lock);
+ WARN_ON_ONCE(blkg->refcnt <= 0);
+ if (!--blkg->refcnt)
+ __blkg_release(blkg);
+}
+
+/**
+ * blkg_stat_add - add a value to a blkg_stat
+ * @stat: target blkg_stat
+ * @val: value to add
+ *
+ * Add @val to @stat. The caller is responsible for synchronizing calls to
+ * this function.
+ */
+static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
+{
+ u64_stats_update_begin(&stat->syncp);
+ stat->cnt += val;
+ u64_stats_update_end(&stat->syncp);
+}
+
+/**
+ * blkg_stat_read - read the current value of a blkg_stat
+ * @stat: blkg_stat to read
+ *
+ * Read the current value of @stat. This function can be called without
+ * synchronization and takes care of u64 atomicity.
+ */
+static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
+{
+ unsigned int start;
+ uint64_t v;
+
+ do {
+ start = u64_stats_fetch_begin(&stat->syncp);
+ v = stat->cnt;
+ } while (u64_stats_fetch_retry(&stat->syncp, start));
+
+ return v;
+}
+
+/**
+ * blkg_stat_reset - reset a blkg_stat
+ * @stat: blkg_stat to reset
+ */
+static inline void blkg_stat_reset(struct blkg_stat *stat)
+{
+ stat->cnt = 0;
+}
+
+/**
+ * blkg_rwstat_add - add a value to a blkg_rwstat
+ * @rwstat: target blkg_rwstat
+ * @rw: mask of REQ_{WRITE|SYNC}
+ * @val: value to add
+ *
+ * Add @val to @rwstat. The counters are chosen according to @rw. The
+ * caller is responsible for synchronizing calls to this function.
+ */
+static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
+ int rw, uint64_t val)
+{
+ u64_stats_update_begin(&rwstat->syncp);
+
+ if (rw & REQ_WRITE)
+ rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
+ else
+ rwstat->cnt[BLKG_RWSTAT_READ] += val;
+ if (rw & REQ_SYNC)
+ rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
+ else
+ rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;
+
+ u64_stats_update_end(&rwstat->syncp);
+}
+
+/**
+ * blkg_rwstat_read - read the current values of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Read and return the current snapshot of @rwstat.
+ * This function can be called without synchronization and takes care of
+ * u64 atomicity.
+ */
+static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
+{
+ unsigned int start;
+ struct blkg_rwstat tmp;
+
+ do {
+ start = u64_stats_fetch_begin(&rwstat->syncp);
+ tmp = *rwstat;
+ } while (u64_stats_fetch_retry(&rwstat->syncp, start));
+
+ return tmp;
+}
+
+/**
+ * blkg_rwstat_sum - read the total count of a blkg_rwstat
+ * @rwstat: blkg_rwstat to read
+ *
+ * Return the total count of @rwstat regardless of the IO direction. This
+ * function can be called without synchronization and takes care of u64
+ * atomicity.
+ */
+static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
+{
+ struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
+
+ return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
+}
+
+/**
+ * blkg_rwstat_reset - reset a blkg_rwstat
+ * @rwstat: blkg_rwstat to reset
+ */
+static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
+{
+ memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
+}
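
To make the update/read split concrete, a sketch pairing a serialized
hot-path update with a lockless reader; only the foo_grp stat fields are
assumptions, the helpers are the ones defined above:

	/* hot path -- caller serializes, e.g. under queue_lock */
	static void foo_account_io(struct foo_grp *fg, struct bio *bio)
	{
		blkg_rwstat_add(&fg->service_bytes, bio->bi_rw, bio->bi_size);
		blkg_stat_add(&fg->ios, 1);
	}

	/* readers need no locking thanks to u64_stats_sync */
	static uint64_t foo_read_ios(struct foo_grp *fg)
	{
		return blkg_stat_read(&fg->ios);
	}
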
+
+#else /* CONFIG_BLK_CGROUP */
+
+struct cgroup;
+
+struct blkg_policy_data {
};
-struct blkio_policy_type {
+struct blkcg_gq {
};
-static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
-static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
-
-static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
-
-#endif
-
-#define BLKIO_WEIGHT_MIN 10
-#define BLKIO_WEIGHT_MAX 1000
-#define BLKIO_WEIGHT_DEFAULT 500
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue);
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
-void blkiocg_set_start_empty_time(struct blkio_group *blkg);
-
-#define BLKG_FLAG_FNS(name) \
-static inline void blkio_mark_blkg_##name( \
- struct blkio_group_stats *stats) \
-{ \
- stats->flags |= (1 << BLKG_##name); \
-} \
-static inline void blkio_clear_blkg_##name( \
- struct blkio_group_stats *stats) \
-{ \
- stats->flags &= ~(1 << BLKG_##name); \
-} \
-static inline int blkio_blkg_##name(struct blkio_group_stats *stats) \
-{ \
- return (stats->flags & (1 << BLKG_##name)) != 0; \
-} \
-
-BLKG_FLAG_FNS(waiting)
-BLKG_FLAG_FNS(idling)
-BLKG_FLAG_FNS(empty)
-#undef BLKG_FLAG_FNS
-#else
-static inline void blkiocg_update_avg_queue_size_stats(
- struct blkio_group *blkg) {}
-static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue) {}
-static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{}
-static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
-static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
-#endif
-
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-extern struct blkio_cgroup blkio_root_cgroup;
-extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
-extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
-extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev,
- enum blkio_policy_id plid);
-extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
-extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
-extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
- void *key);
-void blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time,
- unsigned long unaccounted_time);
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
- bool direction, bool sync);
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
- uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
- bool sync);
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync);
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync);
-#else
-struct cgroup;
-static inline struct blkio_cgroup *
-cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
-static inline struct blkio_cgroup *
-task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
-
-static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev,
- enum blkio_policy_id plid) {}
-
-static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }
-
-static inline int
-blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
-
-static inline struct blkio_group *
-blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
-static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time,
- unsigned long unaccounted_time)
-{}
-static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync) {}
-static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
- uint64_t start_time, uint64_t io_start_time, bool direction,
- bool sync) {}
-static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync) {}
-static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-#endif
-#endif /* _BLK_CGROUP_H */
+struct blkcg_policy {
+};
+
+static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
+static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
+static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
+static inline void blkcg_drain_queue(struct request_queue *q) { }
+static inline void blkcg_exit_queue(struct request_queue *q) { }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
+static inline int blkcg_activate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol) { return 0; }
+static inline void blkcg_deactivate_policy(struct request_queue *q,
+ const struct blkcg_policy *pol) { }
+
+static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
+ struct blkcg_policy *pol) { return NULL; }
+static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
+static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
+static inline void blkg_get(struct blkcg_gq *blkg) { }
+static inline void blkg_put(struct blkcg_gq *blkg) { }
+
+#endif /* CONFIG_BLK_CGROUP */
+#endif /* _BLK_CGROUP_H */
diff --git a/block/blk-core.c b/block/blk-core.c
index 1f61b74867e4..3c923a7aeb56 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -29,11 +29,13 @@
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
+#include <linux/ratelimit.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
#include "blk.h"
+#include "blk-cgroup.h"
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -280,7 +282,7 @@ EXPORT_SYMBOL(blk_stop_queue);
*
* This function does not cancel any asynchronous activity arising
* out of elevator or throttling code. That would require elevaotor_exit()
- * and blk_throtl_exit() to be called with queue lock initialized.
+ * and blkcg_exit_queue() to be called with queue lock initialized.
*
*/
void blk_sync_queue(struct request_queue *q)
@@ -365,17 +367,23 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
spin_lock_irq(q->queue_lock);
- elv_drain_elevator(q);
- if (drain_all)
- blk_throtl_drain(q);
+ /*
+ * The caller might be trying to drain @q before its
+ * elevator is initialized.
+ */
+ if (q->elevator)
+ elv_drain_elevator(q);
+
+ blkcg_drain_queue(q);
/*
* This function might be called on a queue which failed
- * driver init after queue creation. Some drivers
- * (e.g. fd) get unhappy in such cases. Kick queue iff
- * dispatch queue has something on it.
+	 * driver init after queue creation or is not yet fully
+	 * active. Some drivers (e.g. fd and loop) get unhappy
+ * in such cases. Kick queue iff dispatch queue has
+ * something on it and @q has request_fn set.
*/
- if (!list_empty(&q->queue_head))
+ if (!list_empty(&q->queue_head) && q->request_fn)
__blk_run_queue(q);
drain |= q->rq.elvpriv;
@@ -403,6 +411,49 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
}
/**
+ * blk_queue_bypass_start - enter queue bypass mode
+ * @q: queue of interest
+ *
+ * In bypass mode, only the dispatch FIFO queue of @q is used. This
+ * function makes @q enter bypass mode and drains all requests which were
+ * throttled or issued before. On return, it's guaranteed that no request
+ * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
+ * inside queue or RCU read lock.
+ */
+void blk_queue_bypass_start(struct request_queue *q)
+{
+ bool drain;
+
+ spin_lock_irq(q->queue_lock);
+ drain = !q->bypass_depth++;
+ queue_flag_set(QUEUE_FLAG_BYPASS, q);
+ spin_unlock_irq(q->queue_lock);
+
+ if (drain) {
+ blk_drain_queue(q, false);
+ /* ensure blk_queue_bypass() is %true inside RCU read lock */
+ synchronize_rcu();
+ }
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
+
+/**
+ * blk_queue_bypass_end - leave queue bypass mode
+ * @q: queue of interest
+ *
+ * Leave bypass mode and restore the normal queueing behavior.
+ */
+void blk_queue_bypass_end(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ if (!--q->bypass_depth)
+ queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+ WARN_ON_ONCE(q->bypass_depth < 0);
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
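
The two helpers are meant to bracket queue-topology changes, which is exactly
how blkcg_activate_policy() in blk-cgroup.c uses them; in outline:

	blk_queue_bypass_start(q);	/* drain; only the dispatch FIFO runs */
	/* ... rearrange elevator/blkcg state; nothing holds ELVPRIV ... */
	blk_queue_bypass_end(q);	/* restore normal queueing */
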
+
+/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
*
@@ -418,6 +469,19 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
spin_lock_irq(lock);
+
+ /*
+ * Dead queue is permanently in bypass mode till released. Note
+ * that, unlike blk_queue_bypass_start(), we aren't performing
+ * synchronize_rcu() after entering bypass mode to avoid the delay
+ * as some drivers create and destroy a lot of queues while
+ * probing. This is still safe because blk_release_queue() will be
+ * called only after the queue refcnt drops to zero and nothing,
+ * RCU or not, would be traversing the queue by then.
+ */
+ q->bypass_depth++;
+ queue_flag_set(QUEUE_FLAG_BYPASS, q);
+
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
queue_flag_set(QUEUE_FLAG_DEAD, q);
@@ -428,13 +492,8 @@ void blk_cleanup_queue(struct request_queue *q)
spin_unlock_irq(lock);
mutex_unlock(&q->sysfs_lock);
- /*
- * Drain all requests queued before DEAD marking. The caller might
- * be trying to tear down @q before its elevator is initialized, in
- * which case we don't want to call into draining.
- */
- if (q->elevator)
- blk_drain_queue(q, true);
+ /* drain all requests queued before DEAD marking */
+ blk_drain_queue(q, true);
/* @q won't process any more request, flush async actions */
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
@@ -498,14 +557,15 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (err)
goto fail_id;
- if (blk_throtl_init(q))
- goto fail_id;
-
setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+ INIT_LIST_HEAD(&q->queue_head);
INIT_LIST_HEAD(&q->timeout_list);
INIT_LIST_HEAD(&q->icq_list);
+#ifdef CONFIG_BLK_CGROUP
+ INIT_LIST_HEAD(&q->blkg_list);
+#endif
INIT_LIST_HEAD(&q->flush_queue[0]);
INIT_LIST_HEAD(&q->flush_queue[1]);
INIT_LIST_HEAD(&q->flush_data_in_flight);
@@ -522,6 +582,18 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
*/
q->queue_lock = &q->__queue_lock;
+ /*
+ * A queue starts its life with bypass turned on to avoid
+ * unnecessary bypass on/off overhead and nasty surprises during
+ * init. The initial bypass will be finished at the end of
+ * blk_init_allocated_queue().
+ */
+ q->bypass_depth = 1;
+ __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
+ if (blkcg_init_queue(q))
+ goto fail_id;
+
return q;
fail_id:
@@ -614,15 +686,15 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
q->sg_reserved_size = INT_MAX;
- /*
- * all done
- */
- if (!elevator_init(q, NULL)) {
- blk_queue_congestion_threshold(q);
- return q;
- }
+ /* init elevator */
+ if (elevator_init(q, NULL))
+ return NULL;
- return NULL;
+ blk_queue_congestion_threshold(q);
+
+ /* all done, end the initial bypass */
+ blk_queue_bypass_end(q);
+ return q;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -648,33 +720,6 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
mempool_free(rq, q->rq.rq_pool);
}
-static struct request *
-blk_alloc_request(struct request_queue *q, struct io_cq *icq,
- unsigned int flags, gfp_t gfp_mask)
-{
- struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
-
- if (!rq)
- return NULL;
-
- blk_rq_init(q, rq);
-
- rq->cmd_flags = flags | REQ_ALLOCED;
-
- if (flags & REQ_ELVPRIV) {
- rq->elv.icq = icq;
- if (unlikely(elv_set_request(q, rq, gfp_mask))) {
- mempool_free(rq, q->rq.rq_pool);
- return NULL;
- }
- /* @rq->elv.icq holds on to io_context until @rq is freed */
- if (icq)
- get_io_context(icq->ioc);
- }
-
- return rq;
-}
-
/*
* ioc_batching returns true if the ioc is a valid batching request and
* should be given priority access to a request.
@@ -763,6 +808,22 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
}
/**
+ * rq_ioc - determine io_context for request allocation
+ * @bio: request being allocated is for this bio (can be %NULL)
+ *
+ * Determine io_context to use for request allocation for @bio. May return
+ * %NULL if %current->io_context doesn't exist.
+ */
+static struct io_context *rq_ioc(struct bio *bio)
+{
+#ifdef CONFIG_BLK_CGROUP
+ if (bio && bio->bi_ioc)
+ return bio->bi_ioc;
+#endif
+ return current->io_context;
+}
+
+/**
* get_request - get a free request
* @q: request_queue to allocate request from
* @rw_flags: RW and SYNC flags
@@ -779,7 +840,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
static struct request *get_request(struct request_queue *q, int rw_flags,
struct bio *bio, gfp_t gfp_mask)
{
- struct request *rq = NULL;
+ struct request *rq;
struct request_list *rl = &q->rq;
struct elevator_type *et;
struct io_context *ioc;
@@ -789,7 +850,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
int may_queue;
retry:
et = q->elevator->type;
- ioc = current->io_context;
+ ioc = rq_ioc(bio);
if (unlikely(blk_queue_dead(q)))
return NULL;
@@ -808,7 +869,7 @@ retry:
*/
if (!ioc && !retried) {
spin_unlock_irq(q->queue_lock);
- create_io_context(current, gfp_mask, q->node);
+ create_io_context(gfp_mask, q->node);
spin_lock_irq(q->queue_lock);
retried = true;
goto retry;
@@ -831,7 +892,7 @@ retry:
* process is not a "batcher", and not
* exempted by the IO scheduler
*/
- goto out;
+ return NULL;
}
}
}
@@ -844,7 +905,7 @@ retry:
* allocated with any setting of ->nr_requests
*/
if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
- goto out;
+ return NULL;
rl->count[is_sync]++;
rl->starved[is_sync] = 0;
@@ -859,8 +920,7 @@ retry:
* Also, lookup icq while holding queue_lock. If it doesn't exist,
* it will be created after releasing queue_lock.
*/
- if (blk_rq_should_init_elevator(bio) &&
- !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+ if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
rw_flags |= REQ_ELVPRIV;
rl->elvpriv++;
if (et->icq_cache && ioc)
@@ -871,41 +931,36 @@ retry:
rw_flags |= REQ_IO_STAT;
spin_unlock_irq(q->queue_lock);
- /* create icq if missing */
- if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
- icq = ioc_create_icq(q, gfp_mask);
- if (!icq)
- goto fail_icq;
- }
-
- rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+ /* allocate and init request */
+ rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+ if (!rq)
+ goto fail_alloc;
-fail_icq:
- if (unlikely(!rq)) {
- /*
- * Allocation failed presumably due to memory. Undo anything
- * we might have messed up.
- *
- * Allocating task should really be put onto the front of the
- * wait queue, but this is pretty rare.
- */
- spin_lock_irq(q->queue_lock);
- freed_request(q, rw_flags);
+ blk_rq_init(q, rq);
+ rq->cmd_flags = rw_flags | REQ_ALLOCED;
+
+ /* init elvpriv */
+ if (rw_flags & REQ_ELVPRIV) {
+ if (unlikely(et->icq_cache && !icq)) {
+ create_io_context(gfp_mask, q->node);
+ ioc = rq_ioc(bio);
+ if (!ioc)
+ goto fail_elvpriv;
+
+ icq = ioc_create_icq(ioc, q, gfp_mask);
+ if (!icq)
+ goto fail_elvpriv;
+ }
- /*
- * in the very unlikely event that allocation failed and no
- * requests for this direction was pending, mark us starved
- * so that freeing of a request in the other direction will
- * notice us. another possible fix would be to split the
- * rq mempool into READ and WRITE
- */
-rq_starved:
- if (unlikely(rl->count[is_sync] == 0))
- rl->starved[is_sync] = 1;
+ rq->elv.icq = icq;
+ if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
+ goto fail_elvpriv;
- goto out;
+ /* @rq->elv.icq holds io_context until @rq is freed */
+ if (icq)
+ get_io_context(icq->ioc);
}
-
+out:
/*
* ioc may be NULL here, and ioc_batching will be false. That's
* OK, if the queue is under the request limit then requests need
@@ -916,8 +971,48 @@ rq_starved:
ioc->nr_batch_requests--;
trace_block_getrq(q, bio, rw_flags & 1);
-out:
return rq;
+
+fail_elvpriv:
+ /*
+ * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
+ * and may fail indefinitely under memory pressure and thus
+ * shouldn't stall IO. Treat this request as !elvpriv. This will
+	 * disturb iosched and blkcg but weird is better than dead.
+ */
+ printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
+ dev_name(q->backing_dev_info.dev));
+
+ rq->cmd_flags &= ~REQ_ELVPRIV;
+ rq->elv.icq = NULL;
+
+ spin_lock_irq(q->queue_lock);
+ rl->elvpriv--;
+ spin_unlock_irq(q->queue_lock);
+ goto out;
+
+fail_alloc:
+ /*
+ * Allocation failed presumably due to memory. Undo anything we
+ * might have messed up.
+ *
+ * Allocating task should really be put onto the front of the wait
+ * queue, but this is pretty rare.
+ */
+ spin_lock_irq(q->queue_lock);
+ freed_request(q, rw_flags);
+
+ /*
+ * in the very unlikely event that allocation failed and no
+ * requests for this direction was pending, mark us starved so that
+ * freeing of a request in the other direction will notice
+ * us. another possible fix would be to split the rq mempool into
+ * READ and WRITE
+ */
+rq_starved:
+ if (unlikely(rl->count[is_sync] == 0))
+ rl->starved[is_sync] = 1;
+ return NULL;
}
/**
@@ -961,7 +1056,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
* up to a big batch of them for a small period time.
* See ioc_batching, ioc_set_batching
*/
- create_io_context(current, GFP_NOIO, q->node);
+ create_io_context(GFP_NOIO, q->node);
ioc_set_batching(q, current->io_context);
spin_lock_irq(q->queue_lock);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fb95dd2f889a..1e2d53b04858 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -155,20 +155,20 @@ void put_io_context(struct io_context *ioc)
}
EXPORT_SYMBOL(put_io_context);
-/* Called by the exiting task */
-void exit_io_context(struct task_struct *task)
+/**
+ * put_io_context_active - put active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Undo get_io_context_active(). If active reference reaches zero after
+ * put, @ioc can never issue further IOs and ioscheds are notified.
+ */
+void put_io_context_active(struct io_context *ioc)
{
- struct io_context *ioc;
- struct io_cq *icq;
struct hlist_node *n;
unsigned long flags;
+ struct io_cq *icq;
- task_lock(task);
- ioc = task->io_context;
- task->io_context = NULL;
- task_unlock(task);
-
- if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+ if (!atomic_dec_and_test(&ioc->active_ref)) {
put_io_context(ioc);
return;
}
@@ -197,6 +197,20 @@ retry:
put_io_context(ioc);
}
+/* Called by the exiting task */
+void exit_io_context(struct task_struct *task)
+{
+ struct io_context *ioc;
+
+ task_lock(task);
+ ioc = task->io_context;
+ task->io_context = NULL;
+ task_unlock(task);
+
+ atomic_dec(&ioc->nr_tasks);
+ put_io_context_active(ioc);
+}
+
/**
* ioc_clear_queue - break any ioc association with the specified queue
* @q: request_queue being cleared
@@ -218,19 +232,18 @@ void ioc_clear_queue(struct request_queue *q)
}
}
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
- int node)
+int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
struct io_context *ioc;
ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
node);
if (unlikely(!ioc))
- return;
+ return -ENOMEM;
/* initialize */
atomic_long_set(&ioc->refcount, 1);
- atomic_set(&ioc->nr_tasks, 1);
+ atomic_set(&ioc->active_ref, 1);
spin_lock_init(&ioc->lock);
INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
INIT_HLIST_HEAD(&ioc->icq_list);
@@ -250,6 +263,8 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
else
kmem_cache_free(iocontext_cachep, ioc);
task_unlock(task);
+
+ return 0;
}
/**
@@ -281,7 +296,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
return ioc;
}
task_unlock(task);
- } while (create_io_context(task, gfp_flags, node));
+ } while (!create_task_io_context(task, gfp_flags, node));
return NULL;
}
@@ -325,26 +340,23 @@ EXPORT_SYMBOL(ioc_lookup_icq);
/**
* ioc_create_icq - create and link io_cq
+ * @ioc: io_context of interest
* @q: request_queue of interest
* @gfp_mask: allocation mask
*
- * Make sure io_cq linking %current->io_context and @q exists. If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ * Make sure io_cq linking @ioc and @q exists. If the icq doesn't exist,
+ * it will be created using @gfp_mask.
*
* The caller is responsible for ensuring @ioc won't go away and @q is
* alive and will stay alive until this function returns.
*/
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+ gfp_t gfp_mask)
{
struct elevator_type *et = q->elevator->type;
- struct io_context *ioc;
struct io_cq *icq;
/* allocate stuff */
- ioc = create_io_context(current, gfp_mask, q->node);
- if (!ioc)
- return NULL;
-
icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
q->node);
if (!icq)
@@ -382,74 +394,6 @@ struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
return icq;
}
-void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
-{
- struct io_cq *icq;
- struct hlist_node *n;
-
- hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
- icq->flags |= flags;
-}
-
-/**
- * ioc_ioprio_changed - notify ioprio change
- * @ioc: io_context of interest
- * @ioprio: new ioprio
- *
- * @ioc's ioprio has changed to @ioprio. Set %ICQ_IOPRIO_CHANGED for all
- * icq's. iosched is responsible for checking the bit and applying it on
- * request issue path.
- */
-void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->lock, flags);
- ioc->ioprio = ioprio;
- ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
- spin_unlock_irqrestore(&ioc->lock, flags);
-}
-
-/**
- * ioc_cgroup_changed - notify cgroup change
- * @ioc: io_context of interest
- *
- * @ioc's cgroup has changed. Set %ICQ_CGROUP_CHANGED for all icq's.
- * iosched is responsible for checking the bit and applying it on request
- * issue path.
- */
-void ioc_cgroup_changed(struct io_context *ioc)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->lock, flags);
- ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
- spin_unlock_irqrestore(&ioc->lock, flags);
-}
-EXPORT_SYMBOL(ioc_cgroup_changed);
-
-/**
- * icq_get_changed - fetch and clear icq changed mask
- * @icq: icq of interest
- *
- * Fetch and clear ICQ_*_CHANGED bits from @icq. Grabs and releases
- * @icq->ioc->lock.
- */
-unsigned icq_get_changed(struct io_cq *icq)
-{
- unsigned int changed = 0;
- unsigned long flags;
-
- if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
- spin_lock_irqsave(&icq->ioc->lock, flags);
- changed = icq->flags & ICQ_CHANGED_MASK;
- icq->flags &= ~ICQ_CHANGED_MASK;
- spin_unlock_irqrestore(&icq->ioc->lock, flags);
- }
- return changed;
-}
-EXPORT_SYMBOL(icq_get_changed);
-
static int __init blk_ioc_init(void)
{
iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index cf150011d808..aa41b47c22d2 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -9,6 +9,7 @@
#include <linux/blktrace_api.h>
#include "blk.h"
+#include "blk-cgroup.h"
struct queue_sysfs_entry {
struct attribute attr;
@@ -479,6 +480,8 @@ static void blk_release_queue(struct kobject *kobj)
blk_sync_queue(q);
+ blkcg_exit_queue(q);
+
if (q->elevator) {
spin_lock_irq(q->queue_lock);
ioc_clear_queue(q);
@@ -486,15 +489,12 @@ static void blk_release_queue(struct kobject *kobj)
elevator_exit(q->elevator);
}
- blk_throtl_exit(q);
-
if (rl->rq_pool)
mempool_destroy(rl->rq_pool);
if (q->queue_tags)
__blk_queue_free_tags(q);
- blk_throtl_release(q);
blk_trace_shutdown(q);
bdi_destroy(&q->backing_dev_info);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f2ddb94626bd..14dedecfc7e8 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -21,6 +21,8 @@ static int throtl_quantum = 32;
/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10; /* 100 ms */
+static struct blkcg_policy blkcg_policy_throtl;
+
/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
@@ -38,9 +40,17 @@ struct throtl_rb_root {
#define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
+/* Per-cpu group stats */
+struct tg_stats_cpu {
+ /* total bytes transferred */
+ struct blkg_rwstat service_bytes;
+ /* total IOs serviced, post merge */
+ struct blkg_rwstat serviced;
+};
+
struct throtl_grp {
- /* List of throtl groups on the request queue*/
- struct hlist_node tg_node;
+ /* must be the first member */
+ struct blkg_policy_data pd;
/* active throtl group service_tree member */
struct rb_node rb_node;
@@ -52,8 +62,6 @@ struct throtl_grp {
*/
unsigned long disptime;
- struct blkio_group blkg;
- atomic_t ref;
unsigned int flags;
/* Two lists for READ and WRITE */
@@ -80,18 +88,18 @@ struct throtl_grp {
/* Some throttle limits got updated for the group */
int limits_changed;
- struct rcu_head rcu_head;
+ /* Per cpu stats pointer */
+ struct tg_stats_cpu __percpu *stats_cpu;
+
+ /* List of tgs waiting for per cpu stats memory to be allocated */
+ struct list_head stats_alloc_node;
};
struct throtl_data
{
- /* List of throtl groups */
- struct hlist_head tg_list;
-
/* service tree for active throtl groups */
struct throtl_rb_root tg_service_tree;
- struct throtl_grp *root_tg;
struct request_queue *queue;
/* Total Number of queued bios on READ and WRITE lists */
@@ -108,6 +116,33 @@ struct throtl_data
int limits_changed;
};
+/* list and work item to allocate percpu group stats */
+static DEFINE_SPINLOCK(tg_stats_alloc_lock);
+static LIST_HEAD(tg_stats_alloc_list);
+
+static void tg_stats_alloc_fn(struct work_struct *);
+static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
+
+static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
+{
+ return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
+}
+
+static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
+{
+ return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
+}
+
+static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
+{
+ return pd_to_blkg(&tg->pd);
+}
+
+static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
+{
+ return blkg_to_tg(td->queue->root_blkg);
+}
+
enum tg_state_flags {
THROTL_TG_FLAG_on_rr = 0, /* on round-robin busy list */
};
@@ -128,244 +163,148 @@ static inline int throtl_tg_##name(const struct throtl_grp *tg) \
THROTL_TG_FNS(on_rr);
-#define throtl_log_tg(td, tg, fmt, args...) \
- blk_add_trace_msg((td)->queue, "throtl %s " fmt, \
- blkg_path(&(tg)->blkg), ##args); \
+#define throtl_log_tg(td, tg, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
+} while (0)
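
Call sites are unchanged by the conversion; a representative (hypothetical)
use:

	throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu",
		      tg->bps[READ], tg->bps[WRITE]);
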
#define throtl_log(td, fmt, args...) \
blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
-static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
-{
- if (blkg)
- return container_of(blkg, struct throtl_grp, blkg);
-
- return NULL;
-}
-
static inline unsigned int total_nr_queued(struct throtl_data *td)
{
return td->nr_queued[0] + td->nr_queued[1];
}
-static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
-{
- atomic_inc(&tg->ref);
- return tg;
-}
-
-static void throtl_free_tg(struct rcu_head *head)
+/*
+ * Worker for allocating per-cpu stats for tgs. This is scheduled on the
+ * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * allocation.
+ */
+static void tg_stats_alloc_fn(struct work_struct *work)
{
- struct throtl_grp *tg;
+ static struct tg_stats_cpu *stats_cpu; /* this fn is non-reentrant */
+ struct delayed_work *dwork = to_delayed_work(work);
+ bool empty = false;
+
+alloc_stats:
+ if (!stats_cpu) {
+ stats_cpu = alloc_percpu(struct tg_stats_cpu);
+ if (!stats_cpu) {
+ /* allocation failed, try again after some time */
+ queue_delayed_work(system_nrt_wq, dwork,
+ msecs_to_jiffies(10));
+ return;
+ }
+ }
- tg = container_of(head, struct throtl_grp, rcu_head);
- free_percpu(tg->blkg.stats_cpu);
- kfree(tg);
-}
+ spin_lock_irq(&tg_stats_alloc_lock);
-static void throtl_put_tg(struct throtl_grp *tg)
-{
- BUG_ON(atomic_read(&tg->ref) <= 0);
- if (!atomic_dec_and_test(&tg->ref))
- return;
+ if (!list_empty(&tg_stats_alloc_list)) {
+ struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
+ struct throtl_grp,
+ stats_alloc_node);
+ swap(tg->stats_cpu, stats_cpu);
+ list_del_init(&tg->stats_alloc_node);
+ }
- /*
- * A group is freed in rcu manner. But having an rcu lock does not
- * mean that one can access all the fields of blkg and assume these
- * are valid. For example, don't try to follow throtl_data and
- * request queue links.
- *
- * Having a reference to blkg under an rcu allows acess to only
- * values local to groups like group stats and group rate limits
- */
- call_rcu(&tg->rcu_head, throtl_free_tg);
+ empty = list_empty(&tg_stats_alloc_list);
+ spin_unlock_irq(&tg_stats_alloc_lock);
+ if (!empty)
+ goto alloc_stats;
}
-static void throtl_init_group(struct throtl_grp *tg)
+static void throtl_pd_init(struct blkcg_gq *blkg)
{
- INIT_HLIST_NODE(&tg->tg_node);
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+
RB_CLEAR_NODE(&tg->rb_node);
bio_list_init(&tg->bio_lists[0]);
bio_list_init(&tg->bio_lists[1]);
tg->limits_changed = false;
- /* Practically unlimited BW */
- tg->bps[0] = tg->bps[1] = -1;
- tg->iops[0] = tg->iops[1] = -1;
-
- /*
-	 * Take the initial reference that will be released on destroy.
-	 * This can be thought of as a joint reference by cgroup and
- * request queue which will be dropped by either request queue
- * exit or cgroup deletion path depending on who is exiting first.
- */
- atomic_set(&tg->ref, 1);
-}
-
-/* Should be called with rcu read lock held (needed for blkcg) */
-static void
-throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
-{
- hlist_add_head(&tg->tg_node, &td->tg_list);
- td->nr_undestroyed_grps++;
-}
-
-static void
-__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
-{
- struct backing_dev_info *bdi = &td->queue->backing_dev_info;
- unsigned int major, minor;
-
- if (!tg || tg->blkg.dev)
- return;
+ tg->bps[READ] = -1;
+ tg->bps[WRITE] = -1;
+ tg->iops[READ] = -1;
+ tg->iops[WRITE] = -1;
/*
- * Fill in device details for a group which might not have been
- * filled at group creation time as queue was being instantiated
- * and driver had not attached a device yet
+ * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
+	 * but the percpu allocator can't be called from the IO path. Queue tg
+	 * on tg_stats_alloc_list and allocate from work item.
*/
- if (bdi->dev && dev_name(bdi->dev)) {
- sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
- tg->blkg.dev = MKDEV(major, minor);
- }
+ spin_lock(&tg_stats_alloc_lock);
+ list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
+ queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+ spin_unlock(&tg_stats_alloc_lock);
}
-/*
- * Should be called without queue lock held. Here queue lock will be
- * taken rarely. It will be taken only once during the lifetime of a group
- * if need be.
- */
-static void
-throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
+static void throtl_pd_exit(struct blkcg_gq *blkg)
{
- if (!tg || tg->blkg.dev)
- return;
+ struct throtl_grp *tg = blkg_to_tg(blkg);
- spin_lock_irq(td->queue->queue_lock);
- __throtl_tg_fill_dev_details(td, tg);
- spin_unlock_irq(td->queue->queue_lock);
-}
+ spin_lock(&tg_stats_alloc_lock);
+ list_del_init(&tg->stats_alloc_node);
+ spin_unlock(&tg_stats_alloc_lock);
-static void throtl_init_add_tg_lists(struct throtl_data *td,
- struct throtl_grp *tg, struct blkio_cgroup *blkcg)
-{
- __throtl_tg_fill_dev_details(td, tg);
-
- /* Add group onto cgroup list */
- blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
- tg->blkg.dev, BLKIO_POLICY_THROTL);
-
- tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
- tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
- tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
- tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
-
- throtl_add_group_to_td_list(td, tg);
+ free_percpu(tg->stats_cpu);
}
-/* Should be called without queue lock and outside of rcu period */
-static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
+static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
- struct throtl_grp *tg = NULL;
- int ret;
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+ int cpu;
- tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
- if (!tg)
- return NULL;
+ if (tg->stats_cpu == NULL)
+ return;
- ret = blkio_alloc_blkg_stats(&tg->blkg);
+ for_each_possible_cpu(cpu) {
+ struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
- if (ret) {
- kfree(tg);
- return NULL;
+ blkg_rwstat_reset(&sc->service_bytes);
+ blkg_rwstat_reset(&sc->serviced);
}
-
- throtl_init_group(tg);
- return tg;
}
-static struct
-throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
+static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
+ struct blkcg *blkcg)
{
- struct throtl_grp *tg = NULL;
- void *key = td;
-
/*
- * This is the common case when there are no blkio cgroups.
- * Avoid lookup in this case
- */
- if (blkcg == &blkio_root_cgroup)
- tg = td->root_tg;
- else
- tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+ * This is the common case when there are no blkcgs. Avoid lookup
+ * in this case
+ */
+ if (blkcg == &blkcg_root)
+ return td_root_tg(td);
- __throtl_tg_fill_dev_details(td, tg);
- return tg;
+ return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}
-static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
+ struct blkcg *blkcg)
{
- struct throtl_grp *tg = NULL, *__tg = NULL;
- struct blkio_cgroup *blkcg;
struct request_queue *q = td->queue;
-
- /* no throttling for dead queue */
- if (unlikely(blk_queue_dead(q)))
- return NULL;
-
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
- tg = throtl_find_tg(td, blkcg);
- if (tg) {
- rcu_read_unlock();
- return tg;
- }
-
- /*
- * Need to allocate a group. Allocation of group also needs allocation
-	 * of per-cpu stats which in turn takes a mutex() and can block. Hence
- * we need to drop rcu lock and queue_lock before we call alloc.
- */
- rcu_read_unlock();
- spin_unlock_irq(q->queue_lock);
-
- tg = throtl_alloc_tg(td);
-
-	/* Group allocated and queue is still alive. Take the lock */
- spin_lock_irq(q->queue_lock);
-
- /* Make sure @q is still alive */
- if (unlikely(blk_queue_dead(q))) {
- kfree(tg);
- return NULL;
- }
-
- /*
- * Initialize the new group. After sleeping, read the blkcg again.
- */
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
+ struct throtl_grp *tg = NULL;
/*
- * If some other thread already allocated the group while we were
- * not holding queue lock, free up the group
+ * This is the common case when there are no blkcgs. Avoid lookup
+ * in this case
*/
- __tg = throtl_find_tg(td, blkcg);
-
- if (__tg) {
- kfree(tg);
- rcu_read_unlock();
- return __tg;
- }
-
- /* Group allocation failed. Account the IO to root group */
- if (!tg) {
- tg = td->root_tg;
- return tg;
+ if (blkcg == &blkcg_root) {
+ tg = td_root_tg(td);
+ } else {
+ struct blkcg_gq *blkg;
+
+ blkg = blkg_lookup_create(blkcg, q);
+
+ /* if %NULL and @q is alive, fall back to root_tg */
+ if (!IS_ERR(blkg))
+ tg = blkg_to_tg(blkg);
+ else if (!blk_queue_dead(q))
+ tg = td_root_tg(td);
}
- throtl_init_add_tg_lists(td, tg, blkcg);
- rcu_read_unlock();
return tg;
}
@@ -734,16 +673,41 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
return 0;
}
+static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
+ int rw)
+{
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+ struct tg_stats_cpu *stats_cpu;
+ unsigned long flags;
+
+ /* If per cpu stats are not allocated yet, don't do any accounting. */
+ if (tg->stats_cpu == NULL)
+ return;
+
+ /*
+ * Disabling interrupts to provide mutual exclusion between two
+	 * writes on the same CPU. It probably is not needed for 64bit. Not
+ * optimizing that case yet.
+ */
+ local_irq_save(flags);
+
+ stats_cpu = this_cpu_ptr(tg->stats_cpu);
+
+ blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
+ blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
+
+ local_irq_restore(flags);
+}
+
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
bool rw = bio_data_dir(bio);
- bool sync = rw_is_sync(bio->bi_rw);
/* Charge the bio to the group */
tg->bytes_disp[rw] += bio->bi_size;
tg->io_disp[rw]++;
- blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
+ throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
}
static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
@@ -753,7 +717,7 @@ static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
bio_list_add(&tg->bio_lists[rw], bio);
/* Take a bio reference on tg */
- throtl_ref_get_tg(tg);
+ blkg_get(tg_to_blkg(tg));
tg->nr_queued[rw]++;
td->nr_queued[rw]++;
throtl_enqueue_tg(td, tg);
@@ -786,8 +750,8 @@ static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
bio = bio_list_pop(&tg->bio_lists[rw]);
tg->nr_queued[rw]--;
- /* Drop bio reference on tg */
- throtl_put_tg(tg);
+ /* Drop bio reference on blkg */
+ blkg_put(tg_to_blkg(tg));
BUG_ON(td->nr_queued[rw] <= 0);
td->nr_queued[rw]--;
@@ -865,8 +829,8 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
static void throtl_process_limit_change(struct throtl_data *td)
{
- struct throtl_grp *tg;
- struct hlist_node *pos, *n;
+ struct request_queue *q = td->queue;
+ struct blkcg_gq *blkg, *n;
if (!td->limits_changed)
return;
@@ -875,7 +839,9 @@ static void throtl_process_limit_change(struct throtl_data *td)
throtl_log(td, "limits changed");
- hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
+ list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+ struct throtl_grp *tg = blkg_to_tg(blkg);
+
if (!tg->limits_changed)
continue;
@@ -973,120 +939,159 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
}
}
-static void
-throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
+static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- /* Something wrong if we are trying to remove same group twice */
- BUG_ON(hlist_unhashed(&tg->tg_node));
+ struct throtl_grp *tg = pd_to_tg(pd);
+ struct blkg_rwstat rwstat = { }, tmp;
+ int i, cpu;
- hlist_del_init(&tg->tg_node);
+ for_each_possible_cpu(cpu) {
+ struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
- /*
- * Put the reference taken at the time of creation so that when all
- * queues are gone, group can be destroyed.
- */
- throtl_put_tg(tg);
- td->nr_undestroyed_grps--;
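+		/*
+		 * @off comes from cftype->private and is the offsetof() of
+		 * the stat inside tg_stats_cpu (see throtl_files[]), so this
+		 * reads either ->service_bytes or ->serviced on each CPU.
+		 */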
+ tmp = blkg_rwstat_read((void *)sc + off);
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ rwstat.cnt[i] += tmp.cnt[i];
+ }
+
+ return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
-static void throtl_release_tgs(struct throtl_data *td)
+static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- struct hlist_node *pos, *n;
- struct throtl_grp *tg;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
- hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
- /*
- * If cgroup removal path got to blk_group first and removed
-		 * it from cgroup list, then it will take care of destroying
-		 * the tg also.
- */
- if (!blkiocg_del_blkio_group(&tg->blkg))
- throtl_destroy_tg(td, tg);
- }
+ blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
+ cft->private, true);
+ return 0;
}
-/*
- * Blk cgroup controller notification saying that blkio_group object is being
- * delinked as associated cgroup object is going away. That also means that
- * no new IO will come into this group. So get rid of this group as soon as
- * any pending IO in the group is finished.
- *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means "key" is a valid throtl_data pointer as long as we
- * hold the rcu read lock.
- *
- * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
- * it should not be NULL as even if the queue was going away, the cgroup
- * deletion path got to it first.
-void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
+static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
{
- unsigned long flags;
- struct throtl_data *td = key;
+ struct throtl_grp *tg = pd_to_tg(pd);
+ u64 v = *(u64 *)((void *)tg + off);
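+	/*
+	 * @off is the offsetof() of the limit field inside throtl_grp
+	 * (from cftype->private); -1 is the "no limit" default set in
+	 * throtl_pd_init() and is not reported.
+	 */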
- spin_lock_irqsave(td->queue->queue_lock, flags);
- throtl_destroy_tg(td, tg_of_blkg(blkg));
- spin_unlock_irqrestore(td->queue->queue_lock, flags);
+ if (v == -1)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, v);
}
-static void throtl_update_blkio_group_common(struct throtl_data *td,
- struct throtl_grp *tg)
+static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
{
- xchg(&tg->limits_changed, true);
- xchg(&td->limits_changed, true);
- /* Schedule a work now to process the limit change */
- throtl_schedule_delayed_work(td, 0);
+ struct throtl_grp *tg = pd_to_tg(pd);
+ unsigned int v = *(unsigned int *)((void *)tg + off);
+
+ if (v == -1)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, v);
}
-/*
- * For all update functions, key should be a valid pointer because these
- * update functions are called under blkcg_lock, that means, blkg is
- * valid and in turn key is valid. queue exit path can not race because
- * of blkcg_lock
- *
- * Can not take queue lock in update functions as queue lock under blkcg_lock
- * is not allowed. Under other paths we take blkcg_lock under queue_lock.
- */
-static void throtl_update_blkio_group_read_bps(void *key,
- struct blkio_group *blkg, u64 read_bps)
+static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
-
- tg->bps[READ] = read_bps;
- throtl_update_blkio_group_common(td, tg);
+ blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
+ &blkcg_policy_throtl, cft->private, false);
+ return 0;
}
-static void throtl_update_blkio_group_write_bps(void *key,
- struct blkio_group *blkg, u64 write_bps)
+static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
-
- tg->bps[WRITE] = write_bps;
- throtl_update_blkio_group_common(td, tg);
+ blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
+ &blkcg_policy_throtl, cft->private, false);
+ return 0;
}
-static void throtl_update_blkio_group_read_iops(void *key,
- struct blkio_group *blkg, unsigned int read_iops)
+static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
+ bool is_u64)
{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkg_conf_ctx ctx;
+ struct throtl_grp *tg;
+ struct throtl_data *td;
+ int ret;
+
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
+ if (ret)
+ return ret;
+
+ tg = blkg_to_tg(ctx.blkg);
+ td = ctx.blkg->q->td;
- tg->iops[READ] = read_iops;
- throtl_update_blkio_group_common(td, tg);
+ if (!ctx.v)
+ ctx.v = -1;
+
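+	/*
+	 * cft->private is the offsetof() of the bps/iops field being set;
+	 * writing 0 clears the limit, which is stored as the -1 "unlimited"
+	 * default.
+	 */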
+ if (is_u64)
+ *(u64 *)((void *)tg + cft->private) = ctx.v;
+ else
+ *(unsigned int *)((void *)tg + cft->private) = ctx.v;
+
+ /* XXX: we don't need the following deferred processing */
+ xchg(&tg->limits_changed, true);
+ xchg(&td->limits_changed, true);
+ throtl_schedule_delayed_work(td, 0);
+
+ blkg_conf_finish(&ctx);
+ return 0;
}
-static void throtl_update_blkio_group_write_iops(void *key,
- struct blkio_group *blkg, unsigned int write_iops)
+static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+ const char *buf)
{
- struct throtl_data *td = key;
- struct throtl_grp *tg = tg_of_blkg(blkg);
+ return tg_set_conf(cgrp, cft, buf, true);
+}
- tg->iops[WRITE] = write_iops;
- throtl_update_blkio_group_common(td, tg);
+static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+ const char *buf)
+{
+ return tg_set_conf(cgrp, cft, buf, false);
}
+static struct cftype throtl_files[] = {
+ {
+ .name = "throttle.read_bps_device",
+ .private = offsetof(struct throtl_grp, bps[READ]),
+ .read_seq_string = tg_print_conf_u64,
+ .write_string = tg_set_conf_u64,
+ .max_write_len = 256,
+ },
+ {
+ .name = "throttle.write_bps_device",
+ .private = offsetof(struct throtl_grp, bps[WRITE]),
+ .read_seq_string = tg_print_conf_u64,
+ .write_string = tg_set_conf_u64,
+ .max_write_len = 256,
+ },
+ {
+ .name = "throttle.read_iops_device",
+ .private = offsetof(struct throtl_grp, iops[READ]),
+ .read_seq_string = tg_print_conf_uint,
+ .write_string = tg_set_conf_uint,
+ .max_write_len = 256,
+ },
+ {
+ .name = "throttle.write_iops_device",
+ .private = offsetof(struct throtl_grp, iops[WRITE]),
+ .read_seq_string = tg_print_conf_uint,
+ .write_string = tg_set_conf_uint,
+ .max_write_len = 256,
+ },
+ {
+ .name = "throttle.io_service_bytes",
+ .private = offsetof(struct tg_stats_cpu, service_bytes),
+ .read_seq_string = tg_print_cpu_rwstat,
+ },
+ {
+ .name = "throttle.io_serviced",
+ .private = offsetof(struct tg_stats_cpu, serviced),
+ .read_seq_string = tg_print_cpu_rwstat,
+ },
+ { } /* terminate */
+};
+
static void throtl_shutdown_wq(struct request_queue *q)
{
struct throtl_data *td = q->td;
@@ -1094,19 +1099,13 @@ static void throtl_shutdown_wq(struct request_queue *q)
cancel_delayed_work_sync(&td->throtl_work);
}
-static struct blkio_policy_type blkio_policy_throtl = {
- .ops = {
- .blkio_unlink_group_fn = throtl_unlink_blkio_group,
- .blkio_update_group_read_bps_fn =
- throtl_update_blkio_group_read_bps,
- .blkio_update_group_write_bps_fn =
- throtl_update_blkio_group_write_bps,
- .blkio_update_group_read_iops_fn =
- throtl_update_blkio_group_read_iops,
- .blkio_update_group_write_iops_fn =
- throtl_update_blkio_group_write_iops,
- },
- .plid = BLKIO_POLICY_THROTL,
+static struct blkcg_policy blkcg_policy_throtl = {
+ .pd_size = sizeof(struct throtl_grp),
+ .cftypes = throtl_files,
+
+ .pd_init_fn = throtl_pd_init,
+ .pd_exit_fn = throtl_pd_exit,
+ .pd_reset_stats_fn = throtl_pd_reset_stats,
};
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
@@ -1114,7 +1113,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
struct throtl_data *td = q->td;
struct throtl_grp *tg;
bool rw = bio_data_dir(bio), update_disptime = true;
- struct blkio_cgroup *blkcg;
+ struct blkcg *blkcg;
bool throttled = false;
if (bio->bi_rw & REQ_THROTTLED) {
@@ -1122,33 +1121,31 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
goto out;
}
+ /* bio_associate_current() needs ioc, try creating */
+ create_io_context(GFP_ATOMIC, q->node);
+
/*
* A throtl_grp pointer retrieved under rcu can be used to access
* basic fields like stats and io rates. If a group has no rules,
 	 * just update the dispatch stats in a lockless manner and return.
*/
-
rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
- tg = throtl_find_tg(td, blkcg);
+ blkcg = bio_blkcg(bio);
+ tg = throtl_lookup_tg(td, blkcg);
if (tg) {
- throtl_tg_fill_dev_details(td, tg);
-
if (tg_no_rule_group(tg, rw)) {
- blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
- rw, rw_is_sync(bio->bi_rw));
- rcu_read_unlock();
- goto out;
+ throtl_update_dispatch_stats(tg_to_blkg(tg),
+ bio->bi_size, bio->bi_rw);
+ goto out_unlock_rcu;
}
}
- rcu_read_unlock();
/*
* Either group has not been allocated yet or it is not an unlimited
* IO group
*/
spin_lock_irq(q->queue_lock);
- tg = throtl_get_tg(td);
+ tg = throtl_lookup_create_tg(td, blkcg);
if (unlikely(!tg))
goto out_unlock;
@@ -1189,6 +1186,7 @@ queue_bio:
tg->io_disp[rw], tg->iops[rw],
tg->nr_queued[READ], tg->nr_queued[WRITE]);
+ bio_associate_current(bio);
throtl_add_bio_tg(q->td, tg, bio);
throttled = true;
@@ -1199,6 +1197,8 @@ queue_bio:
out_unlock:
spin_unlock_irq(q->queue_lock);
+out_unlock_rcu:
+ rcu_read_unlock();
out:
return throttled;
}
@@ -1241,79 +1241,31 @@ void blk_throtl_drain(struct request_queue *q)
int blk_throtl_init(struct request_queue *q)
{
struct throtl_data *td;
- struct throtl_grp *tg;
+ int ret;
td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
if (!td)
return -ENOMEM;
- INIT_HLIST_HEAD(&td->tg_list);
td->tg_service_tree = THROTL_RB_ROOT;
td->limits_changed = false;
INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
- /* alloc and Init root group. */
+ q->td = td;
td->queue = q;
- tg = throtl_alloc_tg(td);
- if (!tg) {
+ /* activate policy */
+ ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
+ if (ret)
kfree(td);
- return -ENOMEM;
- }
-
- td->root_tg = tg;
-
- rcu_read_lock();
- throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
- rcu_read_unlock();
-
- /* Attach throtl data to request queue */
- q->td = td;
- return 0;
+ return ret;
}
void blk_throtl_exit(struct request_queue *q)
{
- struct throtl_data *td = q->td;
- bool wait = false;
-
- BUG_ON(!td);
-
+ BUG_ON(!q->td);
throtl_shutdown_wq(q);
-
- spin_lock_irq(q->queue_lock);
- throtl_release_tgs(td);
-
- /* If there are other groups */
- if (td->nr_undestroyed_grps > 0)
- wait = true;
-
- spin_unlock_irq(q->queue_lock);
-
- /*
- * Wait for tg->blkg->key accessors to exit their grace periods.
- * Do this wait only if there are other undestroyed groups out
- * there (other than root group). This can happen if cgroup deletion
- * path claimed the responsibility of cleaning up a group before
-	 * queue cleanup code gets to the group.
- *
- * Do not call synchronize_rcu() unconditionally as there are drivers
- * which create/delete request queue hundreds of times during scan/boot
- * and synchronize_rcu() can take significant time and slow down boot.
- */
- if (wait)
- synchronize_rcu();
-
- /*
-	 * Just being safe: if somebody updated limits through the cgroup
-	 * after the previous flush and another work item got queued, cancel
-	 * it.
- */
- throtl_shutdown_wq(q);
-}
-
-void blk_throtl_release(struct request_queue *q)
-{
+ blkcg_deactivate_policy(q, &blkcg_policy_throtl);
kfree(q->td);
}
@@ -1323,8 +1275,7 @@ static int __init throtl_init(void)
if (!kthrotld_workqueue)
panic("Failed to create kthrotld\n");
- blkio_policy_register(&blkio_policy_throtl);
- return 0;
+ return blkcg_policy_register(&blkcg_policy_throtl);
}
module_init(throtl_init);
diff --git a/block/blk.h b/block/blk.h
index d45be871329e..85f6ae42f7d3 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -23,7 +23,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio);
-void blk_drain_queue(struct request_queue *q, bool drain_all);
+void blk_queue_bypass_start(struct request_queue *q);
+void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
@@ -144,9 +145,6 @@ void blk_queue_congestion_threshold(struct request_queue *q);
int blk_dev_init(void);
-void elv_quiesce_start(struct request_queue *q);
-void elv_quiesce_end(struct request_queue *q);
-
/*
* Return the threshold (number of used requests) at which the queue is
@@ -186,32 +184,30 @@ static inline int blk_do_io_stat(struct request *rq)
*/
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+ gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
- int node);
+int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
/**
* create_io_context - try to create task->io_context
- * @task: target task
* @gfp_mask: allocation mask
* @node: allocation node
*
- * If @task->io_context is %NULL, allocate a new io_context and install it.
- * Returns the current @task->io_context which may be %NULL if allocation
- * failed.
+ * If %current->io_context is %NULL, allocate a new io_context and install
+ * it.  Returns %current->io_context, which may be %NULL if allocation
+ * failed.
*
* Note that this function can't be called with IRQ disabled because
- * task_lock which protects @task->io_context is IRQ-unsafe.
+ * task_lock which protects %current->io_context is IRQ-unsafe.
*/
-static inline struct io_context *create_io_context(struct task_struct *task,
- gfp_t gfp_mask, int node)
+static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
WARN_ON_ONCE(irqs_disabled());
- if (unlikely(!task->io_context))
- create_io_context_slowpath(task, gfp_mask, node);
- return task->io_context;
+ if (unlikely(!current->io_context))
+ create_task_io_context(current, gfp_mask, node);
+ return current->io_context;
}
/*
@@ -222,7 +218,6 @@ extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
-extern void blk_throtl_release(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
@@ -231,7 +226,6 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
-static inline void blk_throtl_release(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#endif /* BLK_INTERNAL_H */
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3c38536bd52c..673c977cc2bf 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -15,7 +15,9 @@
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
-#include "cfq.h"
+#include "blk-cgroup.h"
+
+static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
/*
* tunables
@@ -171,8 +173,53 @@ enum wl_type_t {
SYNC_WORKLOAD = 2
};
+struct cfqg_stats {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ /* total bytes transferred */
+ struct blkg_rwstat service_bytes;
+ /* total IOs serviced, post merge */
+ struct blkg_rwstat serviced;
+ /* number of ios merged */
+ struct blkg_rwstat merged;
+ /* total time spent on device in ns, may not be accurate w/ queueing */
+ struct blkg_rwstat service_time;
+ /* total time spent waiting in scheduler queue in ns */
+ struct blkg_rwstat wait_time;
+ /* number of IOs queued up */
+ struct blkg_rwstat queued;
+ /* total sectors transferred */
+ struct blkg_stat sectors;
+ /* total disk time and nr sectors dispatched by this group */
+ struct blkg_stat time;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ /* time not charged to this cgroup */
+ struct blkg_stat unaccounted_time;
+ /* sum of number of ios queued across all samples */
+ struct blkg_stat avg_queue_size_sum;
+ /* count of samples taken for average */
+ struct blkg_stat avg_queue_size_samples;
+ /* how many times this group has been removed from service tree */
+ struct blkg_stat dequeue;
+ /* total time spent waiting for it to be assigned a timeslice. */
+ struct blkg_stat group_wait_time;
+ /* time spent idling for this blkcg_gq */
+ struct blkg_stat idle_time;
+	/* total time the active queue was empty while other requests were queued */
+ struct blkg_stat empty_time;
+ /* fields after this shouldn't be cleared on stat reset */
+ uint64_t start_group_wait_time;
+ uint64_t start_idle_time;
+ uint64_t start_empty_time;
+ uint16_t flags;
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+#endif /* CONFIG_CFQ_GROUP_IOSCHED */
+};
+
/* This is per cgroup per device grouping structure */
struct cfq_group {
+ /* must be the first member */
+ struct blkg_policy_data pd;
+
/* group service_tree member */
struct rb_node rb_node;
@@ -180,7 +227,7 @@ struct cfq_group {
u64 vdisktime;
unsigned int weight;
unsigned int new_weight;
- bool needs_update;
+ unsigned int dev_weight;
/* number of cfqq currently on this group */
int nr_cfqq;
@@ -206,20 +253,21 @@ struct cfq_group {
unsigned long saved_workload_slice;
enum wl_type_t saved_workload;
enum wl_prio_t saved_serving_prio;
- struct blkio_group blkg;
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- struct hlist_node cfqd_node;
- int ref;
-#endif
+
/* number of requests that are on the dispatch list or inside driver */
int dispatched;
struct cfq_ttime ttime;
+ struct cfqg_stats stats;
};
struct cfq_io_cq {
struct io_cq icq; /* must be the first member */
struct cfq_queue *cfqq[2];
struct cfq_ttime ttime;
+ int ioprio; /* the current ioprio */
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ uint64_t blkcg_id; /* the current blkcg ID */
+#endif
};
/*
@@ -229,7 +277,7 @@ struct cfq_data {
struct request_queue *queue;
/* Root service tree for cfq_groups */
struct cfq_rb_root grp_service_tree;
- struct cfq_group root_group;
+ struct cfq_group *root_group;
/*
* The priority currently being served
@@ -303,12 +351,6 @@ struct cfq_data {
struct cfq_queue oom_cfqq;
unsigned long last_delayed_sync;
-
- /* List of cfq groups being managed on this device*/
- struct hlist_head cfqg_list;
-
- /* Number of groups which are on blkcg->blkg_list */
- unsigned int nr_blkcg_linked_grps;
};
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
@@ -371,21 +413,284 @@ CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
+static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
+{
+ return pd ? container_of(pd, struct cfq_group, pd) : NULL;
+}
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+ return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
+static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
+{
+ return pd_to_blkg(&cfqg->pd);
+}
+
+#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+
+/* cfqg stats flags */
+enum cfqg_stats_flags {
+ CFQG_stats_waiting = 0,
+ CFQG_stats_idling,
+ CFQG_stats_empty,
+};
+
+#define CFQG_FLAG_FNS(name) \
+static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats) \
+{ \
+ stats->flags |= (1 << CFQG_stats_##name); \
+} \
+static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats) \
+{ \
+ stats->flags &= ~(1 << CFQG_stats_##name); \
+} \
+static inline int cfqg_stats_##name(struct cfqg_stats *stats) \
+{ \
+ return (stats->flags & (1 << CFQG_stats_##name)) != 0; \
+} \
+
+CFQG_FLAG_FNS(waiting)
+CFQG_FLAG_FNS(idling)
+CFQG_FLAG_FNS(empty)
+#undef CFQG_FLAG_FNS
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
+{
+ unsigned long long now;
+
+ if (!cfqg_stats_waiting(stats))
+ return;
+
+ now = sched_clock();
+ if (time_after64(now, stats->start_group_wait_time))
+ blkg_stat_add(&stats->group_wait_time,
+ now - stats->start_group_wait_time);
+ cfqg_stats_clear_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
+ struct cfq_group *curr_cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ if (cfqg_stats_waiting(stats))
+ return;
+ if (cfqg == curr_cfqg)
+ return;
+ stats->start_group_wait_time = sched_clock();
+ cfqg_stats_mark_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
+{
+ unsigned long long now;
+
+ if (!cfqg_stats_empty(stats))
+ return;
+
+ now = sched_clock();
+ if (time_after64(now, stats->start_empty_time))
+ blkg_stat_add(&stats->empty_time,
+ now - stats->start_empty_time);
+ cfqg_stats_clear_empty(stats);
+}
+
+static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
+{
+ blkg_stat_add(&cfqg->stats.dequeue, 1);
+}
+
+static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ if (blkg_rwstat_sum(&stats->queued))
+ return;
+
+ /*
+	 * The group is already marked empty. This can happen if cfqq got a
+	 * new request in the parent group and moved to this group while being
+	 * added to the service tree. Just ignore the event and move on.
+ */
+ if (cfqg_stats_empty(stats))
+ return;
+
+ stats->start_empty_time = sched_clock();
+ cfqg_stats_mark_empty(stats);
+}
+
+static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ if (cfqg_stats_idling(stats)) {
+ unsigned long long now = sched_clock();
+
+ if (time_after64(now, stats->start_idle_time))
+ blkg_stat_add(&stats->idle_time,
+ now - stats->start_idle_time);
+ cfqg_stats_clear_idling(stats);
+ }
+}
+
+static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ BUG_ON(cfqg_stats_idling(stats));
+
+ stats->start_idle_time = sched_clock();
+ cfqg_stats_mark_idling(stats);
+}
+
+static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ blkg_stat_add(&stats->avg_queue_size_sum,
+ blkg_rwstat_sum(&stats->queued));
+ blkg_stat_add(&stats->avg_queue_size_samples, 1);
+ cfqg_stats_update_group_wait_time(stats);
+}
+
+#else /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
+static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
+static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
+static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
+static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
+
+#endif /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
+
+static inline void cfqg_get(struct cfq_group *cfqg)
+{
+ return blkg_get(cfqg_to_blkg(cfqg));
+}
+
+static inline void cfqg_put(struct cfq_group *cfqg)
+{
+ return blkg_put(cfqg_to_blkg(cfqg));
+}
+
+#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \
blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
- cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
- blkg_path(&(cfqq)->cfqg->blkg), ##args)
+ cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
+ __pbuf, ##args); \
+} while (0)
-#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
- blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
- blkg_path(&(cfqg)->blkg), ##args) \
+#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args); \
+} while (0)
+
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+ struct cfq_group *curr_cfqg, int rw)
+{
+ blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
+ cfqg_stats_end_empty_time(&cfqg->stats);
+ cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
+}
+
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+ unsigned long time, unsigned long unaccounted_time)
+{
+ blkg_stat_add(&cfqg->stats.time, time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
+#endif
+}
+
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
+{
+ blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
+}
+
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
+{
+ blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
+}
+
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+ uint64_t bytes, int rw)
+{
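+	/* bytes >> 9 converts the byte count to 512-byte sectors */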
+ blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
+ blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
+ blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
+}
+
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+ uint64_t start_time, uint64_t io_start_time, int rw)
+{
+ struct cfqg_stats *stats = &cfqg->stats;
+ unsigned long long now = sched_clock();
+
+ if (time_after64(now, io_start_time))
+ blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
+ if (time_after64(io_start_time, start_time))
+ blkg_rwstat_add(&stats->wait_time, rw,
+ io_start_time - start_time);
+}
+
+static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
+{
+ struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+ struct cfqg_stats *stats = &cfqg->stats;
+
+ /* queued stats shouldn't be cleared */
+ blkg_rwstat_reset(&stats->service_bytes);
+ blkg_rwstat_reset(&stats->serviced);
+ blkg_rwstat_reset(&stats->merged);
+ blkg_rwstat_reset(&stats->service_time);
+ blkg_rwstat_reset(&stats->wait_time);
+ blkg_stat_reset(&stats->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ blkg_stat_reset(&stats->unaccounted_time);
+ blkg_stat_reset(&stats->avg_queue_size_sum);
+ blkg_stat_reset(&stats->avg_queue_size_samples);
+ blkg_stat_reset(&stats->dequeue);
+ blkg_stat_reset(&stats->group_wait_time);
+ blkg_stat_reset(&stats->idle_time);
+ blkg_stat_reset(&stats->empty_time);
+#endif
+}
+
+#else /* CONFIG_CFQ_GROUP_IOSCHED */
+
+static inline void cfqg_get(struct cfq_group *cfqg) { }
+static inline void cfqg_put(struct cfq_group *cfqg) { }
-#else
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
-#endif
+
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+ struct cfq_group *curr_cfqg, int rw) { }
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+ unsigned long time, unsigned long unaccounted_time) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+ uint64_t bytes, int rw) { }
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+ uint64_t start_time, uint64_t io_start_time, int rw) { }
+
+#endif /* CONFIG_CFQ_GROUP_IOSCHED */
+
#define cfq_log(cfqd, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
@@ -466,8 +771,9 @@ static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
}
static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
- struct io_context *, gfp_t);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
+ struct cfq_io_cq *cic, struct bio *bio,
+ gfp_t gfp_mask);
static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
@@ -545,7 +851,7 @@ static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
{
u64 d = delta << CFQ_SERVICE_SHIFT;
- d = d * BLKIO_WEIGHT_DEFAULT;
+ d = d * CFQ_WEIGHT_DEFAULT;
do_div(d, cfqg->weight);
return d;
}
@@ -872,9 +1178,9 @@ static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
- if (cfqg->needs_update) {
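+	/* new_weight == 0 doubles as "no update pending", replacing needs_update */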
+ if (cfqg->new_weight) {
cfqg->weight = cfqg->new_weight;
- cfqg->needs_update = false;
+ cfqg->new_weight = 0;
}
}
@@ -936,7 +1242,7 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
cfq_group_service_tree_del(st, cfqg);
cfqg->saved_workload_slice = 0;
- cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+ cfqg_stats_update_dequeue(cfqg);
}
static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
@@ -1008,178 +1314,59 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
"sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
used_sl, cfqq->slice_dispatch, charge,
iops_mode(cfqd), cfqq->nr_sectors);
- cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
- unaccounted_sl);
- cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
+ cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
+ cfqg_stats_set_start_empty_time(cfqg);
}
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
-{
- if (blkg)
- return container_of(blkg, struct cfq_group, blkg);
- return NULL;
-}
-
-static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
- unsigned int weight)
-{
- struct cfq_group *cfqg = cfqg_of_blkg(blkg);
- cfqg->new_weight = weight;
- cfqg->needs_update = true;
-}
-
-static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
- struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
-{
- struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
- unsigned int major, minor;
-
- /*
- * Add group onto cgroup list. It might happen that bdi->dev is
- * not initialized yet. Initialize this new group without major
-	 * and minor info; the info will be filled in once a new thread
-	 * comes for IO.
- */
- if (bdi->dev) {
- sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
- cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
- (void *)cfqd, MKDEV(major, minor));
- } else
- cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
- (void *)cfqd, 0);
-
- cfqd->nr_blkcg_linked_grps++;
- cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
-
- /* Add group on cfqd list */
- hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
-}
-
-/*
- * Should be called from sleepable context. No request queue lock as per
- * cpu stats are allocated dynamically and alloc_percpu needs to be called
- * from sleepable context.
+/**
+ * cfq_init_cfqg_base - initialize base part of a cfq_group
+ * @cfqg: cfq_group to initialize
+ *
+ * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
+ * is enabled or not.
*/
-static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
+static void cfq_init_cfqg_base(struct cfq_group *cfqg)
{
- struct cfq_group *cfqg = NULL;
- int i, j, ret;
struct cfq_rb_root *st;
-
- cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
- if (!cfqg)
- return NULL;
+ int i, j;
for_each_cfqg_st(cfqg, i, j, st)
*st = CFQ_RB_ROOT;
RB_CLEAR_NODE(&cfqg->rb_node);
cfqg->ttime.last_end_request = jiffies;
-
- /*
-	 * Take the initial reference that will be released on destroy.
-	 * This can be thought of as a joint reference by cgroup and
- * elevator which will be dropped by either elevator exit
- * or cgroup deletion path depending on who is exiting first.
- */
- cfqg->ref = 1;
-
- ret = blkio_alloc_blkg_stats(&cfqg->blkg);
- if (ret) {
- kfree(cfqg);
- return NULL;
- }
-
- return cfqg;
}
-static struct cfq_group *
-cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static void cfq_pd_init(struct blkcg_gq *blkg)
{
- struct cfq_group *cfqg = NULL;
- void *key = cfqd;
- struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
- unsigned int major, minor;
-
- /*
- * This is the common case when there are no blkio cgroups.
- * Avoid lookup in this case
- */
- if (blkcg == &blkio_root_cgroup)
- cfqg = &cfqd->root_group;
- else
- cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
-
- if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
- sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
- cfqg->blkg.dev = MKDEV(major, minor);
- }
+ struct cfq_group *cfqg = blkg_to_cfqg(blkg);
- return cfqg;
+ cfq_init_cfqg_base(cfqg);
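+	/* new groups start from the blkcg-wide default weight */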
+ cfqg->weight = blkg->blkcg->cfq_weight;
}
/*
* Search for the cfq group current task belongs to. request_queue lock must
* be held.
*/
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+ struct blkcg *blkcg)
{
- struct blkio_cgroup *blkcg;
- struct cfq_group *cfqg = NULL, *__cfqg = NULL;
struct request_queue *q = cfqd->queue;
+ struct cfq_group *cfqg = NULL;
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
- cfqg = cfq_find_cfqg(cfqd, blkcg);
- if (cfqg) {
- rcu_read_unlock();
- return cfqg;
- }
-
- /*
- * Need to allocate a group. Allocation of group also needs allocation
-	 * of per-cpu stats which in turn takes a mutex() and can block. Hence
- * we need to drop rcu lock and queue_lock before we call alloc.
- *
- * Not taking any queue reference here and assuming that queue is
- * around by the time we return. CFQ queue allocation code does
- * the same. It might be racy though.
- */
-
- rcu_read_unlock();
- spin_unlock_irq(q->queue_lock);
-
- cfqg = cfq_alloc_cfqg(cfqd);
-
- spin_lock_irq(q->queue_lock);
-
- rcu_read_lock();
- blkcg = task_blkio_cgroup(current);
-
- /*
- * If some other thread already allocated the group while we were
- * not holding queue lock, free up the group
- */
- __cfqg = cfq_find_cfqg(cfqd, blkcg);
+ /* avoid lookup for the common case where there's no blkcg */
+ if (blkcg == &blkcg_root) {
+ cfqg = cfqd->root_group;
+ } else {
+ struct blkcg_gq *blkg;
- if (__cfqg) {
- kfree(cfqg);
- rcu_read_unlock();
- return __cfqg;
+ blkg = blkg_lookup_create(blkcg, q);
+ if (!IS_ERR(blkg))
+ cfqg = blkg_to_cfqg(blkg);
}
- if (!cfqg)
- cfqg = &cfqd->root_group;
-
- cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
- rcu_read_unlock();
- return cfqg;
-}
-
-static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
-{
- cfqg->ref++;
return cfqg;
}
@@ -1187,94 +1374,224 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
/* Currently, all async queues are mapped to root group */
if (!cfq_cfqq_sync(cfqq))
- cfqg = &cfqq->cfqd->root_group;
+ cfqg = cfqq->cfqd->root_group;
cfqq->cfqg = cfqg;
/* cfqq reference on cfqg */
- cfqq->cfqg->ref++;
+ cfqg_get(cfqg);
}
-static void cfq_put_cfqg(struct cfq_group *cfqg)
+static u64 cfqg_prfill_weight_device(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- struct cfq_rb_root *st;
- int i, j;
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
- BUG_ON(cfqg->ref <= 0);
- cfqg->ref--;
- if (cfqg->ref)
- return;
- for_each_cfqg_st(cfqg, i, j, st)
- BUG_ON(!RB_EMPTY_ROOT(&st->rb));
- free_percpu(cfqg->blkg.stats_cpu);
- kfree(cfqg);
+ if (!cfqg->dev_weight)
+ return 0;
+ return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
}
-static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
+static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- /* Something wrong if we are trying to remove same group twice */
- BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
+ blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
+ cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
+ false);
+ return 0;
+}
- hlist_del_init(&cfqg->cfqd_node);
+static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
+{
+ seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
+ return 0;
+}
- BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
- cfqd->nr_blkcg_linked_grps--;
+static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
+ const char *buf)
+{
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkg_conf_ctx ctx;
+ struct cfq_group *cfqg;
+ int ret;
- /*
- * Put the reference taken at the time of creation so that when all
- * queues are gone, group can be destroyed.
- */
- cfq_put_cfqg(cfqg);
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+ cfqg = blkg_to_cfqg(ctx.blkg);
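+	/*
+	 * A value of 0 clears the per-device weight and falls back to the
+	 * blkcg-wide cfq_weight (the "?:" below); non-zero values must be
+	 * within [CFQ_WEIGHT_MIN, CFQ_WEIGHT_MAX].
+	 */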
+ if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
+ cfqg->dev_weight = ctx.v;
+ cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
+ ret = 0;
+ }
+
+ blkg_conf_finish(&ctx);
+ return ret;
}
-static void cfq_release_cfq_groups(struct cfq_data *cfqd)
+static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
- struct hlist_node *pos, *n;
- struct cfq_group *cfqg;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg_gq *blkg;
+ struct hlist_node *n;
- hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
- /*
- * If cgroup removal path got to blk_group first and removed
- * it from cgroup list, then it will take care of destroying
- * cfqg also.
- */
- if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
- cfq_destroy_cfqg(cfqd, cfqg);
+ if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
+ return -EINVAL;
+
+ spin_lock_irq(&blkcg->lock);
+ blkcg->cfq_weight = (unsigned int)val;
+
+ hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+ if (cfqg && !cfqg->dev_weight)
+ cfqg->new_weight = blkcg->cfq_weight;
}
+
+ spin_unlock_irq(&blkcg->lock);
+ return 0;
}
-/*
- * Blk cgroup controller notification saying that blkio_group object is being
- * delinked as associated cgroup object is going away. That also means that
- * no new IO will come in this group. So get rid of this group as soon as
- * any pending IO in the group is finished.
- *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means "key" is a valid cfq_data pointer as long as we are rcu
- * read lock.
- *
- * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
- * it should not be NULL as even if elevator was exiting, cgroup deltion
- * path got to it first.
- */
-static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
+static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- unsigned long flags;
- struct cfq_data *cfqd = key;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
- spin_lock_irqsave(cfqd->queue->queue_lock, flags);
- cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
- spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+ blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
+ cft->private, false);
+ return 0;
}
-#else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
{
- return &cfqd->root_group;
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+ blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
+ cft->private, true);
+ return 0;
}
-static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- return cfqg;
+ struct cfq_group *cfqg = pd_to_cfqg(pd);
+ u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
+ u64 v = 0;
+
+ if (samples) {
+ v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
+ do_div(v, samples);
+ }
+ __blkg_prfill_u64(sf, pd, v);
+ return 0;
+}
+
+/* print avg_queue_size */
+static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *sf)
+{
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+
+ blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
+ &blkcg_policy_cfq, 0, false);
+ return 0;
+}
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+
+static struct cftype cfq_blkcg_files[] = {
+ {
+ .name = "weight_device",
+ .read_seq_string = cfqg_print_weight_device,
+ .write_string = cfqg_set_weight_device,
+ .max_write_len = 256,
+ },
+ {
+ .name = "weight",
+ .read_seq_string = cfq_print_weight,
+ .write_u64 = cfq_set_weight,
+ },
+ {
+ .name = "time",
+ .private = offsetof(struct cfq_group, stats.time),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "sectors",
+ .private = offsetof(struct cfq_group, stats.sectors),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "io_service_bytes",
+ .private = offsetof(struct cfq_group, stats.service_bytes),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_serviced",
+ .private = offsetof(struct cfq_group, stats.serviced),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_service_time",
+ .private = offsetof(struct cfq_group, stats.service_time),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_wait_time",
+ .private = offsetof(struct cfq_group, stats.wait_time),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_merged",
+ .private = offsetof(struct cfq_group, stats.merged),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+ {
+ .name = "io_queued",
+ .private = offsetof(struct cfq_group, stats.queued),
+ .read_seq_string = cfqg_print_rwstat,
+ },
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+ {
+ .name = "avg_queue_size",
+ .read_seq_string = cfqg_print_avg_queue_size,
+ },
+ {
+ .name = "group_wait_time",
+ .private = offsetof(struct cfq_group, stats.group_wait_time),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "idle_time",
+ .private = offsetof(struct cfq_group, stats.idle_time),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "empty_time",
+ .private = offsetof(struct cfq_group, stats.empty_time),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "dequeue",
+ .private = offsetof(struct cfq_group, stats.dequeue),
+ .read_seq_string = cfqg_print_stat,
+ },
+ {
+ .name = "unaccounted_time",
+ .private = offsetof(struct cfq_group, stats.unaccounted_time),
+ .read_seq_string = cfqg_print_stat,
+ },
+#endif /* CONFIG_DEBUG_BLK_CGROUP */
+ { } /* terminate */
+};
+#else /* GROUP_IOSCHED */
+static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
+ struct blkcg *blkcg)
+{
+ return cfqd->root_group;
}
static inline void
@@ -1282,9 +1599,6 @@ cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
cfqq->cfqg = cfqg;
}
-static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
-static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
-
#endif /* GROUP_IOSCHED */
/*
@@ -1551,12 +1865,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
elv_rb_del(&cfqq->sort_list, rq);
cfqq->queued[rq_is_sync(rq)]--;
- cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
cfq_add_rq_rb(rq);
- cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
- &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
- rq_is_sync(rq));
+ cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
+ rq->cmd_flags);
}
static struct request *
@@ -1612,8 +1924,7 @@ static void cfq_remove_request(struct request *rq)
cfq_del_rq_rb(rq);
cfqq->cfqd->rq_queued--;
- cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
if (rq->cmd_flags & REQ_PRIO) {
WARN_ON(!cfqq->prio_pending);
cfqq->prio_pending--;
@@ -1648,8 +1959,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
static void cfq_bio_merged(struct request_queue *q, struct request *req,
struct bio *bio)
{
- cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
- bio_data_dir(bio), cfq_bio_sync(bio));
+ cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
}
static void
@@ -1671,8 +1981,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
if (cfqq->next_rq == next)
cfqq->next_rq = rq;
cfq_remove_request(next);
- cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
- rq_data_dir(next), rq_is_sync(next));
+ cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
cfqq = RQ_CFQQ(next);
/*
@@ -1713,7 +2022,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
del_timer(&cfqd->idle_slice_timer);
- cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+ cfqg_stats_update_idle_time(cfqq->cfqg);
}
static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1722,7 +2031,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
if (cfqq) {
cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
cfqd->serving_prio, cfqd->serving_type);
- cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+ cfqg_stats_update_avg_queue_size(cfqq->cfqg);
cfqq->slice_start = 0;
cfqq->dispatch_start = jiffies;
cfqq->allocated_slice = 0;
@@ -2043,7 +2352,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
* task has exited, don't wait
*/
cic = cfqd->active_cic;
- if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
+ if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
return;
/*
@@ -2070,7 +2379,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
sl = cfqd->cfq_slice_idle;
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
- cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+ cfqg_stats_set_start_idle_time(cfqq->cfqg);
cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
group_idle ? 1 : 0);
}
@@ -2093,8 +2402,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
cfqq->nr_sectors += blk_rq_sectors(rq);
- cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
}
/*
@@ -2677,7 +2985,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
BUG_ON(cfq_cfqq_on_rr(cfqq));
kmem_cache_free(cfq_pool, cfqq);
- cfq_put_cfqg(cfqg);
+ cfqg_put(cfqg);
}
static void cfq_put_cooperator(struct cfq_queue *cfqq)
@@ -2736,7 +3044,7 @@ static void cfq_exit_icq(struct io_cq *icq)
}
}
-static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
+static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
{
struct task_struct *tsk = current;
int ioprio_class;
@@ -2744,7 +3052,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
if (!cfq_cfqq_prio_changed(cfqq))
return;
- ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
+ ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
switch (ioprio_class) {
default:
printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
@@ -2756,11 +3064,11 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
cfqq->ioprio_class = task_nice_ioclass(tsk);
break;
case IOPRIO_CLASS_RT:
- cfqq->ioprio = task_ioprio(ioc);
+ cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
cfqq->ioprio_class = IOPRIO_CLASS_RT;
break;
case IOPRIO_CLASS_BE:
- cfqq->ioprio = task_ioprio(ioc);
+ cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
cfqq->ioprio_class = IOPRIO_CLASS_BE;
break;
case IOPRIO_CLASS_IDLE:
@@ -2778,19 +3086,24 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
cfq_clear_cfqq_prio_changed(cfqq);
}
-static void changed_ioprio(struct cfq_io_cq *cic)
+static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
{
+ int ioprio = cic->icq.ioc->ioprio;
struct cfq_data *cfqd = cic_to_cfqd(cic);
struct cfq_queue *cfqq;
- if (unlikely(!cfqd))
+ /*
+ * Check whether ioprio has changed. The condition may trigger
+ * spuriously on a newly created cic but there's no harm.
+ */
+ if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
return;
cfqq = cic->cfqq[BLK_RW_ASYNC];
if (cfqq) {
struct cfq_queue *new_cfqq;
- new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
- GFP_ATOMIC);
+ new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
+ GFP_ATOMIC);
if (new_cfqq) {
cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
cfq_put_queue(cfqq);
@@ -2800,6 +3113,8 @@ static void changed_ioprio(struct cfq_io_cq *cic)
cfqq = cic->cfqq[BLK_RW_SYNC];
if (cfqq)
cfq_mark_cfqq_prio_changed(cfqq);
+
+ cic->ioprio = ioprio;
}
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
@@ -2823,17 +3138,24 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void changed_cgroup(struct cfq_io_cq *cic)
+static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
{
- struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
struct cfq_data *cfqd = cic_to_cfqd(cic);
- struct request_queue *q;
+ struct cfq_queue *sync_cfqq;
+ uint64_t id;
- if (unlikely(!cfqd))
- return;
+ rcu_read_lock();
+ id = bio_blkcg(bio)->id;
+ rcu_read_unlock();
- q = cfqd->queue;
+ /*
+ * Check whether blkcg has changed. The condition may trigger
+ * spuriously on a newly created cic but there's no harm.
+ */
+ if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
+ return;
+ sync_cfqq = cic_to_cfqq(cic, 1);
if (sync_cfqq) {
/*
* Drop reference to sync queue. A new sync queue will be
@@ -2843,21 +3165,26 @@ static void changed_cgroup(struct cfq_io_cq *cic)
cic_set_cfqq(cic, NULL, 1);
cfq_put_queue(sync_cfqq);
}
+
+ cic->blkcg_id = id;
}
+#else
+static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
- struct io_context *ioc, gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+ struct bio *bio, gfp_t gfp_mask)
{
+ struct blkcg *blkcg;
struct cfq_queue *cfqq, *new_cfqq = NULL;
- struct cfq_io_cq *cic;
struct cfq_group *cfqg;
retry:
- cfqg = cfq_get_cfqg(cfqd);
- cic = cfq_cic_lookup(cfqd, ioc);
- /* cic always exists here */
+ rcu_read_lock();
+
+ blkcg = bio_blkcg(bio);
+ cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
cfqq = cic_to_cfqq(cic, is_sync);
/*
@@ -2870,6 +3197,7 @@ retry:
cfqq = new_cfqq;
new_cfqq = NULL;
} else if (gfp_mask & __GFP_WAIT) {
+ rcu_read_unlock();
spin_unlock_irq(cfqd->queue->queue_lock);
new_cfqq = kmem_cache_alloc_node(cfq_pool,
gfp_mask | __GFP_ZERO,
@@ -2885,7 +3213,7 @@ retry:
if (cfqq) {
cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
- cfq_init_prio_data(cfqq, ioc);
+ cfq_init_prio_data(cfqq, cic);
cfq_link_cfqq_cfqg(cfqq, cfqg);
cfq_log_cfqq(cfqd, cfqq, "alloced");
} else
@@ -2895,6 +3223,7 @@ retry:
if (new_cfqq)
kmem_cache_free(cfq_pool, new_cfqq);
+ rcu_read_unlock();
return cfqq;
}
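A note on the retry loop just above: it mixes RCU with a possibly-sleeping allocation, which is what forces the unlock/relock sequence in the hunk. Condensed here as an annotated reading of the new code, not a verbatim quote:

	rcu_read_lock();                  /* pins bio_blkcg(bio) and the blkg lookup */
	blkcg = bio_blkcg(bio);
	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
	...
	if (gfp_mask & __GFP_WAIT) {
		rcu_read_unlock();        /* cannot sleep inside an RCU read section */
		spin_unlock_irq(cfqd->queue->queue_lock);
		new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask | __GFP_ZERO,
						 cfqd->queue->node);
		spin_lock_irq(cfqd->queue->queue_lock);
		goto retry;               /* the task's blkcg may have changed while
					   * we slept; retry re-derives blkcg/cfqg */
	}
	...
	rcu_read_unlock();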
@@ -2904,6 +3233,9 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
switch (ioprio_class) {
case IOPRIO_CLASS_RT:
return &cfqd->async_cfqq[0][ioprio];
+ case IOPRIO_CLASS_NONE:
+ ioprio = IOPRIO_NORM;
+ /* fall through */
case IOPRIO_CLASS_BE:
return &cfqd->async_cfqq[1][ioprio];
case IOPRIO_CLASS_IDLE:
@@ -2914,11 +3246,11 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
}
static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
- gfp_t gfp_mask)
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+ struct bio *bio, gfp_t gfp_mask)
{
- const int ioprio = task_ioprio(ioc);
- const int ioprio_class = task_ioprio_class(ioc);
+ const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+ const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
struct cfq_queue **async_cfqq = NULL;
struct cfq_queue *cfqq = NULL;
@@ -2928,7 +3260,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
}
if (!cfqq)
- cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
+ cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
/*
* pin the queue now that it's allocated, scheduler exit will prune it
@@ -3010,7 +3342,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
enable_idle = 0;
- else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
+ else if (!atomic_read(&cic->icq.ioc->active_ref) ||
!cfqd->cfq_slice_idle ||
(!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
enable_idle = 0;
@@ -3174,8 +3506,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_clear_cfqq_wait_request(cfqq);
__blk_run_queue(cfqd->queue);
} else {
- cfq_blkiocg_update_idle_time_stats(
- &cfqq->cfqg->blkg);
+ cfqg_stats_update_idle_time(cfqq->cfqg);
cfq_mark_cfqq_must_dispatch(cfqq);
}
}
@@ -3197,14 +3528,13 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
struct cfq_queue *cfqq = RQ_CFQQ(rq);
cfq_log_cfqq(cfqd, cfqq, "insert_request");
- cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
+ cfq_init_prio_data(cfqq, RQ_CIC(rq));
rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
- cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
- &cfqd->serving_group->blkg, rq_data_dir(rq),
- rq_is_sync(rq));
+ cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
+ rq->cmd_flags);
cfq_rq_enqueued(cfqd, cfqq, rq);
}
@@ -3300,9 +3630,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfqd->rq_in_driver--;
cfqq->dispatched--;
(RQ_CFQG(rq))->dispatched--;
- cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
- rq_start_time_ns(rq), rq_io_start_time_ns(rq),
- rq_data_dir(rq), rq_is_sync(rq));
+ cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
+ rq_io_start_time_ns(rq), rq->cmd_flags);
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
@@ -3399,7 +3728,7 @@ static int cfq_may_queue(struct request_queue *q, int rw)
cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
if (cfqq) {
- cfq_init_prio_data(cfqq, cic->icq.ioc);
+ cfq_init_prio_data(cfqq, cic);
return __cfq_may_queue(cfqq);
}
@@ -3421,7 +3750,7 @@ static void cfq_put_request(struct request *rq)
cfqq->allocated[rw]--;
/* Put down rq reference on cfqg */
- cfq_put_cfqg(RQ_CFQG(rq));
+ cfqg_put(RQ_CFQG(rq));
rq->elv.priv[0] = NULL;
rq->elv.priv[1] = NULL;
@@ -3465,32 +3794,25 @@ split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
* Allocate cfq data structures associated with this request.
*/
static int
-cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
+ gfp_t gfp_mask)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
const int rw = rq_data_dir(rq);
const bool is_sync = rq_is_sync(rq);
struct cfq_queue *cfqq;
- unsigned int changed;
might_sleep_if(gfp_mask & __GFP_WAIT);
spin_lock_irq(q->queue_lock);
- /* handle changed notifications */
- changed = icq_get_changed(&cic->icq);
- if (unlikely(changed & ICQ_IOPRIO_CHANGED))
- changed_ioprio(cic);
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- if (unlikely(changed & ICQ_CGROUP_CHANGED))
- changed_cgroup(cic);
-#endif
-
+ check_ioprio_changed(cic, bio);
+ check_blkcg_changed(cic, bio);
new_queue:
cfqq = cic_to_cfqq(cic, is_sync);
if (!cfqq || cfqq == &cfqd->oom_cfqq) {
- cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
+ cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
cic_set_cfqq(cic, cfqq, is_sync);
} else {
/*
@@ -3516,8 +3838,9 @@ new_queue:
cfqq->allocated[rw]++;
cfqq->ref++;
+ cfqg_get(cfqq->cfqg);
rq->elv.priv[0] = cfqq;
- rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
+ rq->elv.priv[1] = cfqq->cfqg;
spin_unlock_irq(q->queue_lock);
return 0;
}
@@ -3614,7 +3937,6 @@ static void cfq_exit_queue(struct elevator_queue *e)
{
struct cfq_data *cfqd = e->elevator_data;
struct request_queue *q = cfqd->queue;
- bool wait = false;
cfq_shutdown_timer_wq(cfqd);
@@ -3624,89 +3946,52 @@ static void cfq_exit_queue(struct elevator_queue *e)
__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
cfq_put_async_queues(cfqd);
- cfq_release_cfq_groups(cfqd);
-
- /*
- * If there are groups which we could not unlink from blkcg list,
- * wait for a rcu period for them to be freed.
- */
- if (cfqd->nr_blkcg_linked_grps)
- wait = true;
spin_unlock_irq(q->queue_lock);
cfq_shutdown_timer_wq(cfqd);
- /*
- * Wait for cfqg->blkg->key accessors to exit their grace periods.
- * Do this wait only if there are other unlinked groups out
- * there. This can happen if cgroup deletion path claimed the
- * responsibility of cleaning up a group before queue cleanup code
- * get to the group.
- *
- * Do not call synchronize_rcu() unconditionally as there are drivers
- * which create/delete request queue hundreds of times during scan/boot
- * and synchronize_rcu() can take significant time and slow down boot.
- */
- if (wait)
- synchronize_rcu();
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- /* Free up per cpu stats for root group */
- free_percpu(cfqd->root_group.blkg.stats_cpu);
+#ifndef CONFIG_CFQ_GROUP_IOSCHED
+ kfree(cfqd->root_group);
#endif
+ blkcg_deactivate_policy(q, &blkcg_policy_cfq);
kfree(cfqd);
}
-static void *cfq_init_queue(struct request_queue *q)
+static int cfq_init_queue(struct request_queue *q)
{
struct cfq_data *cfqd;
- int i, j;
- struct cfq_group *cfqg;
- struct cfq_rb_root *st;
+ struct blkcg_gq *blkg __maybe_unused;
+ int i, ret;
cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!cfqd)
- return NULL;
+ return -ENOMEM;
+
+ cfqd->queue = q;
+ q->elevator->elevator_data = cfqd;
/* Init root service tree */
cfqd->grp_service_tree = CFQ_RB_ROOT;
- /* Init root group */
- cfqg = &cfqd->root_group;
- for_each_cfqg_st(cfqg, i, j, st)
- *st = CFQ_RB_ROOT;
- RB_CLEAR_NODE(&cfqg->rb_node);
-
- /* Give preference to root group over other groups */
- cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
-
+ /* Init root group and prefer root group over other groups by default */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
- /*
- * Set root group reference to 2. One reference will be dropped when
- * all groups on cfqd->cfqg_list are being deleted during queue exit.
- * Other reference will remain there as we don't want to delete this
- * group as it is statically allocated and gets destroyed when
- * throtl_data goes away.
- */
- cfqg->ref = 2;
-
- if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
- kfree(cfqg);
- kfree(cfqd);
- return NULL;
- }
-
- rcu_read_lock();
+ ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
+ if (ret)
+ goto out_free;
- cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
- (void *)cfqd, 0);
- rcu_read_unlock();
- cfqd->nr_blkcg_linked_grps++;
+ cfqd->root_group = blkg_to_cfqg(q->root_blkg);
+#else
+ ret = -ENOMEM;
+ cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
+ GFP_KERNEL, cfqd->queue->node);
+ if (!cfqd->root_group)
+ goto out_free;
- /* Add group on cfqd->cfqg_list */
- hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
+ cfq_init_cfqg_base(cfqd->root_group);
#endif
+ cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
+
/*
* Not strictly needed (since RB_ROOT just clears the node and we
* zeroed cfqd on alloc), but better be safe in case someone decides
@@ -3718,13 +4003,17 @@ static void *cfq_init_queue(struct request_queue *q)
/*
* Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
* Grab a permanent reference to it, so that the normal code flow
- * will not attempt to free it.
+ * will not attempt to free it. oom_cfqq is linked to root_group
+ * but shouldn't hold a reference as it'll never be unlinked. Lose
+ * the reference from linking right away.
*/
cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
cfqd->oom_cfqq.ref++;
- cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
- cfqd->queue = q;
+ spin_lock_irq(q->queue_lock);
+ cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
+ cfqg_put(cfqd->root_group);
+ spin_unlock_irq(q->queue_lock);
init_timer(&cfqd->idle_slice_timer);
cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
@@ -3750,7 +4039,11 @@ static void *cfq_init_queue(struct request_queue *q)
* second, in order to have larger depth for async operations.
*/
cfqd->last_delayed_sync = jiffies - HZ;
- return cfqd;
+ return 0;
+
+out_free:
+ kfree(cfqd);
+ return ret;
}
/*
@@ -3877,15 +4170,13 @@ static struct elevator_type iosched_cfq = {
};
#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static struct blkio_policy_type blkio_policy_cfq = {
- .ops = {
- .blkio_unlink_group_fn = cfq_unlink_blkio_group,
- .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
- },
- .plid = BLKIO_POLICY_PROP,
+static struct blkcg_policy blkcg_policy_cfq = {
+ .pd_size = sizeof(struct cfq_group),
+ .cftypes = cfq_blkcg_files,
+
+ .pd_init_fn = cfq_pd_init,
+ .pd_reset_stats_fn = cfq_pd_reset_stats,
};
-#else
-static struct blkio_policy_type blkio_policy_cfq;
#endif
static int __init cfq_init(void)
@@ -3906,24 +4197,31 @@ static int __init cfq_init(void)
#else
cfq_group_idle = 0;
#endif
+
+ ret = blkcg_policy_register(&blkcg_policy_cfq);
+ if (ret)
+ return ret;
+
cfq_pool = KMEM_CACHE(cfq_queue, 0);
if (!cfq_pool)
- return -ENOMEM;
+ goto err_pol_unreg;
ret = elv_register(&iosched_cfq);
- if (ret) {
- kmem_cache_destroy(cfq_pool);
- return ret;
- }
-
- blkio_policy_register(&blkio_policy_cfq);
+ if (ret)
+ goto err_free_pool;
return 0;
+
+err_free_pool:
+ kmem_cache_destroy(cfq_pool);
+err_pol_unreg:
+ blkcg_policy_unregister(&blkcg_policy_cfq);
+ return ret;
}
static void __exit cfq_exit(void)
{
- blkio_policy_unregister(&blkio_policy_cfq);
+ blkcg_policy_unregister(&blkcg_policy_cfq);
elv_unregister(&iosched_cfq);
kmem_cache_destroy(cfq_pool);
}
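For readers tracking where cfqd->root_group now comes from: q->root_blkg is converted by pointer arithmetic rather than found through the old per-cfqd hash. A minimal sketch, assuming the blkg_to_pd() accessor from the new blk-cgroup.h and a struct blkg_policy_data pd embedded as the first member of struct cfq_group:

	static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
	{
		return pd ? container_of(pd, struct cfq_group, pd) : NULL;
	}

	static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
	{
		return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
	}

Each policy's per-group data hangs off blkg->pd[pol->plid], sized by blkcg_policy.pd_size, which is why the policy declaration above needs only pd_size plus the init/reset hooks.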
diff --git a/block/cfq.h b/block/cfq.h
deleted file mode 100644
index 2a155927e37c..000000000000
--- a/block/cfq.h
+++ /dev/null
@@ -1,115 +0,0 @@
-#ifndef _CFQ_H
-#define _CFQ_H
-#include "blk-cgroup.h"
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync)
-{
- blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue)
-{
- blkiocg_update_dequeue_stats(blkg, dequeue);
-}
-
-static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time, unsigned long unaccounted_time)
-{
- blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
-}
-
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
-{
- blkiocg_set_start_empty_time(blkg);
-}
-
-static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync)
-{
- blkiocg_update_io_remove_stats(blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
- bool direction, bool sync)
-{
- blkiocg_update_io_merged_stats(blkg, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
- blkiocg_update_idle_time_stats(blkg);
-}
-
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
-{
- blkiocg_update_avg_queue_size_stats(blkg);
-}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
-{
- blkiocg_update_set_idle_time_stats(blkg);
-}
-
-static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync)
-{
- blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
-}
-
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
-{
- blkiocg_update_completion_stats(blkg, start_time, io_start_time,
- direction, sync);
-}
-
-static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev) {
- blkiocg_add_blkio_group(blkcg, blkg, key, dev, BLKIO_POLICY_PROP);
-}
-
-static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- return blkiocg_del_blkio_group(blkg);
-}
-
-#else /* CFQ_GROUP_IOSCHED */
-static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
- struct blkio_group *curr_blkg, bool direction, bool sync) {}
-
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
- unsigned long dequeue) {}
-
-static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
- unsigned long time, unsigned long unaccounted_time) {}
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
-static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
- bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
-{
-}
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
-
-static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
- uint64_t bytes, bool direction, bool sync) {}
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
-
-static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
- struct blkio_group *blkg, void *key, dev_t dev) {}
-static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
-{
- return 0;
-}
-
-#endif /* CFQ_GROUP_IOSCHED */
-#endif
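With the shim header gone, the old direction/sync boolean pairs are replaced by stats helpers local to cfq-iosched.c that key off the request's cmd_flags (REQ_WRITE/REQ_SYNC) and bump rwstat counters embedded in the group. A sketch of the replacement pattern, assuming a struct cfqg_stats with a "merged" rwstat field inside struct cfq_group:

	#ifdef CONFIG_CFQ_GROUP_IOSCHED
	static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
	{
		blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
	}
	#else
	static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
	#endif

The !CONFIG_CFQ_GROUP_IOSCHED stubs live next to the real implementations, so no standalone wrapper header is needed.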
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 7bf12d793fcd..599b12e5380f 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -337,13 +337,13 @@ static void deadline_exit_queue(struct elevator_queue *e)
/*
* initialize elevator private data (deadline_data).
*/
-static void *deadline_init_queue(struct request_queue *q)
+static int deadline_init_queue(struct request_queue *q)
{
struct deadline_data *dd;
dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!dd)
- return NULL;
+ return -ENOMEM;
INIT_LIST_HEAD(&dd->fifo_list[READ]);
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -354,7 +354,9 @@ static void *deadline_init_queue(struct request_queue *q)
dd->writes_starved = writes_starved;
dd->front_merges = 1;
dd->fifo_batch = fifo_batch;
- return dd;
+
+ q->elevator->elevator_data = dd;
+ return 0;
}
/*
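The deadline hunks are the simplest illustration of the reworked init contract: elevator_init_fn now returns an errno and publishes its private data through the pre-allocated q->elevator instead of returning an opaque pointer. A sketch under that assumption, with hypothetical example_* names that are not part of this patch:

	typedef int (elevator_init_fn)(struct request_queue *);

	struct example_data {
		struct list_head queue;
	};

	static int example_init_queue(struct request_queue *q)
	{
		struct example_data *ed;

		ed = kmalloc_node(sizeof(*ed), GFP_KERNEL, q->node);
		if (!ed)
			return -ENOMEM;	/* errno, not NULL, reports failure */

		INIT_LIST_HEAD(&ed->queue);
		q->elevator->elevator_data = ed; /* q->elevator is allocated by the caller */
		return 0;
	}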
diff --git a/block/elevator.c b/block/elevator.c
index f016855a46b0..6a55d418896f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -38,6 +38,7 @@
#include <trace/events/block.h>
#include "blk.h"
+#include "blk-cgroup.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
@@ -121,15 +122,6 @@ static struct elevator_type *elevator_get(const char *name)
return e;
}
-static int elevator_init_queue(struct request_queue *q,
- struct elevator_queue *eq)
-{
- eq->elevator_data = eq->type->ops.elevator_init_fn(q);
- if (eq->elevator_data)
- return 0;
- return -ENOMEM;
-}
-
static char chosen_elevator[ELV_NAME_MAX];
static int __init elevator_setup(char *str)
@@ -188,7 +180,6 @@ static void elevator_release(struct kobject *kobj)
int elevator_init(struct request_queue *q, char *name)
{
struct elevator_type *e = NULL;
- struct elevator_queue *eq;
int err;
if (unlikely(q->elevator))
@@ -222,17 +213,16 @@ int elevator_init(struct request_queue *q, char *name)
}
}
- eq = elevator_alloc(q, e);
- if (!eq)
+ q->elevator = elevator_alloc(q, e);
+ if (!q->elevator)
return -ENOMEM;
- err = elevator_init_queue(q, eq);
+ err = e->ops.elevator_init_fn(q);
if (err) {
- kobject_put(&eq->kobj);
+ kobject_put(&q->elevator->kobj);
return err;
}
- q->elevator = eq;
return 0;
}
EXPORT_SYMBOL(elevator_init);
@@ -564,25 +554,6 @@ void elv_drain_elevator(struct request_queue *q)
}
}
-void elv_quiesce_start(struct request_queue *q)
-{
- if (!q->elevator)
- return;
-
- spin_lock_irq(q->queue_lock);
- queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
- spin_unlock_irq(q->queue_lock);
-
- blk_drain_queue(q, false);
-}
-
-void elv_quiesce_end(struct request_queue *q)
-{
- spin_lock_irq(q->queue_lock);
- queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
- spin_unlock_irq(q->queue_lock);
-}
-
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
trace_block_rq_insert(q, rq);
@@ -692,12 +663,13 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
return NULL;
}
-int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+int elv_set_request(struct request_queue *q, struct request *rq,
+ struct bio *bio, gfp_t gfp_mask)
{
struct elevator_queue *e = q->elevator;
if (e->type->ops.elevator_set_req_fn)
- return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask);
+ return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
return 0;
}
@@ -801,8 +773,9 @@ static struct kobj_type elv_ktype = {
.release = elevator_release,
};
-int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
+int elv_register_queue(struct request_queue *q)
{
+ struct elevator_queue *e = q->elevator;
int error;
error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
@@ -820,11 +793,6 @@ int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
}
return error;
}
-
-int elv_register_queue(struct request_queue *q)
-{
- return __elv_register_queue(q, q->elevator);
-}
EXPORT_SYMBOL(elv_register_queue);
void elv_unregister_queue(struct request_queue *q)
@@ -907,53 +875,60 @@ EXPORT_SYMBOL_GPL(elv_unregister);
*/
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
- struct elevator_queue *old_elevator, *e;
+ struct elevator_queue *old = q->elevator;
+ bool registered = old->registered;
int err;
- /* allocate new elevator */
- e = elevator_alloc(q, new_e);
- if (!e)
- return -ENOMEM;
+ /*
+ * Turn on BYPASS and drain all requests w/ elevator private data.
+ * Block layer doesn't call into a quiesced elevator - all requests
+ * are directly put on the dispatch list without elevator data
+ * using INSERT_BACK. All requests have SOFTBARRIER set and no
+ * merge happens either.
+ */
+ blk_queue_bypass_start(q);
+
+ /* unregister and clear all auxiliary data of the old elevator */
+ if (registered)
+ elv_unregister_queue(q);
+
+ spin_lock_irq(q->queue_lock);
+ ioc_clear_queue(q);
+ spin_unlock_irq(q->queue_lock);
- err = elevator_init_queue(q, e);
+ /* allocate, init and register new elevator */
+ err = -ENOMEM;
+ q->elevator = elevator_alloc(q, new_e);
+ if (!q->elevator)
+ goto fail_init;
+
+ err = new_e->ops.elevator_init_fn(q);
if (err) {
- kobject_put(&e->kobj);
- return err;
+ kobject_put(&q->elevator->kobj);
+ goto fail_init;
}
- /* turn on BYPASS and drain all requests w/ elevator private data */
- elv_quiesce_start(q);
-
- /* unregister old queue, register new one and kill old elevator */
- if (q->elevator->registered) {
- elv_unregister_queue(q);
- err = __elv_register_queue(q, e);
+ if (registered) {
+ err = elv_register_queue(q);
if (err)
goto fail_register;
}
- /* done, clear io_cq's, switch elevators and turn off BYPASS */
- spin_lock_irq(q->queue_lock);
- ioc_clear_queue(q);
- old_elevator = q->elevator;
- q->elevator = e;
- spin_unlock_irq(q->queue_lock);
-
- elevator_exit(old_elevator);
- elv_quiesce_end(q);
+ /* done, kill the old one and finish */
+ elevator_exit(old);
+ blk_queue_bypass_end(q);
- blk_add_trace_msg(q, "elv switch: %s", e->type->elevator_name);
+ blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
return 0;
fail_register:
- /*
- * switch failed, exit the new io scheduler and reattach the old
- * one again (along with re-adding the sysfs dir)
- */
- elevator_exit(e);
+ elevator_exit(q->elevator);
+fail_init:
+ /* switch failed, restore and re-register old elevator */
+ q->elevator = old;
elv_register_queue(q);
- elv_quiesce_end(q);
+ blk_queue_bypass_end(q);
return err;
}
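Condensing the new elevator_switch() into its ordering invariants (a reading of the hunk above, not additional code):

	/*
	 * blk_queue_bypass_start(q)          -- drain; new requests bypass the elevator
	 * elv_unregister_queue(q)            -- only if the old one was registered
	 * ioc_clear_queue(q)                 -- under queue_lock, drop cached io_cq's
	 * q->elevator = elevator_alloc(q, new_e)
	 * new_e->ops.elevator_init_fn(q)     -- fills q->elevator->elevator_data
	 * elv_register_queue(q)              -- again only if previously registered
	 * elevator_exit(old)
	 * blk_queue_bypass_end(q)
	 *
	 * On any failure the old elevator is restored and re-registered before
	 * bypass ends, so the queue is never left without a working scheduler.
	 */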
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 413a0b1d788c..5d1bf70e33d5 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -59,15 +59,17 @@ noop_latter_request(struct request_queue *q, struct request *rq)
return list_entry(rq->queuelist.next, struct request, queuelist);
}
-static void *noop_init_queue(struct request_queue *q)
+static int noop_init_queue(struct request_queue *q)
{
struct noop_data *nd;
nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
if (!nd)
- return NULL;
+ return -ENOMEM;
+
INIT_LIST_HEAD(&nd->queue);
- return nd;
+ q->elevator->elevator_data = nd;
+ return 0;
}
static void noop_exit_queue(struct elevator_queue *e)