| author | Tejun Heo <tj@kernel.org> | 2012-03-05 22:15:02 +0100 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2012-03-06 21:27:22 +0100 |
| commit | 0a5a7d0e32be6643b881f0e7cd9d0d06fadde27a (patch) | |
| tree | 271f62b5f75c239831c7def1c445a6e990366730 /block/cfq-iosched.c | |
| parent | blkcg: move rcu_read_lock() outside of blkio_group get functions (diff) | |
blkcg: update blkg get functions to take blkio_cgroup as a parameter
In both blkg get functions - throtl_get_tg() and cfq_get_cfqg() - instead of
obtaining the blkcg of %current explicitly, let the caller specify the blkcg
to use as a parameter, and make both functions hold a reference on that blkcg
while they use it.
This is part of the block cgroup interface cleanup and will help make the
blkcg API more modular.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r-- | block/cfq-iosched.c | 20 |
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6063c4482b86..0f7a81fc7c73 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1122,17 +1122,19 @@ cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
  * Search for the cfq group current task belongs to. request_queue lock must
  * be held.
  */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd,
+				      struct blkio_cgroup *blkcg)
 {
-	struct blkio_cgroup *blkcg;
 	struct cfq_group *cfqg = NULL, *__cfqg = NULL;
 	struct request_queue *q = cfqd->queue;
 
-	blkcg = task_blkio_cgroup(current);
 	cfqg = cfq_find_cfqg(cfqd, blkcg);
 	if (cfqg)
 		return cfqg;
 
+	if (!css_tryget(&blkcg->css))
+		return NULL;
+
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
 	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
@@ -1142,16 +1144,14 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
 	 * around by the time we return. CFQ queue allocation code does
 	 * the same. It might be racy though.
 	 */
-	rcu_read_unlock();
 	spin_unlock_irq(q->queue_lock);
 
 	cfqg = cfq_alloc_cfqg(cfqd);
 
 	spin_lock_irq(q->queue_lock);
-	rcu_read_lock();
-	blkcg = task_blkio_cgroup(current);
+	css_put(&blkcg->css);
 
 	/*
 	 * If some other thread already allocated the group while we were
@@ -1278,7 +1278,8 @@ static bool cfq_clear_queue(struct request_queue *q)
 }
 
 #else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
+static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd,
+				      struct blkio_cgroup *blkcg)
 {
 	return &cfqd->root_group;
 }
@@ -2860,6 +2861,7 @@ static struct cfq_queue *
 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 		     struct io_context *ioc, gfp_t gfp_mask)
 {
+	struct blkio_cgroup *blkcg;
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
 	struct cfq_io_cq *cic;
 	struct cfq_group *cfqg;
@@ -2867,7 +2869,9 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 
 retry:
 	rcu_read_lock();
-	cfqg = cfq_get_cfqg(cfqd);
+	blkcg = task_blkio_cgroup(current);
+
+	cfqg = cfq_get_cfqg(cfqd, blkcg);
 	cic = cfq_cic_lookup(cfqd, ioc);	/* cic always exists here */
 	cfqq = cic_to_cfqq(cic, is_sync);
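To make the ownership change easier to see outside of kernel context, here is a minimal, self-contained userspace sketch of the pattern the patch introduces: the caller resolves the blkio_cgroup (task_blkio_cgroup() in the kernel) and passes it to the get function, which pins it with a tryget/put pair so the cgroup cannot go away while the queue lock is dropped for a blocking allocation. Everything below (mock_blkcg, mock_tryget(), mock_get_group(), and so on) is an illustrative stand-in, not a kernel API.

```c
/*
 * Userspace model of the blkg-get pattern, NOT kernel code: the caller
 * supplies the cgroup object, and the getter pins it across the window
 * where the (simulated) queue lock would be dropped.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mock_blkcg {
	int refcnt;		/* stands in for the css reference count */
	bool offline;		/* set when the cgroup is being torn down */
};

struct mock_group {
	struct mock_blkcg *blkcg;
};

/* Analogous to css_tryget(): fail if the cgroup is already going away. */
static bool mock_tryget(struct mock_blkcg *blkcg)
{
	if (blkcg->offline)
		return false;
	blkcg->refcnt++;
	return true;
}

/* Analogous to css_put(). */
static void mock_put(struct mock_blkcg *blkcg)
{
	blkcg->refcnt--;
}

/*
 * Modeled after the new cfq_get_cfqg(cfqd, blkcg): the blkcg arrives as a
 * parameter instead of being derived from "current" inside the function,
 * and it stays pinned across the blocking allocation.
 */
static struct mock_group *mock_get_group(struct mock_blkcg *blkcg)
{
	struct mock_group *grp;

	if (!mock_tryget(blkcg))
		return NULL;

	/* ...queue lock dropped here; allocation may block... */
	grp = malloc(sizeof(*grp));
	/* ...queue lock re-acquired... */

	mock_put(blkcg);

	if (!grp)
		return NULL;
	grp->blkcg = blkcg;
	return grp;
}

int main(void)
{
	/* The caller resolves the cgroup, as task_blkio_cgroup() does. */
	struct mock_blkcg blkcg = { .refcnt = 1, .offline = false };
	struct mock_group *grp = mock_get_group(&blkcg);

	printf("group %s, refcnt back to %d\n",
	       grp ? "allocated" : "missing", blkcg.refcnt);
	free(grp);
	return 0;
}
```

The point of the explicit reference is visible in the diff above: the old code re-resolved task_blkio_cgroup(current) after re-taking the queue lock, while the new code keeps working against the blkcg the caller passed in, which is the decoupling from %current that the commit message describes.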