author		Tejun Heo <tj@kernel.org>	2023-04-13 02:06:46 +0200
committer	Jens Axboe <axboe@kernel.dk>	2023-04-13 14:46:48 +0200
commit		83462a6c971cdc550475b672cf29bd3b53bedf84 (patch)
tree		4233724720c205a3798d7642ae8d22d12900bc8a /block
parent		s390/dasd: fix hanging blockdevice after request requeue (diff)
blkcg: Drop unnecessary RCU read [un]locks from blkg_conf_prep/finish()
Now that all RCU flavors have been combined, either holding a spin lock,
disabling irqs or disabling preemption implies an RCU read lock, so there's
no need to use rcu_read_[un]lock() explicitly while holding queue_lock.
This shouldn't cause any behavior changes.

v2: Description updated. Leave __acquires/release on queue_lock alone.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230413000649.115785-2-tj@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
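To see why the explicit pair is removable, here is an illustrative
before/after sketch (not taken from the patch itself): under unified RCU,
an irq-disabled spinlock section already holds off RCU grace periods, so
it is a read-side critical section on its own.

	/* Before: explicit RCU read lock bracketing the queue_lock section. */
	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);
	/* ... blkg lookup/update ... */
	spin_unlock_irq(&q->queue_lock);
	rcu_read_unlock();

	/*
	 * After: spin_lock_irq() disables irqs, which under unified RCU
	 * already implies an RCU read-side critical section, so the
	 * explicit rcu_read_[un]lock() calls are redundant and go away.
	 */
	spin_lock_irq(&q->queue_lock);
	/* ... blkg lookup/update ... */
	spin_unlock_irq(&q->queue_lock);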
Diffstat (limited to 'block')
-rw-r--r--	block/blk-cgroup.c | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 18331cb92914..0a2c19d74d95 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -699,12 +699,12 @@ struct block_device *blkcg_conf_open_bdev(char **inputp)
  *
  * Parse per-blkg config update from @input and initialize @ctx with the
  * result. @ctx->blkg points to the blkg to be updated and @ctx->body the
- * part of @input following MAJ:MIN. This function returns with RCU read
- * lock and queue lock held and must be paired with blkg_conf_finish().
+ * part of @input following MAJ:MIN. This function returns with queue lock
+ * held and must be paired with blkg_conf_finish().
  */
 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		   char *input, struct blkg_conf_ctx *ctx)
-	__acquires(rcu) __acquires(&bdev->bd_queue->queue_lock)
+	__acquires(&bdev->bd_queue->queue_lock)
 {
 	struct block_device *bdev;
 	struct gendisk *disk;
@@ -726,7 +726,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	if (ret)
 		goto fail;
 
-	rcu_read_lock();
 	spin_lock_irq(&q->queue_lock);
 
 	if (!blkcg_policy_enabled(q, pol)) {
@@ -755,7 +754,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 
 		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
 		spin_unlock_irq(&q->queue_lock);
-		rcu_read_unlock();
 
 		new_blkg = blkg_alloc(pos, disk, GFP_KERNEL);
 		if (unlikely(!new_blkg)) {
@@ -769,7 +767,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 			goto fail_exit_queue;
 		}
 
-		rcu_read_lock();
 		spin_lock_irq(&q->queue_lock);
 
 		if (!blkcg_policy_enabled(q, pol)) {
@@ -805,7 +802,6 @@ fail_preloaded:
 	radix_tree_preload_end();
 fail_unlock:
 	spin_unlock_irq(&q->queue_lock);
-	rcu_read_unlock();
 fail_exit_queue:
 	blk_queue_exit(q);
 fail:
@@ -832,10 +828,9 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep);
  * with blkg_conf_prep().
  */
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
-	__releases(&ctx->bdev->bd_queue->queue_lock) __releases(rcu)
+	__releases(&ctx->bdev->bd_queue->queue_lock)
 {
 	spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
-	rcu_read_unlock();
 	blkdev_put_no_open(ctx->bdev);
 }
 EXPORT_SYMBOL_GPL(blkg_conf_finish);
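For context, here is a minimal caller sketch of the contract documented
above; it is not from this patch, and the function name and the
do_something_with() helper are hypothetical stand-ins for a policy's
config-write path:

	/*
	 * Hypothetical caller sketch: blkg_conf_prep() returns with the
	 * queue lock held and must be paired with blkg_conf_finish().
	 * After this patch, callers need no RCU bookkeeping of their own.
	 */
	static int examplecg_set_conf(struct blkcg *blkcg,
				      const struct blkcg_policy *pol,
				      char *input)
	{
		struct blkg_conf_ctx ctx;
		int ret;

		ret = blkg_conf_prep(blkcg, pol, input, &ctx);
		if (ret)
			return ret;

		/*
		 * queue_lock is held here, which under unified RCU is
		 * also an RCU read-side critical section. ctx.blkg is
		 * the blkg to update and ctx.body the value string.
		 */
		ret = do_something_with(ctx.blkg, ctx.body);	/* hypothetical */

		blkg_conf_finish(&ctx);
		return ret;
	}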