author		Christoph Hellwig <hch@lst.de>	2018-11-14 17:02:09 +0100
committer	Jens Axboe <axboe@kernel.dk>	2018-11-15 20:13:22 +0100
commit		d53375608ebf13c37721cf30677eba4333d18020 (patch)
tree		a2aa7530ab7067dd10907383fa4fd62ea99d19ee /block/blk-iolatency.c
parent		block: remove queue_lockdep_assert_held (diff)
download	linux-d53375608ebf13c37721cf30677eba4333d18020.tar.xz
		linux-d53375608ebf13c37721cf30677eba4333d18020.zip
block: remove the unused lock argument to rq_qos_throttle
Unused now that the legacy request path is gone.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
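The shape of the simplification: once no caller can be holding a lock across the throttle path, the optional drop/retake around the sleep collapses into a plain sleep, and the lock parameter can be deleted from the whole call chain. Below is a minimal standalone sketch of that before/after, in plain C with pthreads rather than kernel primitives; all names are hypothetical illustrations of the pattern, not code from this patch.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-group wait state (rq_wait in the patch). */
struct throttle {
	pthread_mutex_t mtx;
	pthread_cond_t  cond;
	int inflight;
	int limit;
};

/*
 * Old shape: some callers entered the wait path holding an external lock,
 * so the sleep had to be bracketed by an optional drop/retake of that lock,
 * mirroring the spin_unlock_irq()/io_schedule()/spin_lock_irq() dance in the
 * removed hunk below.
 */
static void throttle_wait_old(struct throttle *t, pthread_mutex_t *lock)
{
	pthread_mutex_lock(&t->mtx);
	while (t->inflight >= t->limit) {
		if (lock)
			pthread_mutex_unlock(lock);
		pthread_cond_wait(&t->cond, &t->mtx);	/* io_schedule() stand-in */
		if (lock)
			pthread_mutex_lock(lock);
	}
	t->inflight++;
	pthread_mutex_unlock(&t->mtx);
}

/* New shape: no caller passes a lock any more, so the argument disappears. */
static void throttle_wait(struct throttle *t)
{
	pthread_mutex_lock(&t->mtx);
	while (t->inflight >= t->limit)
		pthread_cond_wait(&t->cond, &t->mtx);
	t->inflight++;
	pthread_mutex_unlock(&t->mtx);
}

int main(void)
{
	struct throttle t = {
		.mtx  = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
		.inflight = 0,
		.limit = 4,
	};

	throttle_wait_old(&t, NULL);		/* old form with no lock: behaves like the new form */
	throttle_wait(&t);			/* new form */
	printf("inflight = %d\n", t.inflight);	/* prints 2, both calls admitted */
	return 0;
}

Compile with cc -pthread; the only point of interest is the signature and loop-body difference between the two functions.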
Diffstat (limited to 'block/blk-iolatency.c')
-rw-r--r--	block/blk-iolatency.c	24
1 file changed, 6 insertions(+), 18 deletions(-)
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 38c35c32aff2..8edf1b353ad1 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -276,10 +276,8 @@ static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
struct iolatency_grp *iolat,
- spinlock_t *lock, bool issue_as_root,
+ bool issue_as_root,
bool use_memdelay)
- __releases(lock)
- __acquires(lock)
{
struct rq_wait *rqw = &iolat->rq_wait;
unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
@@ -311,14 +309,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
if (iolatency_may_queue(iolat, &wait, first_block))
break;
first_block = false;
-
- if (lock) {
- spin_unlock_irq(lock);
- io_schedule();
- spin_lock_irq(lock);
- } else {
- io_schedule();
- }
+ io_schedule();
} while (1);
finish_wait(&rqw->wait, &wait);
@@ -478,8 +469,7 @@ static void check_scale_change(struct iolatency_grp *iolat)
scale_change(iolat, direction > 0);
}
-static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
- spinlock_t *lock)
+static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
struct blkcg *blkcg;
@@ -495,13 +485,11 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
bio_associate_blkcg(bio, &blkcg->css);
blkg = blkg_lookup(blkcg, q);
if (unlikely(!blkg)) {
- if (!lock)
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(q->queue_lock);
blkg = blkg_lookup_create(blkcg, q);
if (IS_ERR(blkg))
blkg = NULL;
- if (!lock)
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(q->queue_lock);
}
if (!blkg)
goto out;
@@ -518,7 +506,7 @@ out:
}
check_scale_change(iolat);
- __blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
+ __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
blkg = blkg->parent;
}
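Not visible in this diffstat (it is limited to block/blk-iolatency.c), but implied by the subject line: the rq_qos_throttle() wrapper and its remaining blk-mq caller lose the same argument, so a call of the form rq_qos_throttle(q, bio, NULL) presumably becomes rq_qos_throttle(q, bio). That is an inference from the commit title and the hunks above, not something shown in this file.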