author		Joe Thornber <ejt@redhat.com>		2017-05-11 13:48:18 +0200
committer	Mike Snitzer <snitzer@redhat.com>	2017-05-15 03:54:33 +0200
commit		6cf4cc8f8b3b7bc9e3c04a7eab44b985d50029fc (patch)
tree		f9efea7081881af53eabb2f36d7aeafbfd44f71d
parent		dm cache policy smq: put newly promoted entries at the top of the multiqueue (diff)
dm cache policy smq: stop preemptively demoting blocks
Preemptively demoting blocks causes a lot of churn when the working set's size is close to the fast device's size.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r--	drivers/md/dm-cache-policy-smq.c	17
1 file changed, 5 insertions(+), 12 deletions(-)
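
The churn described in the message comes from the free-target check: smq tries to keep a fixed percentage of the cache unallocated, so a working set that nearly fills the fast device keeps failing the check, and the old policy kept demoting hot blocks that the workload immediately re-promoted. Below is a minimal userspace model of that arithmetic, assuming a free target of 25% (the value I believe FREE_TARGET carries in this file), with plain integers standing in for mq->cache_size, mq->cache_alloc.nr_allocated and btracker_nr_demotions_queued():

#include <stdbool.h>
#include <stdio.h>

#define FREE_TARGET 25u	/* percent; assumed value, mirrors the kernel constant's role */

/* Convert a percentage of the cache size into a block count,
 * as percent_to_target() does in dm-cache-policy-smq.c. */
static unsigned percent_to_target(unsigned cache_size, unsigned p)
{
	return cache_size * p / 100u;
}

/* Model of free_target_met() after this patch: free blocks plus
 * already-queued demotions must reach the target. */
static bool free_target_met(unsigned cache_size, unsigned nr_allocated,
			    unsigned nr_demotions_queued)
{
	unsigned nr_free = cache_size - nr_allocated;
	return (nr_free + nr_demotions_queued) >=
		percent_to_target(cache_size, FREE_TARGET);
}

int main(void)
{
	/* A working set close to the device size: 1000 blocks, 990 allocated.
	 * The target (250 free) is never met, so a policy that demotes
	 * whenever the target is missed will churn constantly. */
	printf("met: %d\n", free_target_met(1000, 990, 0));	/* met: 0 */
	printf("met: %d\n", free_target_met(1000, 990, 240));	/* met: 1 */
	return 0;
}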
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 54421a846a0c..758480a1893d 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1134,13 +1134,10 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 		percent_to_target(mq, CLEAN_TARGET);
 }
 
-static bool free_target_met(struct smq_policy *mq, bool idle)
+static bool free_target_met(struct smq_policy *mq)
 {
 	unsigned nr_free;
 
-	if (!idle)
-		return true;
-
 	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
 	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
 		percent_to_target(mq, FREE_TARGET);
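
Note what the deleted branch did: a busy (non-idle) policy simply claimed the free target was met, so demotions were only queued when idle, and the one caller that wanted the real answer had to pass idle=true (see the next hunk). A compact sketch of the two behaviors, with booleans standing in for the real state (illustrative only):

#include <stdbool.h>

/* Before: under load (!idle), pretend the free target is met, so no
 * demotions get queued from the background-work path. */
static bool free_target_met_before(bool idle, bool enough_free)
{
	if (!idle)
		return true;
	return enough_free;
}

/* After: report the real allocation state unconditionally; what to do
 * about it is now entirely the caller's decision. */
static bool free_target_met_after(bool enough_free)
{
	return enough_free;
}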
@@ -1220,7 +1217,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 		 * We always claim to be 'idle' to ensure some demotions happen
 		 * with continuous loads.
 		 */
-		if (!free_target_met(mq, true))
+		if (!free_target_met(mq))
 			queue_demotion(mq);
 		return;
 	}
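
With the short-circuit gone, this call site no longer needs to fake idleness; demotion becomes purely demand-driven, queued only when a promotion actually wants a free block. A self-contained model of that path (stand-in struct and an assumed 25% target; the real helpers live in dm-cache-policy-smq.c):

#include <stdbool.h>

/* Stand-in for the relevant smq_policy state. */
struct model {
	unsigned cache_size;
	unsigned nr_allocated;
	unsigned nr_demotions_queued;
};

static bool free_target_met(struct model *m)
{
	unsigned nr_free = m->cache_size - m->nr_allocated;
	return (nr_free + m->nr_demotions_queued) >=
		m->cache_size * 25u / 100u;	/* assumed 25% free target */
}

static void queue_demotion(struct model *m)
{
	m->nr_demotions_queued++;
}

/* Demand-driven demotion: room is made only when a promotion needs it,
 * not preemptively from the background-work path. */
static void queue_promotion(struct model *m)
{
	if (!free_target_met(m))
		queue_demotion(m);
	/* ...the promotion itself would be queued after this... */
}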
@@ -1421,14 +1418,10 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
 	spin_lock_irqsave(&mq->lock, flags);
 	r = btracker_issue(mq->bg_work, result);
 	if (r == -ENODATA) {
-		/* find some writeback work to do */
-		if (mq->migrations_allowed && !free_target_met(mq, idle))
-			queue_demotion(mq);
-
-		else if (!clean_target_met(mq, idle))
+		if (!clean_target_met(mq, idle)) {
 			queue_writeback(mq);
-
-		r = btracker_issue(mq->bg_work, result);
+			r = btracker_issue(mq->bg_work, result);
+		}
 	}
 	spin_unlock_irqrestore(&mq->lock, flags);
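
The net effect of this last hunk: on -ENODATA the background path now services only the clean target, retries btracker_issue() only when writeback work was actually queued, and never queues demotions preemptively. A runnable model of the new control flow, where the stub types and helpers are stand-ins for the kernel functions of the same names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel state and helpers; purely illustrative. */
struct policy {
	bool has_queued_work;
	bool clean_target_met;
};

static int btracker_issue(struct policy *p, int *result)
{
	if (!p->has_queued_work)
		return -ENODATA;
	*result = 1;	/* "here is a piece of background work" */
	return 0;
}

static bool clean_target_met(struct policy *p, bool idle)
{
	(void)idle;
	return p->clean_target_met;
}

static void queue_writeback(struct policy *p)
{
	p->has_queued_work = true;
}

/* Simplified model of smq_get_background_work() after the patch
 * (locking omitted). */
static int get_background_work(struct policy *p, bool idle, int *result)
{
	int r = btracker_issue(p, result);
	if (r == -ENODATA) {
		if (!clean_target_met(p, idle)) {
			queue_writeback(p);
			/* retry only when new work may have been queued */
			r = btracker_issue(p, result);
		}
		/* no queue_demotion() here any more: demotions are now
		 * driven solely by the promotion path */
	}
	return r;
}

int main(void)
{
	struct policy p = { .has_queued_work = false, .clean_target_met = false };
	int work;
	printf("r = %d\n", get_background_work(&p, true, &work));	/* r = 0 */
	return 0;
}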