author		Kemeng Shi <shikemeng@huaweicloud.com>	2024-05-14 14:52:50 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2024-07-04 04:29:54 +0200
commit		9bb48a70386326acf22e4523d1f72c912f5854fe (patch)
tree		7ecfd9992e25fc045279e9f5354dc23ee2bbe98b /mm/page-writeback.c
parent		writeback: factor out domain_over_bg_thresh to remove repeated code (diff)
writeback: factor out code of freerun to remove repeated code
Factor out the freerun logic into the new helpers domain_poll_intv() and
domain_dirty_freerun() to remove repeated code.

Link: https://lkml.kernel.org/r/20240514125254.142203-5-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
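In outline, the patch replaces two open-coded strictlimit branches with a
per-domain freerun flag plus the two helpers. The standalone C sketch below
mirrors the new flow in userspace so it compiles and runs: the struct is
trimmed to the fields involved, wb_dirty_limits() is elided, ilog2_ul() is a
portable stand-in for the kernel's ilog2(), and dirty_freerun_ceiling() /
dirty_poll_interval() are reproduced from the existing mm/page-writeback.c
helpers from memory, so treat the sketch as illustrative rather than
authoritative.

	#include <stdio.h>
	#include <stdbool.h>

	/* Trimmed to the fields the freerun decision reads. */
	struct dirty_throttle_control {
		unsigned long dirty, thresh, bg_thresh;          /* domain-wide */
		unsigned long wb_dirty, wb_thresh, wb_bg_thresh; /* per-writeback */
		bool freerun;                                    /* added by this patch */
	};

	/* Portable stand-in for the kernel's ilog2(). */
	static unsigned long ilog2_ul(unsigned long n)
	{
		unsigned long log = 0;

		while (n >>= 1)
			log++;
		return log;
	}

	/* Existing mm/page-writeback.c helpers, reproduced from memory. */
	static unsigned long dirty_freerun_ceiling(unsigned long thresh,
						   unsigned long bg_thresh)
	{
		return (thresh + bg_thresh) / 2;  /* midpoint of the two limits */
	}

	static unsigned long dirty_poll_interval(unsigned long dirty,
						 unsigned long thresh)
	{
		if (thresh > dirty)
			return 1UL << (ilog2_ul(thresh - dirty) >> 1);
		return 1;
	}

	/* The two helpers this patch factors out (wb_dirty_limits() elided). */
	static unsigned long domain_poll_intv(struct dirty_throttle_control *dtc,
					      bool strictlimit)
	{
		unsigned long dirty, thresh;

		if (strictlimit) {
			dirty = dtc->wb_dirty;
			thresh = dtc->wb_thresh;
		} else {
			dirty = dtc->dirty;
			thresh = dtc->thresh;
		}
		return dirty_poll_interval(dirty, thresh);
	}

	static void domain_dirty_freerun(struct dirty_throttle_control *dtc,
					 bool strictlimit)
	{
		unsigned long dirty, thresh, bg_thresh;

		if (strictlimit) {  /* the kernel also refreshes wb limits here */
			dirty = dtc->wb_dirty;
			thresh = dtc->wb_thresh;
			bg_thresh = dtc->wb_bg_thresh;
		} else {
			dirty = dtc->dirty;
			thresh = dtc->thresh;
			bg_thresh = dtc->bg_thresh;
		}
		dtc->freerun = dirty <= dirty_freerun_ceiling(thresh, bg_thresh);
	}

	int main(void)
	{
		struct dirty_throttle_control gdtc = {
			.dirty = 600, .thresh = 1000, .bg_thresh = 500,
		};

		domain_dirty_freerun(&gdtc, false);
		if (gdtc.freerun)  /* 600 <= (1000 + 500) / 2 == 750 */
			printf("freerun: next poll after %lu dirtied pages\n",
			       domain_poll_intv(&gdtc, false));
		return 0;
	}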
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	89
1 file changed, 49 insertions(+), 40 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index daba24fbfc84..72d55c2fe432 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -139,6 +139,7 @@ struct dirty_throttle_control {
 	unsigned long		wb_bg_thresh;
 
 	unsigned long		pos_ratio;
+	bool			freerun;
 };
 
 /*
@@ -1702,6 +1703,49 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
 	}
 }
 
+static unsigned long domain_poll_intv(struct dirty_throttle_control *dtc,
+				      bool strictlimit)
+{
+	unsigned long dirty, thresh;
+
+	if (strictlimit) {
+		dirty = dtc->wb_dirty;
+		thresh = dtc->wb_thresh;
+	} else {
+		dirty = dtc->dirty;
+		thresh = dtc->thresh;
+	}
+
+	return dirty_poll_interval(dirty, thresh);
+}
+
+/*
+ * Throttle it only when the background writeback cannot catch-up. This avoids
+ * (excessively) small writeouts when the wb limits are ramping up in case of
+ * !strictlimit.
+ *
+ * In strictlimit case make decision based on the wb counters and limits. Small
+ * writeouts when the wb limits are ramping up are the price we consciously pay
+ * for strictlimit-ing.
+ */
+static void domain_dirty_freerun(struct dirty_throttle_control *dtc,
+				 bool strictlimit)
+{
+	unsigned long dirty, thresh, bg_thresh;
+
+	if (unlikely(strictlimit)) {
+		wb_dirty_limits(dtc);
+		dirty = dtc->wb_dirty;
+		thresh = dtc->wb_thresh;
+		bg_thresh = dtc->wb_bg_thresh;
+	} else {
+		dirty = dtc->dirty;
+		thresh = dtc->thresh;
+		bg_thresh = dtc->bg_thresh;
+	}
+	dtc->freerun = dirty <= dirty_freerun_ceiling(thresh, bg_thresh);
+}
+
 /*
  * balance_dirty_pages() must be called by processes which are generating dirty
  * data. It looks at the number of dirty pages in the machine and will force
@@ -1734,27 +1778,12 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
 
 	for (;;) {
 		unsigned long now = jiffies;
-		unsigned long dirty, thresh, bg_thresh;
-		unsigned long m_dirty = 0;	/* stop bogus uninit warnings */
-		unsigned long m_thresh = 0;
-		unsigned long m_bg_thresh = 0;
 
 		nr_dirty = global_node_page_state(NR_FILE_DIRTY);
 
 		domain_dirty_avail(gdtc, true);
 		domain_dirty_limits(gdtc);
-
-		if (unlikely(strictlimit)) {
-			wb_dirty_limits(gdtc);
-
-			dirty = gdtc->wb_dirty;
-			thresh = gdtc->wb_thresh;
-			bg_thresh = gdtc->wb_bg_thresh;
-		} else {
-			dirty = gdtc->dirty;
-			thresh = gdtc->thresh;
-			bg_thresh = gdtc->bg_thresh;
-		}
+		domain_dirty_freerun(gdtc, strictlimit);
 
 		if (mdtc) {
 			/*
@@ -1763,17 +1792,7 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
 			 */
 			domain_dirty_avail(mdtc, true);
 			domain_dirty_limits(mdtc);
-
-			if (unlikely(strictlimit)) {
-				wb_dirty_limits(mdtc);
-				m_dirty = mdtc->wb_dirty;
-				m_thresh = mdtc->wb_thresh;
-				m_bg_thresh = mdtc->wb_bg_thresh;
-			} else {
-				m_dirty = mdtc->dirty;
-				m_thresh = mdtc->thresh;
-				m_bg_thresh = mdtc->bg_thresh;
-			}
+			domain_dirty_freerun(mdtc, strictlimit);
 		}
 
 		/*
@@ -1790,31 +1809,21 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
 			wb_start_background_writeback(wb);
 
 		/*
-		 * Throttle it only when the background writeback cannot
-		 * catch-up. This avoids (excessively) small writeouts
-		 * when the wb limits are ramping up in case of !strictlimit.
-		 *
-		 * In strictlimit case make decision based on the wb counters
-		 * and limits. Small writeouts when the wb limits are ramping
-		 * up are the price we consciously pay for strictlimit-ing.
-		 *
 		 * If memcg domain is in effect, @dirty should be under
 		 * both global and memcg freerun ceilings.
 		 */
-		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
-		    (!mdtc ||
-		     m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
+		if (gdtc->freerun && (!mdtc || mdtc->freerun)) {
 			unsigned long intv;
 			unsigned long m_intv;
 
 free_running:
-			intv = dirty_poll_interval(dirty, thresh);
+			intv = domain_poll_intv(gdtc, strictlimit);
 			m_intv = ULONG_MAX;
 
 			current->dirty_paused_when = now;
 			current->nr_dirtied = 0;
 			if (mdtc)
-				m_intv = dirty_poll_interval(m_dirty, m_thresh);
+				m_intv = domain_poll_intv(mdtc, strictlimit);
 			current->nr_dirtied_pause = min(intv, m_intv);
 			break;
 		}
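Design note: storing the result in dtc->freerun lets the patch drop the four
m_* locals (and their "stop bogus uninit warnings" initializers) from
balance_dirty_pages(), so the global and memcg domains are now tested
identically via gdtc->freerun and mdtc->freerun. As a worked example of the
decision itself (hedged, using the helper definitions sketched above): with
thresh = 1000 and bg_thresh = 500 pages, the freerun ceiling is
(1000 + 500) / 2 = 750; a task at dirty = 600 free-runs, and
dirty_poll_interval(600, 1000) = 1 << (ilog2(400) >> 1) = 16 pages may be
dirtied before the limits are re-checked.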