author:    Peter Zijlstra <peterz@infradead.org>  2007-11-15 01:59:15 +0100
committer: Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-11-15 03:45:38 +0100
commit:    5fce25a9df4865bdd5e3dc4853b269dc1677a02a
tree:      207fe2bf726bac89e402eb738b9548cfc7cae2f5 /mm/page-writeback.c
parent:    make getdelays cgroupstats aware
mm: speed up writeback ramp-up on clean systems
We allow violation of bdi limits if there is a lot of room on the system.
Once we hit half the total limit we start enforcing bdi limits and bdi
ramp-up should happen.  Doing it this way avoids many small writeouts on
an otherwise idle system and should also speed up the ramp-up.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
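For illustration only, the decision this patch adds can be sketched as a
standalone predicate in plain C (this is not kernel code; the function name
should_throttle() is hypothetical, and the parameters simply mirror the
locals used in balance_dirty_pages() and the thresholds returned by
get_dirty_limits() in the diff below):

/*
 * Simplified sketch of the post-patch throttling decision: the per-bdi
 * limit is only enforced once global dirty state passes the midpoint
 * between the background and hard dirty thresholds.
 */
#include <stdbool.h>

static bool should_throttle(long nr_reclaimable, long nr_writeback,
                            long bdi_nr_reclaimable, long bdi_nr_writeback,
                            long background_thresh, long dirty_thresh,
                            long bdi_thresh)
{
        /* Per-bdi usage is within its own limit: nothing to throttle. */
        if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
                return false;

        /*
         * The system as a whole is still clean (below half the total
         * limit): let background writeback catch up and let the bdi
         * limit ramp up without forcing small writeouts.
         */
        if (nr_reclaimable + nr_writeback <
            (background_thresh + dirty_thresh) / 2)
                return false;

        return true;
}

With illustrative values of background_thresh = 100 and dirty_thresh = 400
pages, the midpoint is (100 + 400) / 2 = 250, so a task is only throttled
against its bdi limit once global dirty plus writeback pages exceed 250.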
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--   mm/page-writeback.c | 19
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 838a5e31394c..81a91e6f1f99 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -355,8 +355,8 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
  */
 static void balance_dirty_pages(struct address_space *mapping)
 {
-	long bdi_nr_reclaimable;
-	long bdi_nr_writeback;
+	long nr_reclaimable, bdi_nr_reclaimable;
+	long nr_writeback, bdi_nr_writeback;
 	long background_thresh;
 	long dirty_thresh;
 	long bdi_thresh;
@@ -376,11 +376,26 @@ static void balance_dirty_pages(struct address_space *mapping)
 		get_dirty_limits(&background_thresh, &dirty_thresh,
 				&bdi_thresh, bdi);
+
+		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+					global_page_state(NR_UNSTABLE_NFS);
+		nr_writeback = global_page_state(NR_WRITEBACK);
+
 		bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
 		bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
+
 		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
 			break;
 
+		/*
+		 * Throttle it only when the background writeback cannot
+		 * catch-up. This avoids (excessively) small writeouts
+		 * when the bdi limits are ramping up.
+		 */
+		if (nr_reclaimable + nr_writeback <
+				(background_thresh + dirty_thresh) / 2)
+			break;
+
 		if (!bdi->dirty_exceeded)
 			bdi->dirty_exceeded = 1;