summaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorWu Fengguang <fengguang.wu@intel.com>2011-06-12 01:53:57 +0200
committerWu Fengguang <fengguang.wu@intel.com>2011-06-19 18:25:46 +0200
commit36715cef0770b7e2547892b7c3197fc024274630 (patch)
tree34b690df719e6e46a37e0cef40b8c21f34bc36f8 /mm
parentwriteback: trace event writeback_queue_io (diff)
downloadlinux-36715cef0770b7e2547892b7c3197fc024274630.tar.xz
linux-36715cef0770b7e2547892b7c3197fc024274630.zip
writeback: skip tmpfs early in balance_dirty_pages_ratelimited_nr()
This helps prevent tmpfs dirtiers from skewing the per-cpu bdp_ratelimits.

Acked-by: Jan Kara <jack@suse.cz>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Diffstat (limited to 'mm')
-rw-r--r--mm/page-writeback.c7
1 file changed, 4 insertions, 3 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b2529f8f8be0..1965d05a29cc 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -490,9 +490,6 @@ static void balance_dirty_pages(struct address_space *mapping,
bool dirty_exceeded = false;
struct backing_dev_info *bdi = mapping->backing_dev_info;
- if (!bdi_cap_account_dirty(bdi))
- return;
-
for (;;) {
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
@@ -631,9 +628,13 @@ static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
unsigned long nr_pages_dirtied)
{
+ struct backing_dev_info *bdi = mapping->backing_dev_info;
unsigned long ratelimit;
unsigned long *p;
+ if (!bdi_cap_account_dirty(bdi))
+ return;
+
ratelimit = ratelimit_pages;
if (mapping->backing_dev_info->dirty_exceeded)
ratelimit = 8;