author	Tejun Heo <tj@kernel.org>	2015-05-23 00:23:20 +0200
committer	Jens Axboe <axboe@fb.com>	2015-06-02 16:38:12 +0200
commit	8a73179956e649df0d4b3250db17734f272d8266 (patch)
tree	941373ecb8e170f874c6343ba35877b337f33443 /mm
parent	writeback: clean up wb_dirty_limit() (diff)
writeback: reorganize [__]wb_update_bandwidth()
__wb_update_bandwidth() is called from two places - mm/page-writeback.c::balance_dirty_pages() and fs/fs-writeback.c::wb_writeback(). The latter updates only the write bandwidth while the former also deals with the dirty ratelimit. The two callsites are distinguished by whether the @thresh parameter is zero or not, which is cryptic. In addition, the two files define their own different versions of wb_update_bandwidth() on top of __wb_update_bandwidth(), which is confusing to say the least. This patch cleans up [__]wb_update_bandwidth() in the following ways.

* __wb_update_bandwidth() now takes an explicit @update_ratelimit parameter to gate dirty ratelimit handling.

* mm/page-writeback.c::wb_update_bandwidth() is flattened into its caller - balance_dirty_pages().

* fs/fs-writeback.c::wb_update_bandwidth() is moved to mm/page-writeback.c and __wb_update_bandwidth() is made static.

* While at it, add a lockdep assertion to __wb_update_bandwidth().

Except for the lockdep addition, this is pure reorganization and doesn't introduce any behavioral changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
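The core of the cleanup is replacing the "thresh == 0 means skip the ratelimit work" convention with an explicit boolean argument. The following is a minimal, self-contained userspace C sketch of that pattern for illustration only; the function and caller names here are hypothetical stand-ins, not the kernel code (the actual change is in the diff below).

#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in for __wb_update_bandwidth(): an explicit boolean gates the
 * optional ratelimit work instead of overloading a threshold argument
 * with a "zero means skip" meaning.
 */
static void update_bandwidth(unsigned long thresh, unsigned long dirty,
			     bool update_ratelimit)
{
	printf("updating write bandwidth\n");
	if (update_ratelimit)
		printf("also updating dirty ratelimit (thresh=%lu dirty=%lu)\n",
		       thresh, dirty);
}

/* Caller that only needs the write bandwidth, like the flusher path. */
static void writeback_path(void)
{
	update_bandwidth(0, 0, false);
}

/* Caller that also adjusts the ratelimit, like balance_dirty_pages(). */
static void dirtier_path(unsigned long thresh, unsigned long dirty)
{
	update_bandwidth(thresh, dirty, true);
}

int main(void)
{
	writeback_path();
	dirtier_path(100, 42);
	return 0;
}

The explicit flag makes the intent visible at each callsite, which is what the @update_ratelimit parameter achieves in the patch.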
Diffstat (limited to 'mm')
-rw-r--r--	mm/page-writeback.c	45
1 file changed, 22 insertions(+), 23 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c7745a7fe11e..bebdd41b8d8e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1160,19 +1160,22 @@ static void wb_update_dirty_ratelimit(struct bdi_writeback *wb,
trace_bdi_dirty_ratelimit(wb->bdi, dirty_rate, task_ratelimit);
}
-void __wb_update_bandwidth(struct bdi_writeback *wb,
- unsigned long thresh,
- unsigned long bg_thresh,
- unsigned long dirty,
- unsigned long wb_thresh,
- unsigned long wb_dirty,
- unsigned long start_time)
+static void __wb_update_bandwidth(struct bdi_writeback *wb,
+ unsigned long thresh,
+ unsigned long bg_thresh,
+ unsigned long dirty,
+ unsigned long wb_thresh,
+ unsigned long wb_dirty,
+ unsigned long start_time,
+ bool update_ratelimit)
{
unsigned long now = jiffies;
unsigned long elapsed = now - wb->bw_time_stamp;
unsigned long dirtied;
unsigned long written;
+ lockdep_assert_held(&wb->list_lock);
+
/*
* rate-limit, only update once every 200ms.
*/
@@ -1189,7 +1192,7 @@ void __wb_update_bandwidth(struct bdi_writeback *wb,
if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time))
goto snapshot;
- if (thresh) {
+ if (update_ratelimit) {
global_update_bandwidth(thresh, dirty, now);
wb_update_dirty_ratelimit(wb, thresh, bg_thresh, dirty,
wb_thresh, wb_dirty,
@@ -1203,20 +1206,9 @@ snapshot:
wb->bw_time_stamp = now;
}
-static void wb_update_bandwidth(struct bdi_writeback *wb,
- unsigned long thresh,
- unsigned long bg_thresh,
- unsigned long dirty,
- unsigned long wb_thresh,
- unsigned long wb_dirty,
- unsigned long start_time)
+void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
{
- if (time_is_after_eq_jiffies(wb->bw_time_stamp + BANDWIDTH_INTERVAL))
- return;
- spin_lock(&wb->list_lock);
- __wb_update_bandwidth(wb, thresh, bg_thresh, dirty,
- wb_thresh, wb_dirty, start_time);
- spin_unlock(&wb->list_lock);
+ __wb_update_bandwidth(wb, 0, 0, 0, 0, 0, start_time, false);
}
/*
@@ -1467,8 +1459,15 @@ static void balance_dirty_pages(struct address_space *mapping,
if (dirty_exceeded && !wb->dirty_exceeded)
wb->dirty_exceeded = 1;
- wb_update_bandwidth(wb, dirty_thresh, background_thresh,
- nr_dirty, wb_thresh, wb_dirty, start_time);
+ if (time_is_before_jiffies(wb->bw_time_stamp +
+ BANDWIDTH_INTERVAL)) {
+ spin_lock(&wb->list_lock);
+ __wb_update_bandwidth(wb, dirty_thresh,
+ background_thresh, nr_dirty,
+ wb_thresh, wb_dirty, start_time,
+ true);
+ spin_unlock(&wb->list_lock);
+ }
dirty_ratelimit = wb->dirty_ratelimit;
pos_ratio = wb_position_ratio(wb, dirty_thresh,