author | Tejun Heo <tj@kernel.org> | 2015-05-23 00:23:31 +0200
---|---|---
committer | Jens Axboe <axboe@fb.com> | 2015-06-02 16:38:13 +0200
commit | aa661bbe1e61ce80ca4ae98804f673ede94b0827 |
tree | 43103c366edc05bb598c042ec7afa31fb9106939 | fs/fs-writeback.c
parent | writeback: separate out domain_dirty_limits() |
writeback: move over_bground_thresh() to mm/page-writeback.c
and rename it to wb_over_bg_thresh(). The function is closely tied to
the dirty throttling mechanism implemented in page-writeback.c. This
relocation will allow future updates necessary for cgroup writeback
support.
While at it, add a function comment.
This is pure reorganization and doesn't introduce any behavioral
changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r-- | fs/fs-writeback.c | 20 |
1 file changed, 2 insertions, 18 deletions
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 51c8a5b14cdf..da355879ba7c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1071,22 +1071,6 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
 	return nr_pages - work.nr_pages;
 }
 
-static bool over_bground_thresh(struct bdi_writeback *wb)
-{
-	unsigned long background_thresh, dirty_thresh;
-
-	global_dirty_limits(&background_thresh, &dirty_thresh);
-
-	if (global_page_state(NR_FILE_DIRTY) +
-	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
-		return true;
-
-	if (wb_stat(wb, WB_RECLAIMABLE) > wb_calc_thresh(wb, background_thresh))
-		return true;
-
-	return false;
-}
-
 /*
  * Explicit flushing or periodic writeback of "old" data.
  *
@@ -1136,7 +1120,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 * For background writeout, stop when we are below the
 		 * background dirty threshold
 		 */
-		if (work->for_background && !over_bground_thresh(wb))
+		if (work->for_background && !wb_over_bg_thresh(wb))
 			break;
 
 		/*
@@ -1227,7 +1211,7 @@ static unsigned long get_nr_dirty_pages(void)
 
 static long wb_check_background_flush(struct bdi_writeback *wb)
 {
-	if (over_bground_thresh(wb)) {
+	if (wb_over_bg_thresh(wb)) {
 		struct wb_writeback_work work = {
 			.nr_pages	= LONG_MAX,
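For reference, the relocated helper in mm/page-writeback.c would look roughly like the sketch below: it keeps the logic removed from fs/fs-writeback.c above, renamed to wb_over_bg_thresh() and given the function comment the message mentions. The kerneldoc wording is an assumption, and the mm/page-writeback.c side is not shown here since this diffstat is limited to fs/fs-writeback.c.

```c
/*
 * Sketch of the function as relocated to mm/page-writeback.c (assumed
 * form; this hunk is not part of the fs/fs-writeback.c diff above).
 */

/**
 * wb_over_bg_thresh - does @wb need to be written back?
 * @wb: bdi_writeback of interest
 *
 * Returns true if @wb has exceeded its background dirty threshold,
 * either because the global dirty counts are over the background
 * threshold or because @wb's own reclaimable pages exceed its share.
 */
bool wb_over_bg_thresh(struct bdi_writeback *wb)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (wb_stat(wb, WB_RECLAIMABLE) > wb_calc_thresh(wb, background_thresh))
		return true;

	return false;
}
```

The callers changed above (wb_writeback() and wb_check_background_flush()) keep the same semantics; only the name and the home of the threshold check move, which is what lets later cgroup writeback work extend it in one place.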