author     Chao Yu <yuchao0@huawei.com>        2018-06-04 17:20:36 +0200
committer  Jaegeuk Kim <jaegeuk@kernel.org>    2018-06-04 23:33:20 +0200
commit     c29fd0c0e26dacb7a33ad166587059818a94b4e0 (patch)
tree       6a25f17833047796fccde3cd7c4e17d35199e640 /fs/f2fs/node.c
parent     f2fs: don't change wbc->sync_mode (diff)
f2fs: let sync node IO interrupt async one
Although mixed sync/async IOs can have contiguous LBAs, they carry different IO priorities, so the block IO scheduler puts them into different queues and commits them separately, resulting in split IOs and worse performance.

This patch gives high priority to synchronous node IO: once a synchronous flow starts, it can interrupt the asynchronous writeback flow of the system flusher, so larger IOs can be expected.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
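As a rough illustration of the scheme, the userspace sketch below models the same idea with C11 atomics and POSIX threads: a synchronous flusher bumps a shared counter before it starts, and the background (async) pass polls that counter and bails out early so the sync pass gets the device to itself. All identifiers here (sync_req, async_writeback, sync_writeback) are illustrative stand-ins, not the kernel implementation; the real patch uses the per-type counter sbi->wb_sync_req[NODE] shown in the diff below.

/*
 * Illustrative userspace model (not kernel code): a shared atomic counter
 * lets a synchronous flusher preempt a background one, analogous to the
 * wb_sync_req[NODE] handling in this patch.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int sync_req;     /* number of active synchronous flushers */

/* Background (WB_SYNC_NONE-like) pass: back off when a sync pass is active. */
static void async_writeback(int npages)
{
        for (int i = 0; i < npages; i++) {
                if (atomic_load(&sync_req) > 0) {
                        printf("async: interrupted after %d pages\n", i);
                        return;
                }
                usleep(1000);           /* pretend to submit one page */
        }
        printf("async: wrote all %d pages\n", npages);
}

/* Foreground (WB_SYNC_ALL-like) pass: announce itself, flush, retire. */
static void sync_writeback(int npages)
{
        atomic_fetch_add(&sync_req, 1); /* async writers now back off */
        for (int i = 0; i < npages; i++)
                usleep(1000);           /* pretend to submit one page */
        atomic_fetch_sub(&sync_req, 1); /* async writers may resume */
        printf("sync: wrote %d pages\n", npages);
}

static void *async_thread(void *arg)
{
        (void)arg;
        async_writeback(1000);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, async_thread, NULL);
        usleep(10000);                  /* let the async pass get going */
        sync_writeback(100);            /* sync pass interrupts it */
        pthread_join(t, NULL);
        return 0;
}

Built with something like cc -pthread sketch.c, the async thread normally reports being interrupted while the sync pass runs to completion, which mirrors the intent of the checks added to f2fs_write_node_pages() and f2fs_sync_node_pages() below.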
Diffstat (limited to 'fs/f2fs/node.c')
-rw-r--r--  fs/f2fs/node.c  |  21
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 64ad5466887f..47d0e64a95a8 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1596,21 +1596,28 @@ int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
         int step = 0;
         int nwritten = 0;
         int ret = 0;
-        int nr_pages;
+        int nr_pages, done = 0;

         pagevec_init(&pvec);

 next_step:
         index = 0;

-        while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
-                        PAGECACHE_TAG_DIRTY))) {
+        while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
+                        NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
                 int i;

                 for (i = 0; i < nr_pages; i++) {
                         struct page *page = pvec.pages[i];
                         bool submitted = false;

+                        /* give a priority to WB_SYNC threads */
+                        if (atomic_read(&sbi->wb_sync_req[NODE]) &&
+                                        wbc->sync_mode == WB_SYNC_NONE) {
+                                done = 1;
+                                break;
+                        }
+
                         /*
                          * flushing sequence with step:
                          * 0. indirect nodes
@@ -1738,6 +1745,11 @@ static int f2fs_write_node_pages(struct address_space *mapping,
         if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
                 goto skip_write;

+        if (wbc->sync_mode == WB_SYNC_ALL)
+                atomic_inc(&sbi->wb_sync_req[NODE]);
+        else if (atomic_read(&sbi->wb_sync_req[NODE]))
+                goto skip_write;
+
         trace_f2fs_writepages(mapping->host, wbc, NODE);

         diff = nr_pages_to_write(sbi, NODE, wbc);
@@ -1745,6 +1757,9 @@ static int f2fs_write_node_pages(struct address_space *mapping,
         f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
         blk_finish_plug(&plug);
         wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
+
+        if (wbc->sync_mode == WB_SYNC_ALL)
+                atomic_dec(&sbi->wb_sync_req[NODE]);
         return 0;

 skip_write: