author     Gao Xiang <gaoxiang25@huawei.com>    2019-10-08 14:56:13 +0200
committer  Gao Xiang <gaoxiang25@huawei.com>    2019-10-15 18:07:56 +0200
commit     bda17a4577da729d17b8f87bf3279b9db201d8ca (patch)
tree       d0a3e5da54710a7992164a226005e3b5342fb985 /fs/erofs
parent     erofs: clean up collection handling routines (diff)
erofs: remove dead code since managed cache is now built-in
After commit 4279f3f9889f ("staging: erofs: turn cache strategies
into mount options"), cache strategies are selected by mount options
rather than by the old build-time configs. Let's kill the now-useless
code for those obsolete build options.
Link: https://lore.kernel.org/r/20191008125616.183715-2-gaoxiang25@huawei.com
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
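
[Editor's note] For context on why the deleted zdata.c branches were dead: the removed nocache flag used __builtin_constant_p(mc) so that, when the old build options compiled the managed cache out and callers passed a literal NULL mapping, all cache handling folded away at compile time. Below is a minimal, standalone userspace sketch of that pattern, with made-up names (pickup_page(), a dummy struct address_space), not the kernel code itself:

	/* Hedged sketch only: illustrates the __builtin_constant_p() pattern the
	 * patch deletes; names and types are placeholders, not kernel code. */
	#include <stdbool.h>
	#include <stdio.h>

	struct address_space { int dummy; };		/* stand-in for the page-cache mapping */

	static struct address_space managed_cache;	/* always present once the cache is built-in */

	static inline int pickup_page(struct address_space *mc)
	{
		/* the removed trick: detect a compile-time NULL cache mapping */
		const bool nocache = __builtin_constant_p(mc) ? !mc : false;

		if (nocache)
			return 0;	/* formerly compiled out along with the old build option */
		return 1;		/* managed-cache path: now the only reachable path */
	}

	int main(void)
	{
		/* callers can no longer pass a literal NULL mapping, so the
		 * nocache branch above can never be taken */
		printf("cached=%d\n", pickup_page(&managed_cache));
		return 0;
	}

__builtin_constant_p(mc) only folds to true when the compiler sees a constant argument (e.g. a literal NULL after inlining), which is how the original code avoided #ifdefs; since the managed cache is now always built in and the strategy is chosen per mount (see commit 4279f3f9889f above), that compile-time selection has nothing left to select.
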
Diffstat (limited to 'fs/erofs')
-rw-r--r--  fs/erofs/utils.c | 13
-rw-r--r--  fs/erofs/zdata.c | 25
2 files changed, 10 insertions, 28 deletions
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index d92b3e753a6f..f66043ee16b9 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -149,8 +149,7 @@ static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
 }
 
 static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
-					   struct erofs_workgroup *grp,
-					   bool cleanup)
+					   struct erofs_workgroup *grp)
 {
 	/*
 	 * If managed cache is on, refcount of workgroups
@@ -188,8 +187,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 }
 
 static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
-					      unsigned long nr_shrink,
-					      bool cleanup)
+					      unsigned long nr_shrink)
 {
 	pgoff_t first_index = 0;
 	void *batch[PAGEVEC_SIZE];
@@ -208,7 +206,7 @@ repeat:
 		first_index = grp->index + 1;
 
 		/* try to shrink each valid workgroup */
-		if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
+		if (!erofs_try_to_release_workgroup(sbi, grp))
 			continue;
 
 		++freed;
@@ -245,7 +243,8 @@ void erofs_shrinker_unregister(struct super_block *sb)
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 
 	mutex_lock(&sbi->umount_mutex);
-	erofs_shrink_workstation(sbi, ~0UL, true);
+	/* clean up all remaining workgroups in memory */
+	erofs_shrink_workstation(sbi, ~0UL);
 
 	spin_lock(&erofs_sb_list_lock);
 	list_del(&sbi->list);
@@ -294,7 +293,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink,
 		spin_unlock(&erofs_sb_list_lock);
 		sbi->shrinker_run_no = run_no;
 
-		freed += erofs_shrink_workstation(sbi, nr, false);
+		freed += erofs_shrink_workstation(sbi, nr);
 
 		spin_lock(&erofs_sb_list_lock);
 		/* Get the next list element before we move this one */
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index ef32757d1aac..93f8bc1a64f6 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -574,7 +574,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 				struct list_head *pagepool)
 {
 	struct inode *const inode = fe->inode;
-	struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode);
+	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 	struct erofs_map_blocks *const map = &fe->map;
 	struct z_erofs_collector *const clt = &fe->clt;
 	const loff_t offset = page_offset(page);
@@ -997,8 +997,6 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
 					       struct address_space *mc,
 					       gfp_t gfp)
 {
-	/* determined at compile time to avoid too many #ifdefs */
-	const bool nocache = __builtin_constant_p(mc) ? !mc : false;
 	const pgoff_t index = pcl->obj.index;
 	bool tocache = false;
 
@@ -1019,7 +1017,7 @@ repeat:
 	 * the cached page has not been allocated and
 	 * an placeholder is out there, prepare it now.
 	 */
-	if (!nocache && page == PAGE_UNALLOCATED) {
+	if (page == PAGE_UNALLOCATED) {
 		tocache = true;
 		goto out_allocpage;
 	}
@@ -1032,21 +1030,6 @@ repeat:
 	mapping = READ_ONCE(page->mapping);
 
 	/*
-	 * if managed cache is disabled, it's no way to
-	 * get such a cached-like page.
-	 */
-	if (nocache) {
-		/* if managed cache is disabled, it is impossible `justfound' */
-		DBG_BUGON(justfound);
-
-		/* and it should be locked, not uptodate, and not truncated */
-		DBG_BUGON(!PageLocked(page));
-		DBG_BUGON(PageUptodate(page));
-		DBG_BUGON(!mapping);
-		goto out;
-	}
-
-	/*
 	 * unmanaged (file) pages are all locked solidly,
 	 * therefore it is impossible for `mapping' to be NULL.
 	 */
@@ -1102,7 +1085,7 @@ out_allocpage:
 		cpu_relax();
 		goto repeat;
 	}
-	if (nocache || !tocache)
+	if (!tocache)
 		goto out;
 	if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
 		page->mapping = Z_EROFS_MAPPING_STAGING;
@@ -1208,7 +1191,7 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
 				   struct z_erofs_unzip_io *fgq,
 				   bool force_fg)
 {
-	struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
+	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
 	struct z_erofs_unzip_io *q[NR_JOBQUEUES];
 	struct bio *bio;