path: root/fs/f2fs/compress.c
author	Chao Yu <yuchao0@huawei.com>	2020-09-14 11:05:13 +0200
committer	Jaegeuk Kim <jaegeuk@kernel.org>	2020-09-29 18:16:32 +0200
commit	31083031709eea6530f0551d59eecdb2e68265ef (patch)
tree	dac54d5eb4d654411f6d0836155ead473e3755ec /fs/f2fs/compress.c
parent	f2fs: fix to do sanity check on segment/section count (diff)
f2fs: compress: introduce page array slab cache
Add a per-sbi slab cache "f2fs_page_array_entry-%u:%u" for memory allocation
of page pointer array in compress context.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
[Jaegeuk Kim: Fix wrong memory allocation]
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
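For illustration only, a minimal sketch of how a caller is expected to use the new helpers introduced by this patch. page_array_alloc()/page_array_free() and struct compress_ctx are as defined in fs/f2fs/compress.c and fs/f2fs/f2fs.h after the change; the wrapper function names below are hypothetical and would have to live inside fs/f2fs/compress.c, since the helpers are static there.

static int example_alloc_cluster_rpages(struct compress_ctx *cc)
{
	/* Served from the per-sbi slab when the array fits
	 * sbi->page_array_slab_size; page_array_alloc() otherwise
	 * falls back to f2fs_kzalloc(). */
	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}

static void example_free_cluster_rpages(struct compress_ctx *cc)
{
	/* The element count must match the allocation so the helper
	 * can tell whether to return the array to the slab or kfree() it. */
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
}

The slab is sized for the mount's compress cluster size (sizeof(struct page *) << compress_log_size), so per-cluster rpages/cpages/tpages arrays normally come from the slab and only oversized requests fall back to the generic allocator.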
Diffstat (limited to 'fs/f2fs/compress.c')
-rw-r--r--	fs/f2fs/compress.c	116
1 file changed, 86 insertions(+), 30 deletions(-)
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 7895186cc765..4b417185f630 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -17,6 +17,30 @@
#include "node.h"
#include <trace/events/f2fs.h>
+static void *page_array_alloc(struct inode *inode, int nr)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	unsigned int size = sizeof(struct page *) * nr;
+
+	if (likely(size <= sbi->page_array_slab_size))
+		return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
+	return f2fs_kzalloc(sbi, size, GFP_NOFS);
+}
+
+static void page_array_free(struct inode *inode, void *pages, int nr)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	unsigned int size = sizeof(struct page *) * nr;
+
+	if (!pages)
+		return;
+
+	if (likely(size <= sbi->page_array_slab_size))
+		kmem_cache_free(sbi->page_array_slab, pages);
+	else
+		kfree(pages);
+}
+
struct f2fs_compress_ops {
int (*init_compress_ctx)(struct compress_ctx *cc);
void (*destroy_compress_ctx)(struct compress_ctx *cc);
@@ -130,19 +154,16 @@ struct page *f2fs_compress_control_page(struct page *page)
int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
-
if (cc->nr_rpages)
return 0;
- cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
- cc->log_cluster_size, GFP_NOFS);
+ cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
return cc->rpages ? 0 : -ENOMEM;
}
void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
- kfree(cc->rpages);
+ page_array_free(cc->inode, cc->rpages, cc->cluster_size);
cc->rpages = NULL;
cc->nr_rpages = 0;
cc->nr_cpages = 0;
@@ -573,11 +594,11 @@ static void *f2fs_vmap(struct page **pages, unsigned int count)
static int f2fs_compress_pages(struct compress_ctx *cc)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
struct f2fs_inode_info *fi = F2FS_I(cc->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
- unsigned int max_len, nr_cpages;
+ unsigned int max_len, new_nr_cpages;
+ struct page **new_cpages;
int i, ret;
trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
@@ -592,8 +613,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
max_len = COMPRESS_HEADER_SIZE + cc->clen;
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
- cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
- cc->nr_cpages, GFP_NOFS);
+ cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
if (!cc->cpages) {
ret = -ENOMEM;
goto destroy_compress_ctx;
@@ -635,16 +655,28 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
cc->cbuf->reserved[i] = cpu_to_le32(0);
- nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
+ new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
+
+ /* Now we're going to cut unnecessary tail pages */
+ new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
+ if (!new_cpages) {
+ ret = -ENOMEM;
+ goto out_vunmap_cbuf;
+ }
/* zero out any unused part of the last page */
memset(&cc->cbuf->cdata[cc->clen], 0,
- (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
+ (new_nr_cpages * PAGE_SIZE) -
+ (cc->clen + COMPRESS_HEADER_SIZE));
vm_unmap_ram(cc->cbuf, cc->nr_cpages);
vm_unmap_ram(cc->rbuf, cc->cluster_size);
- for (i = nr_cpages; i < cc->nr_cpages; i++) {
+ for (i = 0; i < cc->nr_cpages; i++) {
+ if (i < new_nr_cpages) {
+ new_cpages[i] = cc->cpages[i];
+ continue;
+ }
f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
@@ -652,7 +684,9 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
if (cops->destroy_compress_ctx)
cops->destroy_compress_ctx(cc);
- cc->nr_cpages = nr_cpages;
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ cc->cpages = new_cpages;
+ cc->nr_cpages = new_nr_cpages;
trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
cc->clen, ret);
@@ -667,7 +701,7 @@ out_free_cpages:
if (cc->cpages[i])
f2fs_compress_free_page(cc->cpages[i]);
}
- kfree(cc->cpages);
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
destroy_compress_ctx:
if (cops->destroy_compress_ctx)
@@ -706,8 +740,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
goto out_free_dic;
}
- dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
- dic->cluster_size, GFP_NOFS);
+ dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
if (!dic->tpages) {
ret = -ENOMEM;
goto out_free_dic;
@@ -1046,6 +1079,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
{
struct compress_ctx cc = {
+ .inode = inode,
.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
.cluster_size = F2FS_I(inode)->i_cluster_size,
.rpages = fsdata,
@@ -1149,7 +1183,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
*/
down_read(&sbi->node_write);
} else if (!f2fs_trylock_op(sbi)) {
- return -EAGAIN;
+ goto out_free;
}
set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
@@ -1179,8 +1213,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
cic->inode = inode;
atomic_set(&cic->pending_pages, cc->nr_cpages);
- cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
- cc->log_cluster_size, GFP_NOFS);
+ cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
if (!cic->rpages)
goto out_put_cic;
@@ -1274,11 +1307,13 @@ unlock_continue:
spin_unlock(&fi->i_size_lock);
f2fs_put_rpages(cc);
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ cc->cpages = NULL;
f2fs_destroy_compress_ctx(cc);
return 0;
out_destroy_crypt:
- kfree(cic->rpages);
+ page_array_free(cc->inode, cic->rpages, cc->cluster_size);
for (--i; i >= 0; i--)
fscrypt_finalize_bounce_page(&cc->cpages[i]);
@@ -1296,6 +1331,9 @@ out_unlock_op:
up_read(&sbi->node_write);
else
f2fs_unlock_op(sbi);
+out_free:
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ cc->cpages = NULL;
return -EAGAIN;
}
@@ -1322,7 +1360,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
end_page_writeback(cic->rpages[i]);
}
- kfree(cic->rpages);
+ page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
kfree(cic);
}
@@ -1419,8 +1457,6 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
err = f2fs_write_compressed_pages(cc, submitted,
wbc, io_type);
- kfree(cc->cpages);
- cc->cpages = NULL;
if (!err)
return 0;
f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
@@ -1446,8 +1482,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
if (!dic)
return ERR_PTR(-ENOMEM);
- dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
- cc->log_cluster_size, GFP_NOFS);
+ dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
if (!dic->rpages) {
kfree(dic);
return ERR_PTR(-ENOMEM);
@@ -1466,8 +1501,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->rpages[i] = cc->rpages[i];
dic->nr_rpages = cc->cluster_size;
- dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
- dic->nr_cpages, GFP_NOFS);
+ dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
if (!dic->cpages)
goto out_free;
@@ -1502,7 +1536,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
f2fs_compress_free_page(dic->tpages[i]);
}
- kfree(dic->tpages);
+ page_array_free(dic->inode, dic->tpages, dic->cluster_size);
}
if (dic->cpages) {
@@ -1511,10 +1545,10 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
f2fs_compress_free_page(dic->cpages[i]);
}
- kfree(dic->cpages);
+ page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
}
- kfree(dic->rpages);
+ page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
kfree(dic);
}
@@ -1543,3 +1577,25 @@ unlock:
unlock_page(rpage);
}
}
+
+int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
+{
+	dev_t dev = sbi->sb->s_bdev->bd_dev;
+	char slab_name[32];
+
+	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
+
+	sbi->page_array_slab_size = sizeof(struct page *) <<
+					F2FS_OPTION(sbi).compress_log_size;
+
+	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
+					sbi->page_array_slab_size);
+	if (!sbi->page_array_slab)
+		return -ENOMEM;
+	return 0;
+}
+
+void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
+{
+	kmem_cache_destroy(sbi->page_array_slab);
+}