author    Theodore Ts'o <tytso@mit.edu>  2015-05-18 19:19:47 +0200
committer Theodore Ts'o <tytso@mit.edu>  2015-05-18 19:19:47 +0200
commit    8ee0371470038371729a39ee6669a2132ac47649 (patch)
tree      ffb2781725fa6cdbd98a19d03165c7772189ba81 /fs
parent    ext4: clean up superblock encryption mode fields (diff)
ext4 crypto: use slab caches
Use slab caches for the ext4_crypto_ctx and ext4_crypt_info structures for slightly better memory efficiency and debuggability.

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
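In a nutshell, the patch swaps kzalloc()/kmalloc() plus kfree() for dedicated slab caches. The following is a minimal sketch of that pattern using the cache, flags, and helpers the patch itself introduces; the example_* wrapper names are illustrative only and do not appear in the patch, and struct ext4_crypto_ctx is assumed to be visible from the ext4 headers.

	#include <linux/slab.h>		/* KMEM_CACHE(), kmem_cache_* helpers */

	/* Created once; object size is taken from the structure definition.
	 * SLAB_RECLAIM_ACCOUNT accounts the cache's pages as reclaimable,
	 * matching what ext4_init_crypto() does in this patch.
	 */
	static struct kmem_cache *ext4_crypto_ctx_cachep;

	static int example_init(void)				/* cf. ext4_init_crypto() */
	{
		ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
						    SLAB_RECLAIM_ACCOUNT);
		if (!ext4_crypto_ctx_cachep)
			return -ENOMEM;
		return 0;
	}

	static struct ext4_crypto_ctx *example_alloc(void)	/* cf. ext4_get_crypto_ctx() */
	{
		/* kzalloc(sizeof(...), GFP_NOFS) becomes a zeroing cache allocation */
		return kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
	}

	static void example_free(struct ext4_crypto_ctx *ctx)	/* cf. ext4_release_crypto_ctx() */
	{
		/* the matching kfree(ctx) becomes a cache free */
		kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
	}

	static void example_exit(void)				/* cf. ext4_exit_crypto() */
	{
		if (ext4_crypto_ctx_cachep)
			kmem_cache_destroy(ext4_crypto_ctx_cachep);
		ext4_crypto_ctx_cachep = NULL;
	}

A per-structure cache hands out exact-sized objects instead of rounding up to a generic kmalloc size class, and the cache shows up by name in /proc/slabinfo, which is where the memory-efficiency and debuggability gains mentioned above come from.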
Diffstat (limited to 'fs')
-rw-r--r--  fs/ext4/crypto.c      | 60
-rw-r--r--  fs/ext4/crypto_key.c  | 12
-rw-r--r--  fs/ext4/ext4.h        |  1
3 files changed, 39 insertions(+), 34 deletions(-)
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index 3a25aa4f3d94..1c34f0eb125b 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -55,6 +55,9 @@ static mempool_t *ext4_bounce_page_pool;
static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);
+static struct kmem_cache *ext4_crypto_ctx_cachep;
+struct kmem_cache *ext4_crypt_info_cachep;
+
/**
* ext4_release_crypto_ctx() - Releases an encryption context
* @ctx: The encryption context to release.
@@ -79,7 +82,7 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
if (ctx->tfm)
crypto_free_tfm(ctx->tfm);
- kfree(ctx);
+ kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
} else {
spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
@@ -88,23 +91,6 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
}
/**
- * ext4_alloc_and_init_crypto_ctx() - Allocates and inits an encryption context
- * @mask: The allocation mask.
- *
- * Return: An allocated and initialized encryption context on success. An error
- * value or NULL otherwise.
- */
-static struct ext4_crypto_ctx *ext4_alloc_and_init_crypto_ctx(gfp_t mask)
-{
- struct ext4_crypto_ctx *ctx = kzalloc(sizeof(struct ext4_crypto_ctx),
- mask);
-
- if (!ctx)
- return ERR_PTR(-ENOMEM);
- return ctx;
-}
-
-/**
* ext4_get_crypto_ctx() - Gets an encryption context
* @inode: The inode for which we are doing the crypto
*
@@ -121,8 +107,6 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
BUG_ON(ci == NULL);
- if (!ext4_read_workqueue)
- ext4_init_crypto();
/*
* We first try getting the ctx from a free list because in
@@ -141,9 +125,9 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
list_del(&ctx->free_list);
spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
if (!ctx) {
- ctx = ext4_alloc_and_init_crypto_ctx(GFP_NOFS);
- if (IS_ERR(ctx)) {
- res = PTR_ERR(ctx);
+ ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
+ if (!ctx) {
+ res = -ENOMEM;
goto out;
}
ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
@@ -217,7 +201,7 @@ void ext4_exit_crypto(void)
}
if (pos->tfm)
crypto_free_tfm(pos->tfm);
- kfree(pos);
+ kmem_cache_free(ext4_crypto_ctx_cachep, pos);
}
INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
if (ext4_bounce_page_pool)
@@ -226,6 +210,12 @@ void ext4_exit_crypto(void)
if (ext4_read_workqueue)
destroy_workqueue(ext4_read_workqueue);
ext4_read_workqueue = NULL;
+ if (ext4_crypto_ctx_cachep)
+ kmem_cache_destroy(ext4_crypto_ctx_cachep);
+ ext4_crypto_ctx_cachep = NULL;
+ if (ext4_crypt_info_cachep)
+ kmem_cache_destroy(ext4_crypt_info_cachep);
+ ext4_crypt_info_cachep = NULL;
}
/**
@@ -238,23 +228,31 @@ void ext4_exit_crypto(void)
*/
int ext4_init_crypto(void)
{
- int i, res;
+ int i, res = -ENOMEM;
mutex_lock(&crypto_init);
if (ext4_read_workqueue)
goto already_initialized;
ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
- if (!ext4_read_workqueue) {
- res = -ENOMEM;
+ if (!ext4_read_workqueue)
+ goto fail;
+
+ ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
+ SLAB_RECLAIM_ACCOUNT);
+ if (!ext4_crypto_ctx_cachep)
+ goto fail;
+
+ ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
+ SLAB_RECLAIM_ACCOUNT);
+ if (!ext4_crypt_info_cachep)
goto fail;
- }
for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
struct ext4_crypto_ctx *ctx;
- ctx = ext4_alloc_and_init_crypto_ctx(GFP_KERNEL);
- if (IS_ERR(ctx)) {
- res = PTR_ERR(ctx);
+ ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
+ if (!ctx) {
+ res = -ENOMEM;
goto fail;
}
list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
index 0075e43ffea6..d6abe4687cd5 100644
--- a/fs/ext4/crypto_key.c
+++ b/fs/ext4/crypto_key.c
@@ -96,7 +96,7 @@ void ext4_free_encryption_info(struct inode *inode)
key_put(ci->ci_keyring_key);
crypto_free_ablkcipher(ci->ci_ctfm);
memzero_explicit(&ci->ci_raw, sizeof(ci->ci_raw));
- kfree(ci);
+ kmem_cache_free(ext4_crypt_info_cachep, ci);
ei->i_crypt_info = NULL;
}
@@ -113,6 +113,12 @@ int _ext4_get_encryption_info(struct inode *inode)
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int res;
+ if (!ext4_read_workqueue) {
+ res = ext4_init_crypto();
+ if (res)
+ return res;
+ }
+
if (ei->i_crypt_info) {
if (!ei->i_crypt_info->ci_keyring_key ||
key_validate(ei->i_crypt_info->ci_keyring_key) == 0)
@@ -134,7 +140,7 @@ int _ext4_get_encryption_info(struct inode *inode)
return -EINVAL;
res = 0;
- crypt_info = kmalloc(sizeof(struct ext4_crypt_info), GFP_KERNEL);
+ crypt_info = kmem_cache_alloc(ext4_crypt_info_cachep, GFP_KERNEL);
if (!crypt_info)
return -ENOMEM;
@@ -188,7 +194,7 @@ out:
if (res < 0) {
if (res == -ENOKEY)
res = 0;
- kfree(crypt_info);
+ kmem_cache_free(ext4_crypt_info_cachep, crypt_info);
} else {
ei->i_crypt_info = crypt_info;
crypt_info->ci_keyring_key = keyring_key;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 213536fdab9f..23e33fb3202e 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2059,6 +2059,7 @@ int ext4_get_policy(struct inode *inode,
struct ext4_encryption_policy *policy);
/* crypto.c */
+extern struct kmem_cache *ext4_crypt_info_cachep;
bool ext4_valid_contents_enc_mode(uint32_t mode);
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
extern struct workqueue_struct *ext4_read_workqueue;