author     Eric Dumazet <edumazet@google.com>  2014-05-27 19:28:55 +0200
committer  Herbert Xu <herbert@gondor.apana.org.au>  2014-06-20 15:26:05 +0200
commit     42614b0582542a2ab26d86956d92b4f54c69d735 (patch)
tree       7dd0927fff88427d50b45d2b279694f1a5f70954 /crypto/lzo.c
parent     crypto: skcipher - Don't use __crypto_dequeue_request() (diff)
crypto: lzo - try kmalloc() before vmalloc()
zswap allocates one LZO context per online cpu.

Using vmalloc() for small (16KB) memory areas has drawbacks: it slows down
/proc/vmallocinfo and /proc/meminfo reads, adds TLB pressure, and gives poor
NUMA locality, since the default NUMA policy at boot time is to interleave
pages:

edumazet:~# grep lzo /proc/vmallocinfo | head -4
0xffffc90006062000-0xffffc90006067000   20480 lzo_init+0x1b/0x30 pages=4 vmalloc N0=2 N1=2
0xffffc90006067000-0xffffc9000606c000   20480 lzo_init+0x1b/0x30 pages=4 vmalloc N0=2 N1=2
0xffffc9000606c000-0xffffc90006071000   20480 lzo_init+0x1b/0x30 pages=4 vmalloc N0=2 N1=2
0xffffc90006071000-0xffffc90006076000   20480 lzo_init+0x1b/0x30 pages=4 vmalloc N0=2 N1=2

This patch tries a regular kmalloc() first and falls back to vmalloc() in
case memory is too fragmented.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
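For reference, the allocation pattern used by the patch can be written as a
stand-alone helper. This is only an illustrative sketch, not part of the
patch; the demo_* function names are hypothetical. Try a physically
contiguous kmalloc() first and fall back to vmalloc() only when memory is
too fragmented, then pick the matching free routine with is_vmalloc_addr():

/*
 * Illustrative sketch only: try kmalloc() first, fall back to vmalloc(),
 * and free with whichever routine matches the address.
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *demo_alloc_workmem(size_t size)
{
	/* __GFP_NOWARN: a failure here is expected and handled below */
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vmalloc(size);	/* fragmented memory: fall back */
	return p;
}

static void demo_free_workmem(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}

Later kernels provide kvmalloc()/kvfree() helpers that implement this same
try-kmalloc-then-vmalloc pattern generically.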
Diffstat (limited to 'crypto/lzo.c')
-rw-r--r--  crypto/lzo.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/crypto/lzo.c b/crypto/lzo.c
index 1c2aa69c54b8..252e791d0ccc 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/crypto.h>
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/lzo.h>
 
 struct lzo_ctx {
@@ -30,7 +31,10 @@ static int lzo_init(struct crypto_tfm *tfm)
 {
 	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
+	ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS,
+				    GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+	if (!ctx->lzo_comp_mem)
+		ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
 	if (!ctx->lzo_comp_mem)
 		return -ENOMEM;
 
@@ -41,7 +45,10 @@ static void lzo_exit(struct crypto_tfm *tfm)
 {
 	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	vfree(ctx->lzo_comp_mem);
+	if (is_vmalloc_addr(ctx->lzo_comp_mem))
+		vfree(ctx->lzo_comp_mem);
+	else
+		kfree(ctx->lzo_comp_mem);
 }
 
 static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,