| | | |
|---|---|---|
| author | Theodore Ts'o <tytso@mit.edu> | 2015-05-31 19:34:24 +0200 |
| committer | Theodore Ts'o <tytso@mit.edu> | 2015-05-31 19:34:24 +0200 |
| commit | 95ea68b4c7105179f507d31f7bf571623373aa0b | |
| tree | 5aead7a970083ed83e40f57c7a25323075de792e | |
| parent | ext4 crypto: use per-inode tfm structure | |
ext4 crypto: fix memory leaks in ext4_encrypted_zeroout
ext4_encrypted_zeroout() could end up leaking a bio and bounce page.
Fortunately it's not used much. While we're fixing things up,
refactor out common code into the static function alloc_bounce_page()
and fix up error handling if mempool_alloc() fails.
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
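
For readers skimming the diff, here is the new error-handling flow in ext4_encrypt(), reassembled from the '+' and context lines of the patch below. It is only a fragment of fs/ext4/crypto.c, not a standalone example; the declarations of ctx, err and the page pointers live in the surrounding function.

```c
/* Post-patch flow in ext4_encrypt(), paraphrased from the diff below.
 * alloc_bounce_page() now returns ERR_PTR(-ENOMEM) if even the bounce-page
 * mempool is empty; that failure and a failure of ext4_page_crypto() both
 * funnel through the same errout path, which releases the crypto context
 * before the error pointer is returned to the caller. */
ciphertext_page = alloc_bounce_page(ctx);
if (IS_ERR(ciphertext_page))
        goto errout;
ctx->w.control_page = plaintext_page;
err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
                       plaintext_page, ciphertext_page);
if (err) {
        ciphertext_page = ERR_PTR(err);
errout:
        ext4_release_crypto_ctx(ctx);
        return ciphertext_page;
}
```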
-rw-r--r-- | fs/ext4/crypto.c | 62 |
1 file changed, 31 insertions, 31 deletions
```diff
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index c3a9b08309db..1c9a8c499369 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -314,6 +314,26 @@ static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
 	return 0;
 }
 
+static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
+{
+	struct page *ciphertext_page = alloc_page(GFP_NOFS);
+
+	if (!ciphertext_page) {
+		/* This is a potential bottleneck, but at least we'll have
+		 * forward progress. */
+		ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
+						GFP_NOFS);
+		if (ciphertext_page == NULL)
+			return ERR_PTR(-ENOMEM);
+		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+	} else {
+		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+	}
+	ctx->flags |= EXT4_WRITE_PATH_FL;
+	ctx->w.bounce_page = ciphertext_page;
+	return ciphertext_page;
+}
+
 /**
  * ext4_encrypt() - Encrypts a page
  * @inode:          The inode for which the encryption should take place
@@ -343,28 +363,17 @@ struct page *ext4_encrypt(struct inode *inode,
 		return (struct page *) ctx;
 
 	/* The encryption operation will require a bounce page. */
-	ciphertext_page = alloc_page(GFP_NOFS);
-	if (!ciphertext_page) {
-		/* This is a potential bottleneck, but at least we'll have
-		 * forward progress. */
-		ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-						GFP_NOFS);
-		if (WARN_ON_ONCE(!ciphertext_page)) {
-			ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-							GFP_NOFS | __GFP_WAIT);
-		}
-		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	} else {
-		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	}
-	ctx->flags |= EXT4_WRITE_PATH_FL;
-	ctx->w.bounce_page = ciphertext_page;
+	ciphertext_page = alloc_bounce_page(ctx);
+	if (IS_ERR(ciphertext_page))
+		goto errout;
 	ctx->w.control_page = plaintext_page;
 	err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
 			       plaintext_page, ciphertext_page);
 	if (err) {
+		ciphertext_page = ERR_PTR(err);
+	errout:
 		ext4_release_crypto_ctx(ctx);
-		return ERR_PTR(err);
+		return ciphertext_page;
 	}
 	SetPagePrivate(ciphertext_page);
 	set_page_private(ciphertext_page, (unsigned long)ctx);
@@ -424,21 +433,11 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ciphertext_page = alloc_page(GFP_NOFS);
-	if (!ciphertext_page) {
-		/* This is a potential bottleneck, but at least we'll have
-		 * forward progress. */
-		ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-						GFP_NOFS);
-		if (WARN_ON_ONCE(!ciphertext_page)) {
-			ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-							GFP_NOFS | __GFP_WAIT);
-		}
-		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	} else {
-		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+	ciphertext_page = alloc_bounce_page(ctx);
+	if (IS_ERR(ciphertext_page)) {
+		err = PTR_ERR(ciphertext_page);
+		goto errout;
 	}
-	ctx->w.bounce_page = ciphertext_page;
 
 	while (len--) {
 		err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
@@ -460,6 +459,7 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
 			goto errout;
 		}
 		err = submit_bio_wait(WRITE, bio);
+		bio_put(bio);
 		if (err)
 			goto errout;
 	}
```
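
The one-line bio fix at the end of the last hunk is easy to miss. Reassembled from the '+' and context lines, the tail of the per-block write loop in ext4_encrypted_zeroout() now reads as follows; the body of the errout label sits outside the hunks shown above, so the comment about it is an assumption based on the commit message.

```c
/* Tail of the per-block loop in ext4_encrypted_zeroout() after this patch.
 * Each iteration allocates its own bio, so the reference has to be dropped
 * once the synchronous write completes; previously this path never called
 * bio_put(), leaking one bio per block written. */
err = submit_bio_wait(WRITE, bio);
bio_put(bio);           /* added by this patch: drop the loop's bio reference */
if (err)
        goto errout;    /* assumed to release ctx and, with it, the bounce page */
```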