author    Eric Biggers <ebiggers@google.com>  2019-05-20 18:29:39 +0200
committer Eric Biggers <ebiggers@google.com>  2019-05-28 19:27:52 +0200
commit    d2d0727b1654e11563f181f4d3d48b9275514480 (patch)
tree      fca4761fc4cf02a31f43aac5ae86e5276bbc298b  /fs/crypto/bio.c
parent    Linux 5.2-rc2 (diff)
fscrypt: simplify bounce page handling
Currently, bounce page handling for writes to encrypted files is unnecessarily complicated. A fscrypt_ctx is allocated along with each bounce page, page_private(bounce_page) points to this fscrypt_ctx, and fscrypt_ctx::w::control_page points to the original pagecache page.

However, because writes don't use the fscrypt_ctx for anything else, there's no reason why page_private(bounce_page) can't just point to the original pagecache page directly. Therefore, this patch makes this change.

In the process, it also cleans up the API exposed to filesystems that allows testing whether a page is a bounce page, getting the pagecache page from a bounce page, and freeing a bounce page.

Reviewed-by: Chandan Rajendra <chandan@linux.ibm.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
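For illustration, here is a minimal sketch of the simplified bounce-page API that the description implies. fscrypt_free_bounce_page() is visible in the diff below; the test and lookup helpers are shown under assumed names (fscrypt_is_bounce_page(), fscrypt_pagecache_page()), and the mempool is likewise an assumption, since those definitions live outside fs/crypto/bio.c and are not part of this diff.

/*
 * Sketch only: helper names and the mempool are assumptions based on
 * the commit description, not taken from the diff below.
 */
#include <linux/mm.h>
#include <linux/mempool.h>

static mempool_t *fscrypt_bounce_page_pool;	/* assumed bounce-page pool */

static inline bool fscrypt_is_bounce_page(struct page *page)
{
	/* Bounce pages are never attached to a pagecache mapping. */
	return page->mapping == NULL;
}

static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
{
	/* page_private() now points straight at the original pagecache page. */
	return (struct page *)page_private(bounce_page);
}

void fscrypt_free_bounce_page(struct page *bounce_page)
{
	if (!bounce_page)
		return;
	set_page_private(bounce_page, (unsigned long)NULL);
	ClearPagePrivate(bounce_page);
	mempool_free(bounce_page, fscrypt_bounce_page_pool);
}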
Diffstat (limited to 'fs/crypto/bio.c')
-rw-r--r--  fs/crypto/bio.c  |  38
1 file changed, 5 insertions(+), 33 deletions(-)
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index b46021ebde85..c857b70b5328 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -70,46 +70,18 @@ void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
 }
 EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
 
-void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
-	struct fscrypt_ctx *ctx;
-	struct page *bounce_page;
-
-	/* The bounce data pages are unmapped. */
-	if ((*page)->mapping)
-		return;
-
-	/* The bounce data page is unmapped. */
-	bounce_page = *page;
-	ctx = (struct fscrypt_ctx *)page_private(bounce_page);
-
-	/* restore control page */
-	*page = ctx->w.control_page;
-
-	if (restore)
-		fscrypt_restore_control_page(bounce_page);
-}
-EXPORT_SYMBOL(fscrypt_pullback_bio_page);
-
 int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 			  sector_t pblk, unsigned int len)
 {
-	struct fscrypt_ctx *ctx;
-	struct page *ciphertext_page = NULL;
+	struct page *ciphertext_page;
 	struct bio *bio;
 	int ret, err = 0;
 
 	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
 
-	ctx = fscrypt_get_ctx(GFP_NOFS);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
-
-	ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
-	if (IS_ERR(ciphertext_page)) {
-		err = PTR_ERR(ciphertext_page);
-		goto errout;
-	}
+	ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
+	if (!ciphertext_page)
+		return -ENOMEM;
 
 	while (len--) {
 		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
@@ -147,7 +119,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 	}
 	err = 0;
 errout:
-	fscrypt_release_ctx(ctx);
+	fscrypt_free_bounce_page(ciphertext_page);
 	return err;
 }
 EXPORT_SYMBOL(fscrypt_zeroout_range);
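As a usage note, callers that previously relied on the removed fscrypt_pullback_bio_page() can recover the pagecache page themselves with the new API. The following write-completion helper is a hypothetical caller-side sketch, assuming the helpers sketched earlier are declared in <linux/fscrypt.h>; it is not taken from this patch.

/*
 * Hypothetical caller-side sketch; helper names are assumptions
 * following the commit description above.
 */
#include <linux/fscrypt.h>
#include <linux/pagemap.h>

static void example_finish_page_writeback(struct page *page)
{
	struct page *bounce_page = NULL;

	if (fscrypt_is_bounce_page(page)) {
		/* Recover the original pagecache page from the bounce page. */
		bounce_page = page;
		page = fscrypt_pagecache_page(bounce_page);
	}

	end_page_writeback(page);
	fscrypt_free_bounce_page(bounce_page);	/* no-op when NULL */
}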