Author:    Kees Cook <keescook@chromium.org>  2018-08-07 01:32:16 +0200
Committer: Jens Axboe <axboe@kernel.dk>       2018-09-06 23:12:24 +0200
Commit:    3d0e63754fa47d65edff172c1156f44b6fca5ca1
Tree:      fd3d733cba29c446722002093870c065b7d5961b /drivers/block/drbd/drbd_worker.c
Parent:    Merge tag 'for-linus-20180906' of git://git.kernel.dk/linux-block
drbd: Convert from ahash to shash
In preparing to remove all stack VLA usage from the kernel[1], this removes the discouraged use of AHASH_REQUEST_ON_STACK in favor of the smaller SHASH_DESC_ON_STACK by converting from ahash-wrapped-shash to direct shash. By removing a layer of indirection this both improves performance and reduces stack usage. The stack allocation will be made a fixed size in a later patch to the crypto subsystem.

The bulk of the lines in this change are simple s/ahash/shash/, but the main logic differences are in drbd_csum_ee() and drbd_csum_bio(), which externalize the page walking with k(un)map_atomic() instead of using scatter-gather lists.

[1] https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com

Acked-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
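For illustration (not part of the patch itself), a minimal sketch of the synchronous-hash pattern the message describes, assuming a crypto_shash tfm allocated elsewhere; the helper name is hypothetical:

	#include <crypto/hash.h>
	#include <linux/highmem.h>

	/* Hypothetical helper: hash one page the way the converted code does. */
	static void example_csum_page(struct crypto_shash *tfm, struct page *page,
				      unsigned int len, void *digest)
	{
		SHASH_DESC_ON_STACK(desc, tfm);	/* fixed-size stack descriptor */
		void *src;

		desc->tfm = tfm;
		desc->flags = 0;		/* shash is synchronous; no callback flags */

		crypto_shash_init(desc);
		src = kmap_atomic(page);	/* walk the page mapping directly ... */
		crypto_shash_update(desc, src, len);
		kunmap_atomic(src);		/* ... instead of building a scatterlist */
		crypto_shash_final(desc, digest);
		shash_desc_zero(desc);		/* scrub the descriptor off the stack */
	}

Unlike an ahash request, the shash descriptor has a size fixed at build time, which is what makes the on-stack allocation safe once the VLA is removed.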
Diffstat (limited to 'drivers/block/drbd/drbd_worker.c')
 drivers/block/drbd/drbd_worker.c | 63 ++++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 32 insertions(+), 31 deletions(-)
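The caller-side hunks below reduce to querying the digest size from the shash tfm before allocating the digest buffer. A hedged sketch of that pattern (helper name hypothetical, error handling simplified):

	#include <crypto/hash.h>
	#include <linux/slab.h>

	/* Hypothetical helper mirroring the kmalloc(digest_size, GFP_NOIO) hunks. */
	static void *example_alloc_digest(struct crypto_shash *tfm)
	{
		unsigned int digest_size = crypto_shash_digestsize(tfm);

		/* GFP_NOIO: these run on the I/O path and must not recurse into it */
		return kmalloc(digest_size, GFP_NOIO);
	}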
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index b8f77e83d456..c7ef48efe871 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -295,60 +295,61 @@ void drbd_request_endio(struct bio *bio)
complete_master_bio(device, &m);
}
-void drbd_csum_ee(struct crypto_ahash *tfm, struct drbd_peer_request *peer_req, void *digest)
+void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req, void *digest)
{
- AHASH_REQUEST_ON_STACK(req, tfm);
- struct scatterlist sg;
+ SHASH_DESC_ON_STACK(desc, tfm);
struct page *page = peer_req->pages;
struct page *tmp;
unsigned len;
+ void *src;
- ahash_request_set_tfm(req, tfm);
- ahash_request_set_callback(req, 0, NULL, NULL);
+ desc->tfm = tfm;
+ desc->flags = 0;
- sg_init_table(&sg, 1);
- crypto_ahash_init(req);
+ crypto_shash_init(desc);
+ src = kmap_atomic(page);
while ((tmp = page_chain_next(page))) {
/* all but the last page will be fully used */
- sg_set_page(&sg, page, PAGE_SIZE, 0);
- ahash_request_set_crypt(req, &sg, NULL, sg.length);
- crypto_ahash_update(req);
+ crypto_shash_update(desc, src, PAGE_SIZE);
+ kunmap_atomic(src);
page = tmp;
+ src = kmap_atomic(page);
}
/* and now the last, possibly only partially used page */
len = peer_req->i.size & (PAGE_SIZE - 1);
- sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
- ahash_request_set_crypt(req, &sg, digest, sg.length);
- crypto_ahash_finup(req);
- ahash_request_zero(req);
+ crypto_shash_update(desc, src, len ?: PAGE_SIZE);
+ kunmap_atomic(src);
+
+ crypto_shash_final(desc, digest);
+ shash_desc_zero(desc);
}
-void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest)
+void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
{
- AHASH_REQUEST_ON_STACK(req, tfm);
- struct scatterlist sg;
+ SHASH_DESC_ON_STACK(desc, tfm);
struct bio_vec bvec;
struct bvec_iter iter;
- ahash_request_set_tfm(req, tfm);
- ahash_request_set_callback(req, 0, NULL, NULL);
+ desc->tfm = tfm;
+ desc->flags = 0;
- sg_init_table(&sg, 1);
- crypto_ahash_init(req);
+ crypto_shash_init(desc);
bio_for_each_segment(bvec, bio, iter) {
- sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
- ahash_request_set_crypt(req, &sg, NULL, sg.length);
- crypto_ahash_update(req);
+ u8 *src;
+
+ src = kmap_atomic(bvec.bv_page);
+ crypto_shash_update(desc, src + bvec.bv_offset, bvec.bv_len);
+ kunmap_atomic(src);
+
/* REQ_OP_WRITE_SAME has only one segment,
* checksum the payload only once. */
if (bio_op(bio) == REQ_OP_WRITE_SAME)
break;
}
- ahash_request_set_crypt(req, NULL, digest, 0);
- crypto_ahash_final(req);
- ahash_request_zero(req);
+ crypto_shash_final(desc, digest);
+ shash_desc_zero(desc);
}
/* MAYBE merge common code with w_e_end_ov_req */
@@ -367,7 +368,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
goto out;
- digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
sector_t sector = peer_req->i.sector;
@@ -1205,7 +1206,7 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
* a real fix would be much more involved,
* introducing more locking mechanisms */
if (peer_device->connection->csums_tfm) {
- digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
D_ASSERT(device, digest_size == di->digest_size);
digest = kmalloc(digest_size, GFP_NOIO);
}
@@ -1255,7 +1256,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
if (unlikely(cancel))
goto out;
- digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (!digest) {
err = 1; /* terminate the connection in case the allocation failed */
@@ -1327,7 +1328,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
di = peer_req->digest;
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
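
The transforms consumed here (csums_tfm, verify_tfm) are set up outside this file, so their allocation does not appear in a diffstat limited to drbd_worker.c. A hedged sketch of the matching shash lifecycle, with "md5" as a stand-in algorithm name:

	struct crypto_shash *tfm;

	tfm = crypto_alloc_shash("md5", 0, 0);	/* algorithm name is illustrative */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* ... hand tfm to drbd_csum_ee()/drbd_csum_bio() ... */
	crypto_free_shash(tfm);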