author     Ard Biesheuvel <ardb@kernel.org>          2022-12-13 17:13:10 +0100
committer  Herbert Xu <herbert@gondor.apana.org.au>  2022-12-30 15:56:27 +0100
commit     aa9695157f65c55e5c85a1c194859d3c03e68018 (patch)
tree       92cab03ac269b23b3211b3d5ae41317c389fa536
parent     crypto: arm/sha1 - Fix clang function cast warnings (diff)
crypto: scatterwalk - use kmap_local() not kmap_atomic()
kmap_atomic() is used to create short-lived mappings of pages that may
not be accessible via the kernel direct map. This is only needed on
32-bit architectures that implement CONFIG_HIGHMEM, but it can be used
on other (64-bit) architectures too, where the returned mapping is
simply the kernel direct address of the page.
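For illustration, the pattern both APIs serve is a brief mapping of a
page that may live in highmem. This sketch is not part of the patch and
the helper is hypothetical:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Hypothetical helper, illustration only: copy data out of a page
     * that may reside in highmem. On 64-bit, kmap_local_page() simply
     * returns the direct-map address of the page.
     */
    static void copy_from_page(struct page *page, unsigned int offset,
                               void *dst, size_t len)
    {
            void *vaddr = kmap_local_page(page);

            memcpy(dst, vaddr + offset, len);
            kunmap_local(vaddr);
    }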
However, on CONFIG_HIGHMEM configurations kmap_atomic() does not allow
the task to migrate to another CPU while the mapping is live, due to
the use of per-CPU kmap slots, and so it disables preemption on all
architectures, not just the 32-bit ones. This implies that all
scatterwalk based crypto routines essentially execute with preemption
disabled all the time, which is less than ideal.
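To see the behavioural difference, consider this hypothetical sketch,
which is not part of the patch and assumes a CONFIG_PREEMPT kernel and
a preemptible caller:

    #include <linux/bug.h>
    #include <linux/highmem.h>
    #include <linux/preempt.h>

    /* Illustration only: contrast the preemption state while each
     * kind of mapping is live.
     */
    static void preemption_contrast(struct page *page)
    {
            void *va;

            va = kmap_atomic(page);
            WARN_ON(preemptible());   /* preemption is off, on every arch */
            kunmap_atomic(va);

            va = kmap_local_page(page);
            WARN_ON(!preemptible());  /* task stays preemptible; on HIGHMEM
                                       * only CPU migration is disabled */
            kunmap_local(va);
    }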
So let's switch scatterwalk_map/_unmap and the shash/ahash routines to
kmap_local() instead, which serves the same purpose but without the
impact on preemption on architectures that have no need for
CONFIG_HIGHMEM.
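A typical consumer loop then looks roughly like the sketch below,
written against the scatterwalk helpers as they exist at the time of
this patch; the processing step is a placeholder. With this change the
loop stays preemptible on !CONFIG_HIGHMEM systems:

    #include <crypto/scatterwalk.h>

    /* Sketch of a scatterwalk consumer. scatterwalk_map()/_unmap()
     * now create and tear down kmap_local() mappings rather than
     * kmap_atomic() ones.
     */
    static void process_sg(struct scatterlist *sg, unsigned int nbytes)
    {
            struct scatter_walk walk;

            scatterwalk_start(&walk, sg);
            while (nbytes) {
                    unsigned int len = scatterwalk_clamp(&walk, nbytes);
                    void *vaddr = scatterwalk_map(&walk);

                    /* ... process len bytes at vaddr ... */

                    scatterwalk_unmap(vaddr);
                    scatterwalk_advance(&walk, len);
                    nbytes -= len;
                    scatterwalk_done(&walk, 0, nbytes);
            }
    }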
Cc: Eric Biggers <ebiggers@kernel.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "Elliott, Robert (Servers)" <elliott@hpe.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
 crypto/ahash.c               | 4 ++--
 crypto/shash.c               | 4 ++--
 include/crypto/scatterwalk.h | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/crypto/ahash.c b/crypto/ahash.c
index c2ca631a111f..4b089f1b770f 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -45,7 +45,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
 	unsigned int nbytes = min(walk->entrylen,
 				  ((unsigned int)(PAGE_SIZE)) - offset);
 
-	walk->data = kmap_atomic(walk->pg);
+	walk->data = kmap_local_page(walk->pg);
 	walk->data += offset;
 
 	if (offset & alignmask) {
@@ -95,7 +95,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 		}
 	}
 
-	kunmap_atomic(walk->data);
+	kunmap_local(walk->data);
 	crypto_yield(walk->flags);
 
 	if (err)
diff --git a/crypto/shash.c b/crypto/shash.c
index 868b6ba2b3b7..58b46f198449 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -320,10 +320,10 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 	    nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
 		void *data;
 
-		data = kmap_atomic(sg_page(sg));
+		data = kmap_local_page(sg_page(sg));
 		err = crypto_shash_digest(desc, data + offset, nbytes,
 					  req->result);
-		kunmap_atomic(data);
+		kunmap_local(data);
 	} else
 		err = crypto_shash_init(desc) ?:
 		      shash_ahash_finup(req, desc);
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index f2c42b4111b1..32fc4473175b 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -53,7 +53,7 @@ static inline struct page *scatterwalk_page(struct scatter_walk *walk)
 
 static inline void scatterwalk_unmap(void *vaddr)
 {
-	kunmap_atomic(vaddr);
+	kunmap_local(vaddr);
 }
 
 static inline void scatterwalk_start(struct scatter_walk *walk,
@@ -65,7 +65,7 @@ static inline void scatterwalk_start(struct scatter_walk *walk,
 
 static inline void *scatterwalk_map(struct scatter_walk *walk)
 {
-	return kmap_atomic(scatterwalk_page(walk)) +
+	return kmap_local_page(scatterwalk_page(walk)) +
 	       offset_in_page(walk->offset);
 }