summaryrefslogtreecommitdiffstats
path: root/crypto
diff options
context:
space:
mode:
author Nicolas Pitre <nico@cam.org> 2005-11-13 01:17:33 +0100
committer David S. Miller <davem@sunset.davemloft.net> 2006-01-09 23:15:46 +0100
commit fa9b98fdab5b57ecb4dd3d6c2489e262af458c44 (patch)
tree 454d374f957985d5931378d86090b6aca5bbc877 /crypto
parent [CRYPTO] sha1: Rename i/j to done/partial (diff)
download linux-fa9b98fdab5b57ecb4dd3d6c2489e262af458c44.tar.xz
download linux-fa9b98fdab5b57ecb4dd3d6c2489e262af458c44.zip
[CRYPTO] sha1: Avoid shifting count left and right
This patch avoids shifting the count left and right needlessly for each call to sha1_update(). It instead can be done only once at the end in sha1_final(). Keeping the previous test example (sha1_update() successively called with len=64), a 1.3% performance increase can be observed on i386, or 0.2% on ARM. The generated code is also smaller on ARM. Signed-off-by: Nicolas Pitre <nico@cam.org> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'crypto')
-rw-r--r-- crypto/sha1.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/crypto/sha1.c b/crypto/sha1.c
index 7b1abca29365..8048e2dd3c14 100644
--- a/crypto/sha1.c
+++ b/crypto/sha1.c
@@ -52,8 +52,8 @@ static void sha1_update(void *ctx, const u8 *data, unsigned int len)
unsigned int partial, done;
const u8 *src;
- partial = (sctx->count >> 3) & 0x3f;
- sctx->count += len << 3;
+ partial = sctx->count & 0x3f;
+ sctx->count += len;
done = 0;
src = data;
@@ -88,10 +88,10 @@ static void sha1_final(void* ctx, u8 *out)
__be64 bits;
static const u8 padding[64] = { 0x80, };
- bits = cpu_to_be64(sctx->count);
+ bits = cpu_to_be64(sctx->count << 3);
/* Pad out to 56 mod 64 */
- index = (sctx->count >> 3) & 0x3f;
+ index = sctx->count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
sha1_update(sctx, padding, padlen);