Diffstat (limited to 'crypto')
-rw-r--r--  crypto/asn1/a_sign.c      |  14
-rw-r--r--  crypto/bio/b_sock.c       |  14
-rw-r--r--  crypto/bio/bf_null.c      |  10
-rw-r--r--  crypto/bio/bio.h          |  18
-rw-r--r--  crypto/bn/bn_add.c        |  24
-rw-r--r--  crypto/bn/bn_exp.c        |  60
-rw-r--r--  crypto/bn/bn_gcd.c        | 212
-rw-r--r--  crypto/bn/bn_mul.c        |  90
-rw-r--r--  crypto/bn/bn_prime.c      |  22
-rw-r--r--  crypto/bn/bn_sqr.c        |  22
-rw-r--r--  crypto/bn/bn_sqrt.c       |  70
-rw-r--r--  crypto/conf/conf_def.c    |  20
-rw-r--r--  crypto/des/destest.c      |  14
-rw-r--r--  crypto/dsa/dsa_ameth.c    |  10
-rw-r--r--  crypto/dso/dso_vms.c      |  34
-rw-r--r--  crypto/ec/ec.h            |  16
-rw-r--r--  crypto/ec/ec2_smpl.c      |  12
-rw-r--r--  crypto/ec/ec_lcl.h        |  16
-rw-r--r--  crypto/ec/ec_mult.c       |  14
-rw-r--r--  crypto/ec/ecp_nistp224.c  |  26
-rw-r--r--  crypto/ec/ecp_nistp256.c  |  68
-rw-r--r--  crypto/ec/ecp_nistp521.c  |  74
-rw-r--r--  crypto/ec/ecp_smpl.c      |  40
-rw-r--r--  crypto/o_time.c           |  48
-rw-r--r--  crypto/pem/pem.h          |  12
-rw-r--r--  crypto/rc4/rc4_enc.c      | 116
-rw-r--r--  crypto/rsa/rsa_pss.c      |  24
-rw-r--r--  crypto/threads/mttest.c   |  12
-rw-r--r--  crypto/whrlpool/wp_dgst.c |  24
-rw-r--r--  crypto/x509v3/v3_scts.c   |  28
30 files changed, 582 insertions, 582 deletions
diff --git a/crypto/asn1/a_sign.c b/crypto/asn1/a_sign.c
index 2a8c3a336c..51c6a0c34d 100644
--- a/crypto/asn1/a_sign.c
+++ b/crypto/asn1/a_sign.c
@@ -252,13 +252,13 @@ int ASN1_item_sign_ctx(const ASN1_ITEM *it,
rv = pkey->ameth->item_sign(ctx, it, asn, algor1, algor2, signature);
if (rv == 1)
outl = signature->length;
- /*-
- * Return value meanings:
- * <=0: error.
- * 1: method does everything.
- * 2: carry on as normal.
- * 3: ASN1 method sets algorithm identifiers: just sign.
- */
+ /*-
+ * Return value meanings:
+ * <=0: error.
+ * 1: method does everything.
+ * 2: carry on as normal.
+ * 3: ASN1 method sets algorithm identifiers: just sign.
+ */
if (rv <= 0)
ASN1err(ASN1_F_ASN1_ITEM_SIGN_CTX, ERR_R_EVP_LIB);
if (rv <= 1)
diff --git a/crypto/bio/b_sock.c b/crypto/bio/b_sock.c
index ebdec48c64..ad33aa1f42 100644
--- a/crypto/bio/b_sock.c
+++ b/crypto/bio/b_sock.c
@@ -529,13 +529,13 @@ int BIO_socket_ioctl(int fd, long type, void *arg)
i = ioctlsocket(fd, type, (char *)arg);
# else
# if defined(OPENSSL_SYS_VMS)
- /*-
- * 2011-02-18 SMS.
- * VMS ioctl() can't tolerate a 64-bit "void *arg", but we
- * observe that all the consumers pass in an "unsigned long *",
- * so we arrange a local copy with a short pointer, and use
- * that, instead.
- */
+ /*-
+ * 2011-02-18 SMS.
+ * VMS ioctl() can't tolerate a 64-bit "void *arg", but we
+ * observe that all the consumers pass in an "unsigned long *",
+ * so we arrange a local copy with a short pointer, and use
+ * that, instead.
+ */
# if __INITIAL_POINTER_SIZE == 64
# define ARG arg_32p
# pragma pointer_size save
diff --git a/crypto/bio/bf_null.c b/crypto/bio/bf_null.c
index d9d0dc6bf4..e129dfe1e7 100644
--- a/crypto/bio/bf_null.c
+++ b/crypto/bio/bf_null.c
@@ -103,11 +103,11 @@ static int nullf_free(BIO *a)
{
if (a == NULL)
return (0);
- /*-
- a->ptr=NULL;
- a->init=0;
- a->flags=0;
- */
+ /*-
+ a->ptr=NULL;
+ a->init=0;
+ a->flags=0;
+ */
return (1);
}
diff --git a/crypto/bio/bio.h b/crypto/bio/bio.h
index 6ba1bd7334..2d3e9e7922 100644
--- a/crypto/bio/bio.h
+++ b/crypto/bio/bio.h
@@ -344,15 +344,15 @@ struct bio_st {
DECLARE_STACK_OF(BIO)
typedef struct bio_f_buffer_ctx_struct {
- /*-
- * Buffers are setup like this:
- *
- * <---------------------- size ----------------------->
- * +---------------------------------------------------+
- * | consumed | remaining | free space |
- * +---------------------------------------------------+
- * <-- off --><------- len ------->
- */
+ /*-
+ * Buffers are setup like this:
+ *
+ * <---------------------- size ----------------------->
+ * +---------------------------------------------------+
+ * | consumed | remaining | free space |
+ * +---------------------------------------------------+
+ * <-- off --><------- len ------->
+ */
/*- BIO *bio; *//*
* this is now in the BIO struct
*/
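
The diagram above describes how the buffer-BIO context tracks its data. A minimal standalone sketch of that bookkeeping, using a hypothetical struct with the same meaning as the fields of bio_f_buffer_ctx_struct (field names here are illustrative, not quoted from bio.h):

/* Sketch of the layout in the comment above:
 *   [0, off)           bytes already consumed
 *   [off, off + len)   bytes still waiting to be read
 *   [off + len, size)  free space                      */
struct buf_view {
    char *buf;
    int size;   /* total buffer size      */
    int off;    /* start of unread data   */
    int len;    /* number of unread bytes */
};

static int buf_consumed(const struct buf_view *b)  { return b->off; }
static int buf_remaining(const struct buf_view *b) { return b->len; }
static int buf_free(const struct buf_view *b)      { return b->size - (b->off + b->len); }
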
diff --git a/crypto/bn/bn_add.c b/crypto/bn/bn_add.c
index ccdcdd1d7c..f569a7efde 100644
--- a/crypto/bn/bn_add.c
+++ b/crypto/bn/bn_add.c
@@ -68,12 +68,12 @@ int BN_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b)
bn_check_top(a);
bn_check_top(b);
- /*-
- * a + b a+b
- * a + -b a-b
- * -a + b b-a
- * -a + -b -(a+b)
- */
+ /*-
+ * a + b a+b
+ * a + -b a-b
+ * -a + b b-a
+ * -a + -b -(a+b)
+ */
if (a_neg ^ b->neg) {
/* only one is negative */
if (a_neg) {
@@ -260,12 +260,12 @@ int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b)
bn_check_top(a);
bn_check_top(b);
- /*-
- * a - b a-b
- * a - -b a+b
- * -a - b -(a+b)
- * -a - -b b-a
- */
+ /*-
+ * a - b a-b
+ * a - -b a+b
+ * -a - b -(a+b)
+ * -a - -b b-a
+ */
if (a->neg) {
if (b->neg) {
tmp = a;
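
The four sign cases in the comments above (for both BN_add and BN_sub) reduce to one rule: equal signs add the magnitudes, unequal signs subtract the smaller magnitude from the larger and take the larger operand's sign. A minimal scalar sketch of that dispatch, an illustration of the case analysis rather than the BIGNUM code:

/* (neg, mag) represents the integer (-1)^neg * mag. */
typedef struct { int neg; unsigned long mag; } sint;

static sint sint_add(sint a, sint b)
{
    sint r;
    if (a.neg == b.neg) {            /*  a + b          or  -a + -b = -(a+b) */
        r.neg = a.neg;
        r.mag = a.mag + b.mag;
    } else if (a.mag >= b.mag) {     /*  a + -b = a-b   or  -a + b  = -(a-b) */
        r.neg = a.neg;
        r.mag = a.mag - b.mag;
    } else {                         /* result takes the sign of the larger  */
        r.neg = b.neg;
        r.mag = b.mag - a.mag;
    }
    if (r.mag == 0)
        r.neg = 0;                   /* keep zero non-negative, per BIGNUM convention */
    return r;
}
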
diff --git a/crypto/bn/bn_exp.c b/crypto/bn/bn_exp.c
index eebcb96b55..28a9fd53bb 100644
--- a/crypto/bn/bn_exp.c
+++ b/crypto/bn/bn_exp.c
@@ -203,36 +203,36 @@ int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m,
bn_check_top(p);
bn_check_top(m);
- /*-
- * For even modulus m = 2^k*m_odd, it might make sense to compute
- * a^p mod m_odd and a^p mod 2^k separately (with Montgomery
- * exponentiation for the odd part), using appropriate exponent
- * reductions, and combine the results using the CRT.
- *
- * For now, we use Montgomery only if the modulus is odd; otherwise,
- * exponentiation using the reciprocal-based quick remaindering
- * algorithm is used.
- *
- * (Timing obtained with expspeed.c [computations a^p mod m
- * where a, p, m are of the same length: 256, 512, 1024, 2048,
- * 4096, 8192 bits], compared to the running time of the
- * standard algorithm:
- *
- * BN_mod_exp_mont 33 .. 40 % [AMD K6-2, Linux, debug configuration]
- * 55 .. 77 % [UltraSparc processor, but
- * debug-solaris-sparcv8-gcc conf.]
- *
- * BN_mod_exp_recp 50 .. 70 % [AMD K6-2, Linux, debug configuration]
- * 62 .. 118 % [UltraSparc, debug-solaris-sparcv8-gcc]
- *
- * On the Sparc, BN_mod_exp_recp was faster than BN_mod_exp_mont
- * at 2048 and more bits, but at 512 and 1024 bits, it was
- * slower even than the standard algorithm!
- *
- * "Real" timings [linux-elf, solaris-sparcv9-gcc configurations]
- * should be obtained when the new Montgomery reduction code
- * has been integrated into OpenSSL.)
- */
+ /*-
+ * For even modulus m = 2^k*m_odd, it might make sense to compute
+ * a^p mod m_odd and a^p mod 2^k separately (with Montgomery
+ * exponentiation for the odd part), using appropriate exponent
+ * reductions, and combine the results using the CRT.
+ *
+ * For now, we use Montgomery only if the modulus is odd; otherwise,
+ * exponentiation using the reciprocal-based quick remaindering
+ * algorithm is used.
+ *
+ * (Timing obtained with expspeed.c [computations a^p mod m
+ * where a, p, m are of the same length: 256, 512, 1024, 2048,
+ * 4096, 8192 bits], compared to the running time of the
+ * standard algorithm:
+ *
+ * BN_mod_exp_mont 33 .. 40 % [AMD K6-2, Linux, debug configuration]
+ * 55 .. 77 % [UltraSparc processor, but
+ * debug-solaris-sparcv8-gcc conf.]
+ *
+ * BN_mod_exp_recp 50 .. 70 % [AMD K6-2, Linux, debug configuration]
+ * 62 .. 118 % [UltraSparc, debug-solaris-sparcv8-gcc]
+ *
+ * On the Sparc, BN_mod_exp_recp was faster than BN_mod_exp_mont
+ * at 2048 and more bits, but at 512 and 1024 bits, it was
+ * slower even than the standard algorithm!
+ *
+ * "Real" timings [linux-elf, solaris-sparcv9-gcc configurations]
+ * should be obtained when the new Montgomery reduction code
+ * has been integrated into OpenSSL.)
+ */
#define MONT_MUL_MOD
#define MONT_EXP_WORD
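
Whichever internal path is chosen, callers go through the same front end. A minimal usage sketch (error handling kept short):

#include <openssl/bn.h>

/* r = a^p mod m; BN_mod_exp() itself decides between the Montgomery,
 * reciprocal and word-based code paths discussed above. */
static int mod_exp_example(BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
                           const BIGNUM *m)
{
    BN_CTX *ctx = BN_CTX_new();
    int ok;

    if (ctx == NULL)
        return 0;
    ok = BN_mod_exp(r, a, p, m, ctx);
    BN_CTX_free(ctx);
    return ok;
}
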
diff --git a/crypto/bn/bn_gcd.c b/crypto/bn/bn_gcd.c
index 13432d09e7..9902e4eee9 100644
--- a/crypto/bn/bn_gcd.c
+++ b/crypto/bn/bn_gcd.c
@@ -283,13 +283,13 @@ BIGNUM *int_bn_mod_inverse(BIGNUM *in,
goto err;
}
sign = -1;
- /*-
- * From B = a mod |n|, A = |n| it follows that
- *
- * 0 <= B < A,
- * -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|).
- */
+ /*-
+ * From B = a mod |n|, A = |n| it follows that
+ *
+ * 0 <= B < A,
+ * -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|).
+ */
if (BN_is_odd(n) && (BN_num_bits(n) <= (BN_BITS <= 32 ? 450 : 2048))) {
/*
@@ -301,12 +301,12 @@ BIGNUM *int_bn_mod_inverse(BIGNUM *in,
int shift;
while (!BN_is_zero(B)) {
- /*-
- * 0 < B < |n|,
- * 0 < A <= |n|,
- * (1) -sign*X*a == B (mod |n|),
- * (2) sign*Y*a == A (mod |n|)
- */
+ /*-
+ * 0 < B < |n|,
+ * 0 < A <= |n|,
+ * (1) -sign*X*a == B (mod |n|),
+ * (2) sign*Y*a == A (mod |n|)
+ */
/*
* Now divide B by the maximum possible power of two in the
@@ -352,18 +352,18 @@ BIGNUM *int_bn_mod_inverse(BIGNUM *in,
goto err;
}
- /*-
- * We still have (1) and (2).
- * Both A and B are odd.
- * The following computations ensure that
- *
- * 0 <= B < |n|,
- * 0 < A < |n|,
- * (1) -sign*X*a == B (mod |n|),
- * (2) sign*Y*a == A (mod |n|),
- *
- * and that either A or B is even in the next iteration.
- */
+ /*-
+ * We still have (1) and (2).
+ * Both A and B are odd.
+ * The following computations ensure that
+ *
+ * 0 <= B < |n|,
+ * 0 < A < |n|,
+ * (1) -sign*X*a == B (mod |n|),
+ * (2) sign*Y*a == A (mod |n|),
+ *
+ * and that either A or B is even in the next iteration.
+ */
if (BN_ucmp(B, A) >= 0) {
/* -sign*(X + Y)*a == B - A (mod |n|) */
if (!BN_uadd(X, X, Y))
@@ -392,11 +392,11 @@ BIGNUM *int_bn_mod_inverse(BIGNUM *in,
while (!BN_is_zero(B)) {
BIGNUM *tmp;
- /*-
- * 0 < B < A,
- * (*) -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|)
- */
+ /*-
+ * 0 < B < A,
+ * (*) -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|)
+ */
/* (D, M) := (A/B, A%B) ... */
if (BN_num_bits(A) == BN_num_bits(B)) {
@@ -443,12 +443,12 @@ BIGNUM *int_bn_mod_inverse(BIGNUM *in,
goto err;
}
- /*-
- * Now
- * A = D*B + M;
- * thus we have
- * (**) sign*Y*a == D*B + M (mod |n|).
- */
+ /*-
+ * Now
+ * A = D*B + M;
+ * thus we have
+ * (**) sign*Y*a == D*B + M (mod |n|).
+ */
tmp = A; /* keep the BIGNUM object, the value does not
* matter */
@@ -458,25 +458,25 @@ BIGNUM *int_bn_mod_inverse(BIGNUM *in,
B = M;
/* ... so we have 0 <= B < A again */
- /*-
- * Since the former M is now B and the former B is now A,
- * (**) translates into
- * sign*Y*a == D*A + B (mod |n|),
- * i.e.
- * sign*Y*a - D*A == B (mod |n|).
- * Similarly, (*) translates into
- * -sign*X*a == A (mod |n|).
- *
- * Thus,
- * sign*Y*a + D*sign*X*a == B (mod |n|),
- * i.e.
- * sign*(Y + D*X)*a == B (mod |n|).
- *
- * So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at
- * -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|).
- * Note that X and Y stay non-negative all the time.
- */
+ /*-
+ * Since the former M is now B and the former B is now A,
+ * (**) translates into
+ * sign*Y*a == D*A + B (mod |n|),
+ * i.e.
+ * sign*Y*a - D*A == B (mod |n|).
+ * Similarly, (*) translates into
+ * -sign*X*a == A (mod |n|).
+ *
+ * Thus,
+ * sign*Y*a + D*sign*X*a == B (mod |n|),
+ * i.e.
+ * sign*(Y + D*X)*a == B (mod |n|).
+ *
+ * So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at
+ * -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|).
+ * Note that X and Y stay non-negative all the time.
+ */
/*
* most of the time D is very small, so we can optimize tmp :=
@@ -513,13 +513,13 @@ BIGNUM *int_bn_mod_inverse(BIGNUM *in,
}
}
- /*-
- * The while loop (Euclid's algorithm) ends when
- * A == gcd(a,n);
- * we have
- * sign*Y*a == A (mod |n|),
- * where Y is non-negative.
- */
+ /*-
+ * The while loop (Euclid's algorithm) ends when
+ * A == gcd(a,n);
+ * we have
+ * sign*Y*a == A (mod |n|),
+ * where Y is non-negative.
+ */
if (sign < 0) {
if (!BN_sub(Y, n, Y))
@@ -604,22 +604,22 @@ static BIGNUM *BN_mod_inverse_no_branch(BIGNUM *in,
goto err;
}
sign = -1;
- /*-
- * From B = a mod |n|, A = |n| it follows that
- *
- * 0 <= B < A,
- * -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|).
- */
+ /*-
+ * From B = a mod |n|, A = |n| it follows that
+ *
+ * 0 <= B < A,
+ * -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|).
+ */
while (!BN_is_zero(B)) {
BIGNUM *tmp;
- /*-
- * 0 < B < A,
- * (*) -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|)
- */
+ /*-
+ * 0 < B < A,
+ * (*) -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|)
+ */
/*
* Turn BN_FLG_CONSTTIME flag on, so that when BN_div is invoked,
@@ -632,12 +632,12 @@ static BIGNUM *BN_mod_inverse_no_branch(BIGNUM *in,
if (!BN_div(D, M, pA, B, ctx))
goto err;
- /*-
- * Now
- * A = D*B + M;
- * thus we have
- * (**) sign*Y*a == D*B + M (mod |n|).
- */
+ /*-
+ * Now
+ * A = D*B + M;
+ * thus we have
+ * (**) sign*Y*a == D*B + M (mod |n|).
+ */
tmp = A; /* keep the BIGNUM object, the value does not
* matter */
@@ -647,25 +647,25 @@ static BIGNUM *BN_mod_inverse_no_branch(BIGNUM *in,
B = M;
/* ... so we have 0 <= B < A again */
- /*-
- * Since the former M is now B and the former B is now A,
- * (**) translates into
- * sign*Y*a == D*A + B (mod |n|),
- * i.e.
- * sign*Y*a - D*A == B (mod |n|).
- * Similarly, (*) translates into
- * -sign*X*a == A (mod |n|).
- *
- * Thus,
- * sign*Y*a + D*sign*X*a == B (mod |n|),
- * i.e.
- * sign*(Y + D*X)*a == B (mod |n|).
- *
- * So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at
- * -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|).
- * Note that X and Y stay non-negative all the time.
- */
+ /*-
+ * Since the former M is now B and the former B is now A,
+ * (**) translates into
+ * sign*Y*a == D*A + B (mod |n|),
+ * i.e.
+ * sign*Y*a - D*A == B (mod |n|).
+ * Similarly, (*) translates into
+ * -sign*X*a == A (mod |n|).
+ *
+ * Thus,
+ * sign*Y*a + D*sign*X*a == B (mod |n|),
+ * i.e.
+ * sign*(Y + D*X)*a == B (mod |n|).
+ *
+ * So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at
+ * -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|).
+ * Note that X and Y stay non-negative all the time.
+ */
if (!BN_mul(tmp, D, X, ctx))
goto err;
@@ -679,13 +679,13 @@ static BIGNUM *BN_mod_inverse_no_branch(BIGNUM *in,
sign = -sign;
}
- /*-
- * The while loop (Euclid's algorithm) ends when
- * A == gcd(a,n);
- * we have
- * sign*Y*a == A (mod |n|),
- * where Y is non-negative.
- */
+ /*-
+ * The while loop (Euclid's algorithm) ends when
+ * A == gcd(a,n);
+ * we have
+ * sign*Y*a == A (mod |n|),
+ * where Y is non-negative.
+ */
if (sign < 0) {
if (!BN_sub(Y, n, Y))
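
The invariants above are what guarantee that, at termination, Y (or |n| - Y when sign < 0) is the inverse of a modulo |n|, provided gcd(a, n) == 1. From the caller's side the usual entry point is BN_mod_inverse(); a minimal usage sketch:

#include <openssl/bn.h>

/* inv * a == 1 (mod n); returns NULL if a is not invertible mod n
 * (or on allocation failure). The caller must BN_free() the result. */
static BIGNUM *mod_inverse_example(const BIGNUM *a, const BIGNUM *n)
{
    BIGNUM *inv = NULL;
    BN_CTX *ctx = BN_CTX_new();

    if (ctx == NULL)
        return NULL;
    inv = BN_mod_inverse(NULL, a, n, ctx);
    BN_CTX_free(ctx);
    return inv;
}
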
diff --git a/crypto/bn/bn_mul.c b/crypto/bn/bn_mul.c
index f681fa58b8..9b66e666b7 100644
--- a/crypto/bn/bn_mul.c
+++ b/crypto/bn/bn_mul.c
@@ -484,11 +484,11 @@ void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
bn_mul_recursive(&(r[n2]), &(a[n]), &(b[n]), n, dna, dnb, p);
}
- /*-
- * t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1])
- */
+ /*-
+ * t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
+ * r[10] holds (a[0]*b[0])
+ * r[32] holds (b[1]*b[1])
+ */
c1 = (int)(bn_add_words(t, r, &(r[n2]), n2));
@@ -499,12 +499,12 @@ void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
c1 += (int)(bn_add_words(&(t[n2]), &(t[n2]), t, n2));
}
- /*-
- * t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1])
- * c1 holds the carry bits
- */
+ /*-
+ * t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
+ * r[10] holds (a[0]*b[0])
+ * r[32] holds (b[1]*b[1])
+ * c1 holds the carry bits
+ */
c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2));
if (c1) {
p = &(r[n + n2]);
@@ -642,11 +642,11 @@ void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n,
}
}
- /*-
- * t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1])
- */
+ /*-
+ * t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
+ * r[10] holds (a[0]*b[0])
+ * r[32] holds (b[1]*b[1])
+ */
c1 = (int)(bn_add_words(t, r, &(r[n2]), n2));
@@ -657,12 +657,12 @@ void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n,
c1 += (int)(bn_add_words(&(t[n2]), &(t[n2]), t, n2));
}
- /*-
- * t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1])
- * c1 holds the carry bits
- */
+ /*-
+ * t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
+ * r[10] holds (a[0]*b[0])
+ * r[32] holds (b[1]*b[1])
+ * c1 holds the carry bits
+ */
c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2));
if (c1) {
p = &(r[n + n2]);
@@ -774,13 +774,13 @@ void bn_mul_high(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, BN_ULONG *l, int n2,
bn_mul_recursive(r, &(a[n]), &(b[n]), n, 0, 0, &(t[n2]));
}
- /*-
- * s0 == low(al*bl)
- * s1 == low(ah*bh)+low((al-ah)*(bh-bl))+low(al*bl)+high(al*bl)
- * We know s0 and s1 so the only unknown is high(al*bl)
- * high(al*bl) == s1 - low(ah*bh+s0+(al-ah)*(bh-bl))
- * high(al*bl) == s1 - (r[0]+l[0]+t[0])
- */
+ /*-
+ * s0 == low(al*bl)
+ * s1 == low(ah*bh)+low((al-ah)*(bh-bl))+low(al*bl)+high(al*bl)
+ * We know s0 and s1 so the only unknown is high(al*bl)
+ * high(al*bl) == s1 - low(ah*bh+s0+(al-ah)*(bh-bl))
+ * high(al*bl) == s1 - (r[0]+l[0]+t[0])
+ */
if (l != NULL) {
lp = &(t[n2 + n]);
c1 = (int)(bn_add_words(lp, &(r[0]), &(l[0]), n));
@@ -805,22 +805,22 @@ void bn_mul_high(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, BN_ULONG *l, int n2,
lp[i] = ((~mp[i]) + 1) & BN_MASK2;
}
- /*-
- * s[0] = low(al*bl)
- * t[3] = high(al*bl)
- * t[10] = (a[0]-a[1])*(b[1]-b[0]) neg is the sign
- * r[10] = (a[1]*b[1])
- */
- /*-
- * R[10] = al*bl
- * R[21] = al*bl + ah*bh + (a[0]-a[1])*(b[1]-b[0])
- * R[32] = ah*bh
- */
- /*-
- * R[1]=t[3]+l[0]+r[0](+-)t[0] (have carry/borrow)
- * R[2]=r[0]+t[3]+r[1](+-)t[1] (have carry/borrow)
- * R[3]=r[1]+(carry/borrow)
- */
+ /*-
+ * s[0] = low(al*bl)
+ * t[3] = high(al*bl)
+ * t[10] = (a[0]-a[1])*(b[1]-b[0]) neg is the sign
+ * r[10] = (a[1]*b[1])
+ */
+ /*-
+ * R[10] = al*bl
+ * R[21] = al*bl + ah*bh + (a[0]-a[1])*(b[1]-b[0])
+ * R[32] = ah*bh
+ */
+ /*-
+ * R[1]=t[3]+l[0]+r[0](+-)t[0] (have carry/borrow)
+ * R[2]=r[0]+t[3]+r[1](+-)t[1] (have carry/borrow)
+ * R[3]=r[1]+(carry/borrow)
+ */
if (l != NULL) {
lp = &(t[n2]);
c1 = (int)(bn_add_words(lp, &(t[n2 + n]), &(l[0]), n));
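
The bookkeeping comments above track the three Karatsuba partial products and their recombination. The same identity on a single word split into 32-bit halves shows the algebra in miniature; this sketch assumes a compiler with unsigned __int128 (the assumption the nistp code in this diff already makes for uint128_t) and is an illustration, not bn_mul_recursive itself:

#include <stdint.h>

/* a = ah*B + al, b = bh*B + bl with B = 2^32.  Then
 *   a*b = ah*bh*B^2 + (ah*bh + al*bl + (al-ah)*(bh-bl))*B + al*bl,
 * so three 32x32 multiplications replace the schoolbook four. */
static unsigned __int128 karatsuba64(uint64_t a, uint64_t b)
{
    uint64_t al = (uint32_t)a, ah = a >> 32;
    uint64_t bl = (uint32_t)b, bh = b >> 32;
    uint64_t p_ll = al * bl;                  /* a[0]*b[0] */
    uint64_t p_hh = ah * bh;                  /* a[1]*b[1] */
    uint64_t d1, d2, p_md;
    int neg = 0;                              /* sign of (a[0]-a[1])*(b[1]-b[0]) */
    unsigned __int128 mid;

    if (al >= ah) d1 = al - ah; else { d1 = ah - al; neg = !neg; }
    if (bh >= bl) d2 = bh - bl; else { d2 = bl - bh; neg = !neg; }
    p_md = d1 * d2;

    mid = (unsigned __int128)p_ll + p_hh;     /* never underflows when neg is set */
    mid = neg ? mid - p_md : mid + p_md;

    return ((unsigned __int128)p_hh << 64) + (mid << 32) + p_ll;
}
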
diff --git a/crypto/bn/bn_prime.c b/crypto/bn/bn_prime.c
index 39dc9b577c..b12295e84e 100644
--- a/crypto/bn/bn_prime.c
+++ b/crypto/bn/bn_prime.c
@@ -527,17 +527,17 @@ static int probable_prime(BIGNUM *rnd, int bits)
if (is_single_word) {
BN_ULONG rnd_word = BN_get_word(rnd);
- /*-
- * In the case that the candidate prime is a single word then
- * we check that:
- * 1) It's greater than primes[i] because we shouldn't reject
- * 3 as being a prime number because it's a multiple of
- * three.
- * 2) That it's not a multiple of a known prime. We don't
- * check that rnd-1 is also coprime to all the known
- * primes because there aren't many small primes where
- * that's true.
- */
+ /*-
+ * In the case that the candidate prime is a single word then
+ * we check that:
+ * 1) It's greater than primes[i] because we shouldn't reject
+ * 3 as being a prime number because it's a multiple of
+ * three.
+ * 2) That it's not a multiple of a known prime. We don't
+ * check that rnd-1 is also coprime to all the known
+ * primes because there aren't many small primes where
+ * that's true.
+ */
for (i = 1; i < NUMPRIMES && primes[i] < rnd_word; i++) {
if ((mods[i] + delta) % primes[i] == 0) {
delta += 2;
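
A standalone sketch of the single-word trial-division idea described in that comment; the table here is a tiny stand-in for OpenSSL's primes[] and the function name is hypothetical:

#include <stddef.h>

/* Reject a small candidate that is a multiple of a known prime, but never
 * reject the known prime itself (point 1 above: 3 must not be rejected
 * merely because it is a multiple of three). */
static int survives_trial_division(unsigned long candidate)
{
    static const unsigned long small_primes[] = {
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37
    };
    size_t i;

    for (i = 1; i < sizeof(small_primes) / sizeof(small_primes[0])
             && small_primes[i] < candidate; i++) {
        if (candidate % small_primes[i] == 0)
            return 0;                /* composite */
    }
    return 1;                        /* passed trial division; still only probable */
}
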
diff --git a/crypto/bn/bn_sqr.c b/crypto/bn/bn_sqr.c
index 30a7670ef0..f794c107bd 100644
--- a/crypto/bn/bn_sqr.c
+++ b/crypto/bn/bn_sqr.c
@@ -242,23 +242,23 @@ void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t)
bn_sqr_recursive(r, a, n, p);
bn_sqr_recursive(&(r[n2]), &(a[n]), n, p);
- /*-
- * t[32] holds (a[0]-a[1])*(a[1]-a[0]), it is negative or zero
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1])
- */
+ /*-
+ * t[32] holds (a[0]-a[1])*(a[1]-a[0]), it is negative or zero
+ * r[10] holds (a[0]*b[0])
+ * r[32] holds (b[1]*b[1])
+ */
c1 = (int)(bn_add_words(t, r, &(r[n2]), n2));
/* t[32] is negative */
c1 -= (int)(bn_sub_words(&(t[n2]), t, &(t[n2]), n2));
- /*-
- * t[32] holds (a[0]-a[1])*(a[1]-a[0])+(a[0]*a[0])+(a[1]*a[1])
- * r[10] holds (a[0]*a[0])
- * r[32] holds (a[1]*a[1])
- * c1 holds the carry bits
- */
+ /*-
+ * t[32] holds (a[0]-a[1])*(a[1]-a[0])+(a[0]*a[0])+(a[1]*a[1])
+ * r[10] holds (a[0]*a[0])
+ * r[32] holds (a[1]*a[1])
+ * c1 holds the carry bits
+ */
c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2));
if (c1) {
p = &(r[n + n2]);
diff --git a/crypto/bn/bn_sqrt.c b/crypto/bn/bn_sqrt.c
index 772c8080bb..1b259f31c6 100644
--- a/crypto/bn/bn_sqrt.c
+++ b/crypto/bn/bn_sqrt.c
@@ -132,14 +132,14 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
/* we'll set q later (if needed) */
if (e == 1) {
- /*-
- * The easy case: (|p|-1)/2 is odd, so 2 has an inverse
- * modulo (|p|-1)/2, and square roots can be computed
- * directly by modular exponentiation.
- * We have
- * 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2),
- * so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1.
- */
+ /*-
+ * The easy case: (|p|-1)/2 is odd, so 2 has an inverse
+ * modulo (|p|-1)/2, and square roots can be computed
+ * directly by modular exponentiation.
+ * We have
+ * 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2),
+ * so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1.
+ */
if (!BN_rshift(q, p, 2))
goto end;
q->neg = 0;
@@ -277,24 +277,24 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
goto end;
}
- /*-
- * Now we know that (if p is indeed prime) there is an integer
- * k, 0 <= k < 2^e, such that
- *
- * a^q * y^k == 1 (mod p).
- *
- * As a^q is a square and y is not, k must be even.
- * q+1 is even, too, so there is an element
- *
- * X := a^((q+1)/2) * y^(k/2),
- *
- * and it satisfies
- *
- * X^2 = a^q * a * y^k
- * = a,
- *
- * so it is the square root that we are looking for.
- */
+ /*-
+ * Now we know that (if p is indeed prime) there is an integer
+ * k, 0 <= k < 2^e, such that
+ *
+ * a^q * y^k == 1 (mod p).
+ *
+ * As a^q is a square and y is not, k must be even.
+ * q+1 is even, too, so there is an element
+ *
+ * X := a^((q+1)/2) * y^(k/2),
+ *
+ * and it satisfies
+ *
+ * X^2 = a^q * a * y^k
+ * = a,
+ *
+ * so it is the square root that we are looking for.
+ */
/* t := (q-1)/2 (note that q is odd) */
if (!BN_rshift1(t, q))
@@ -333,15 +333,15 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
goto end;
while (1) {
- /*-
- * Now b is a^q * y^k for some even k (0 <= k < 2^E
- * where E refers to the original value of e, which we
- * don't keep in a variable), and x is a^((q+1)/2) * y^(k/2).
- *
- * We have a*b = x^2,
- * y^2^(e-1) = -1,
- * b^2^(e-1) = 1.
- */
+ /*-
+ * Now b is a^q * y^k for some even k (0 <= k < 2^E
+ * where E refers to the original value of e, which we
+ * don't keep in a variable), and x is a^((q+1)/2) * y^(k/2).
+ *
+ * We have a*b = x^2,
+ * y^2^(e-1) = -1,
+ * b^2^(e-1) = 1.
+ */
if (BN_is_one(b)) {
if (!BN_copy(ret, x))
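
For the "easy case" described near the top of this file's hunks (p == 3 (mod 4), so (|p|-1)/2 is odd), the whole computation is one modular exponentiation. A minimal sketch using the public API; the caller should still verify r^2 == a (mod p), since a may not be a square at all:

#include <openssl/bn.h>

/* For p == 3 (mod 4), a square root of a mod p is a^((p+1)/4) mod p. */
static int sqrt_p3mod4(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
{
    int ok = 0;
    BIGNUM *e = BN_new();

    if (e == NULL)
        return 0;
    if (BN_copy(e, p) != NULL
        && BN_add_word(e, 1)            /* e = p + 1     */
        && BN_rshift(e, e, 2)           /* e = (p + 1)/4 */
        && BN_mod_exp(r, a, e, p, ctx))
        ok = 1;
    BN_free(e);
    return ok;
}
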
diff --git a/crypto/conf/conf_def.c b/crypto/conf/conf_def.c
index cdded0d6b5..5e226705ed 100644
--- a/crypto/conf/conf_def.c
+++ b/crypto/conf/conf_def.c
@@ -567,16 +567,16 @@ static int str_copy(CONF *conf, char *section, char **pto, char *from)
}
e++;
}
- /*-
- * So at this point we have
- * np which is the start of the name string which is
- * '\0' terminated.
- * cp which is the start of the section string which is
- * '\0' terminated.
- * e is the 'next point after'.
- * r and rr are the chars replaced by the '\0'
- * rp and rrp is where 'r' and 'rr' came from.
- */
+ /*-
+ * So at this point we have
+ * np which is the start of the name string which is
+ * '\0' terminated.
+ * cp which is the start of the section string which is
+ * '\0' terminated.
+ * e is the 'next point after'.
+ * r and rr are the chars replaced by the '\0'
+ * rp and rrp is where 'r' and 'rr' came from.
+ */
p = _CONF_get_string(conf, cp, np);
if (rrp != NULL)
*rrp = rr;
diff --git a/crypto/des/destest.c b/crypto/des/destest.c
index 0e993a35f4..5b7dbc2f44 100644
--- a/crypto/des/destest.c
+++ b/crypto/des/destest.c
@@ -397,13 +397,13 @@ int main(int argc, char *argv[])
DES_ENCRYPT);
DES_ede3_cbcm_encrypt(&cbc_data[16], &cbc_out[16], i - 16, &ks, &ks2,
&ks3, &iv3, &iv2, DES_ENCRYPT);
- /*- if (memcmp(cbc_out,cbc3_ok,
- (unsigned int)(strlen((char *)cbc_data)+1+7)/8*8) != 0)
- {
- printf("des_ede3_cbc_encrypt encrypt error\n");
- err=1;
- }
- */
+/*- if (memcmp(cbc_out,cbc3_ok,
+ (unsigned int)(strlen((char *)cbc_data)+1+7)/8*8) != 0)
+ {
+ printf("des_ede3_cbc_encrypt encrypt error\n");
+ err=1;
+ }
+*/
memcpy(iv3, cbc_iv, sizeof(cbc_iv));
memset(iv2, '\0', sizeof iv2);
DES_ede3_cbcm_encrypt(cbc_out, cbc_in, i, &ks, &ks2, &ks3, &iv3, &iv2,
diff --git a/crypto/dsa/dsa_ameth.c b/crypto/dsa/dsa_ameth.c
index 52271215bd..1569a33164 100644
--- a/crypto/dsa/dsa_ameth.c
+++ b/crypto/dsa/dsa_ameth.c
@@ -200,11 +200,11 @@ static int dsa_priv_decode(EVP_PKEY *pkey, PKCS8_PRIV_KEY_INFO *p8)
goto decerr;
if (sk_ASN1_TYPE_num(ndsa) != 2)
goto decerr;
- /*-
- * Handle Two broken types:
- * SEQUENCE {parameters, priv_key}
- * SEQUENCE {pub_key, priv_key}
- */
+ /*-
+ * Handle Two broken types:
+ * SEQUENCE {parameters, priv_key}
+ * SEQUENCE {pub_key, priv_key}
+ */
t1 = sk_ASN1_TYPE_value(ndsa, 0);
t2 = sk_ASN1_TYPE_value(ndsa, 1);
diff --git a/crypto/dso/dso_vms.c b/crypto/dso/dso_vms.c
index 511858a681..14d885df15 100644
--- a/crypto/dso/dso_vms.c
+++ b/crypto/dso/dso_vms.c
@@ -178,23 +178,23 @@ static int vms_load(DSO *dso)
goto err;
}
- /*-
- * A file specification may look like this:
- *
- * node::dev:[dir-spec]name.type;ver
- *
- * or (for compatibility with TOPS-20):
- *
- * node::dev:<dir-spec>name.type;ver
- *
- * and the dir-spec uses '.' as separator. Also, a dir-spec
- * may consist of several parts, with mixed use of [] and <>:
- *
- * [dir1.]<dir2>
- *
- * We need to split the file specification into the name and
- * the rest (both before and after the name itself).
- */
+ /*-
+ * A file specification may look like this:
+ *
+ * node::dev:[dir-spec]name.type;ver
+ *
+ * or (for compatibility with TOPS-20):
+ *
+ * node::dev:<dir-spec>name.type;ver
+ *
+ * and the dir-spec uses '.' as separator. Also, a dir-spec
+ * may consist of several parts, with mixed use of [] and <>:
+ *
+ * [dir1.]<dir2>
+ *
+ * We need to split the file specification into the name and
+ * the rest (both before and after the name itself).
+ */
/*
* Start with trying to find the end of a dir-spec, and save the position
* of the byte after in sp1
diff --git a/crypto/ec/ec.h b/crypto/ec/ec.h
index 4c955ac3d3..a3d50e73d7 100644
--- a/crypto/ec/ec.h
+++ b/crypto/ec/ec.h
@@ -116,14 +116,14 @@ typedef enum {
typedef struct ec_method_st EC_METHOD;
typedef struct ec_group_st
- /*-
- EC_METHOD *meth;
- -- field definition
- -- curve coefficients
- -- optional generator with associated information (order, cofactor)
- -- optional extra data (precomputed table for fast computation of multiples of generator)
- -- ASN1 stuff
- */
+ /*-
+ EC_METHOD *meth;
+ -- field definition
+ -- curve coefficients
+ -- optional generator with associated information (order, cofactor)
+ -- optional extra data (precomputed table for fast computation of multiples of generator)
+ -- ASN1 stuff
+ */
EC_GROUP;
typedef struct ec_point_st EC_POINT;
diff --git a/crypto/ec/ec2_smpl.c b/crypto/ec/ec2_smpl.c
index 358b81609d..c1fb63d94d 100644
--- a/crypto/ec/ec2_smpl.c
+++ b/crypto/ec/ec2_smpl.c
@@ -639,12 +639,12 @@ int ec_GF2m_simple_is_on_curve(const EC_GROUP *group, const EC_POINT *point,
if (lh == NULL)
goto err;
- /*-
- * We have a curve defined by a Weierstrass equation
- * y^2 + x*y = x^3 + a*x^2 + b.
- * <=> x^3 + a*x^2 + x*y + b + y^2 = 0
- * <=> ((x + a) * x + y ) * x + b + y^2 = 0
- */
+ /*-
+ * We have a curve defined by a Weierstrass equation
+ * y^2 + x*y = x^3 + a*x^2 + b.
+ * <=> x^3 + a*x^2 + x*y + b + y^2 = 0
+ * <=> ((x + a) * x + y ) * x + b + y^2 = 0
+ */
if (!BN_GF2m_add(lh, point->X, group->a))
goto err;
if (!field_mul(group, lh, lh, point->X, ctx))
diff --git a/crypto/ec/ec_lcl.h b/crypto/ec/ec_lcl.h
index cc551ab37d..9db7106c5a 100644
--- a/crypto/ec/ec_lcl.h
+++ b/crypto/ec/ec_lcl.h
@@ -120,14 +120,14 @@ struct ec_method_st {
void (*point_finish) (EC_POINT *);
void (*point_clear_finish) (EC_POINT *);
int (*point_copy) (EC_POINT *, const EC_POINT *);
- /*-
- * used by EC_POINT_set_to_infinity,
- * EC_POINT_set_Jprojective_coordinates_GFp,
- * EC_POINT_get_Jprojective_coordinates_GFp,
- * EC_POINT_set_affine_coordinates_GFp, ..._GF2m,
- * EC_POINT_get_affine_coordinates_GFp, ..._GF2m,
- * EC_POINT_set_compressed_coordinates_GFp, ..._GF2m:
- */
+ /*-
+ * used by EC_POINT_set_to_infinity,
+ * EC_POINT_set_Jprojective_coordinates_GFp,
+ * EC_POINT_get_Jprojective_coordinates_GFp,
+ * EC_POINT_set_affine_coordinates_GFp, ..._GF2m,
+ * EC_POINT_get_affine_coordinates_GFp, ..._GF2m,
+ * EC_POINT_set_compressed_coordinates_GFp, ..._GF2m:
+ */
int (*point_set_to_infinity) (const EC_GROUP *, EC_POINT *);
int (*point_set_Jprojective_coordinates_GFp) (const EC_GROUP *,
EC_POINT *, const BIGNUM *x,
diff --git a/crypto/ec/ec_mult.c b/crypto/ec/ec_mult.c
index 7bfc01b6c2..fe87c418aa 100644
--- a/crypto/ec/ec_mult.c
+++ b/crypto/ec/ec_mult.c
@@ -469,13 +469,13 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *scalar,
if (!(tmp = EC_POINT_new(group)))
goto err;
- /*-
- * prepare precomputed values:
- * val_sub[i][0] := points[i]
- * val_sub[i][1] := 3 * points[i]
- * val_sub[i][2] := 5 * points[i]
- * ...
- */
+ /*-
+ * prepare precomputed values:
+ * val_sub[i][0] := points[i]
+ * val_sub[i][1] := 3 * points[i]
+ * val_sub[i][2] := 5 * points[i]
+ * ...
+ */
for (i = 0; i < num + num_scalar; i++) {
if (i < num) {
if (!EC_POINT_copy(val_sub[i][0], points[i]))
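
The precomputation described above keeps the odd multiples 1*P, 3*P, 5*P, ... of each point. A minimal sketch of building such a table with the public EC_POINT API; names are illustrative, and this is not the ec_wNAF_mul internals:

#include <stddef.h>
#include <openssl/ec.h>

/* Fill tbl[k] = (2k+1)*P for k = 0 .. n-1 using one doubling and repeated
 * additions; tbl[] must already hold n points created with EC_POINT_new(). */
static int precompute_odd_multiples(const EC_GROUP *group, EC_POINT **tbl,
                                    size_t n, const EC_POINT *P, BN_CTX *ctx)
{
    size_t k;
    int ok = 0;
    EC_POINT *twoP = EC_POINT_new(group);

    if (twoP == NULL || n == 0)
        goto err;
    if (!EC_POINT_copy(tbl[0], P))              /* tbl[0] = 1*P */
        goto err;
    if (!EC_POINT_dbl(group, twoP, P, ctx))     /* twoP   = 2*P */
        goto err;
    for (k = 1; k < n; k++)
        if (!EC_POINT_add(group, tbl[k], tbl[k - 1], twoP, ctx))
            goto err;                           /* tbl[k] = (2k+1)*P */
    ok = 1;
 err:
    EC_POINT_free(twoP);
    return ok;
}
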
diff --git a/crypto/ec/ecp_nistp224.c b/crypto/ec/ecp_nistp224.c
index de7a25b3f0..ffb50d848c 100644
--- a/crypto/ec/ecp_nistp224.c
+++ b/crypto/ec/ecp_nistp224.c
@@ -613,11 +613,11 @@ static void felem_reduce(felem out, const widefelem in)
/* output[3] <= 2^56 + 2^16 */
out[2] = output[2] & 0x00ffffffffffffff;
- /*-
- * out[0] < 2^56, out[1] < 2^56, out[2] < 2^56,
- * out[3] <= 2^56 + 2^16 (due to final carry),
- * so out < 2*p
- */
+ /*-
+ * out[0] < 2^56, out[1] < 2^56, out[2] < 2^56,
+ * out[3] <= 2^56 + 2^16 (due to final carry),
+ * so out < 2*p
+ */
out[3] = output[3];
}
@@ -1043,10 +1043,10 @@ static void point_add(felem x3, felem y3, felem z3,
felem_scalar(ftmp5, 2);
/* ftmp5[i] < 2 * 2^57 = 2^58 */
- /*-
- * x_out = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 -
- * 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2
- */
+ /*-
+ * x_out = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 -
+ * 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2
+ */
felem_diff_128_64(tmp2, ftmp5);
/* tmp2[i] < 2^117 + 2^64 + 8 < 2^118 */
felem_reduce(x_out, tmp2);
@@ -1061,10 +1061,10 @@ static void point_add(felem x3, felem y3, felem z3,
felem_mul(tmp2, ftmp3, ftmp2);
/* tmp2[i] < 4 * 2^57 * 2^59 = 2^118 */
- /*-
- * y_out = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) -
- * z2^3*y1*(z1^2*x2 - z2^2*x1)^3
- */
+ /*-
+ * y_out = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) -
+ * z2^3*y1*(z1^2*x2 - z2^2*x1)^3
+ */
widefelem_diff(tmp2, tmp);
/* tmp2[i] < 2^118 + 2^120 < 2^121 */
felem_reduce(y_out, tmp2);
diff --git a/crypto/ec/ecp_nistp256.c b/crypto/ec/ecp_nistp256.c
index 5a21a3c13d..51ac99c85b 100644
--- a/crypto/ec/ecp_nistp256.c
+++ b/crypto/ec/ecp_nistp256.c
@@ -432,25 +432,25 @@ static void felem_shrink(smallfelem out, const felem in)
/* As tmp[3] < 2^65, high is either 1 or 0 */
high <<= 63;
high >>= 63;
- /*-
- * high is:
- * all ones if the high word of tmp[3] is 1
- * all zeros if the high word of tmp[3] if 0 */
+ /*-
+ * high is:
+ * all ones if the high word of tmp[3] is 1
+ * all zeros if the high word of tmp[3] if 0 */
low = tmp[3];
mask = low >> 63;
- /*-
- * mask is:
- * all ones if the MSB of low is 1
- * all zeros if the MSB of low if 0 */
+ /*-
+ * mask is:
+ * all ones if the MSB of low is 1
+ * all zeros if the MSB of low if 0 */
low &= bottom63bits;
low -= kPrime3Test;
/* if low was greater than kPrime3Test then the MSB is zero */
low = ~low;
low >>= 63;
- /*-
- * low is:
- * all ones if low was > kPrime3Test
- * all zeros if low was <= kPrime3Test */
+ /*-
+ * low is:
+ * all ones if low was > kPrime3Test
+ * all zeros if low was <= kPrime3Test */
mask = (mask & low) | high;
tmp[0] -= mask & kPrime[0];
tmp[1] -= mask & kPrime[1];
@@ -790,17 +790,17 @@ static void felem_reduce(felem out, const longfelem in)
felem_reduce_(out, in);
- /*-
- * out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0
- * out[1] > 2^100 - 2^64 - 7*2^96 > 0
- * out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0
- * out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0
- *
- * out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101
- * out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101
- * out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101
- * out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101
- */
+ /*-
+ * out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0
+ * out[1] > 2^100 - 2^64 - 7*2^96 > 0
+ * out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0
+ * out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0
+ *
+ * out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101
+ * out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101
+ * out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101
+ * out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101
+ */
}
/*-
@@ -819,17 +819,17 @@ static void felem_reduce_zero105(felem out, const longfelem in)
felem_reduce_(out, in);
- /*-
- * out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0
- * out[1] > 2^105 - 2^71 - 2^103 > 0
- * out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0
- * out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0
- *
- * out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
- * out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
- * out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106
- * out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106
- */
+ /*-
+ * out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0
+ * out[1] > 2^105 - 2^71 - 2^103 > 0
+ * out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0
+ * out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0
+ *
+ * out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
+ * out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
+ * out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106
+ * out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106
+ */
}
/*
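
The all-ones/all-zeros masks described above are the standard constant-time selection idiom. A small standalone sketch of the trick (not the nistp256 code):

#include <stdint.h>

/* Turn a 0/1 bit into an all-zeros/all-ones mask, then use the mask to
 * subtract the modulus conditionally without a secret-dependent branch. */
static uint64_t mask_from_bit(uint64_t bit)      /* bit must be 0 or 1 */
{
    return 0 - bit;                              /* 1 -> 0xffff...f, 0 -> 0 */
}

/* Reduce x into [0, p) assuming x < 2*p and p fits in one word. */
static uint64_t cond_sub_p(uint64_t x, uint64_t p)
{
    uint64_t ge = (uint64_t)(x >= p);            /* 0 or 1 */
    return x - (p & mask_from_bit(ge));
}
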
diff --git a/crypto/ec/ecp_nistp521.c b/crypto/ec/ecp_nistp521.c
index c1ef3fedac..fa6766ec30 100644
--- a/crypto/ec/ecp_nistp521.c
+++ b/crypto/ec/ecp_nistp521.c
@@ -414,15 +414,15 @@ static void felem_square(largefelem out, const felem in)
felem_scalar(inx2, in, 2);
felem_scalar(inx4, in, 4);
- /*-
- * We have many cases were we want to do
- * in[x] * in[y] +
- * in[y] * in[x]
- * This is obviously just
- * 2 * in[x] * in[y]
- * However, rather than do the doubling on the 128 bit result, we
- * double one of the inputs to the multiplication by reading from
- * |inx2| */
+ /*-
+ * We have many cases were we want to do
+ * in[x] * in[y] +
+ * in[y] * in[x]
+ * This is obviously just
+ * 2 * in[x] * in[y]
+ * However, rather than do the doubling on the 128 bit result, we
+ * double one of the inputs to the multiplication by reading from
+ * |inx2| */
out[0] = ((uint128_t) in[0]) * in[0];
out[1] = ((uint128_t) in[0]) * inx2[1];
@@ -1055,13 +1055,13 @@ point_double(felem x_out, felem y_out, felem z_out,
felem_scalar64(ftmp2, 3);
/* ftmp2[i] < 3*2^60 + 3*2^15 */
felem_mul(tmp, ftmp, ftmp2);
- /*-
- * tmp[i] < 17(3*2^121 + 3*2^76)
- * = 61*2^121 + 61*2^76
- * < 64*2^121 + 64*2^76
- * = 2^127 + 2^82
- * < 2^128
- */
+ /*-
+ * tmp[i] < 17(3*2^121 + 3*2^76)
+ * = 61*2^121 + 61*2^76
+ * < 64*2^121 + 64*2^76
+ * = 2^127 + 2^82
+ * < 2^128
+ */
felem_reduce(alpha, tmp);
/* x' = alpha^2 - 8*beta */
@@ -1096,30 +1096,30 @@ point_double(felem x_out, felem y_out, felem z_out,
felem_diff64(beta, x_out);
/* beta[i] < 2^61 + 2^60 + 2^16 */
felem_mul(tmp, alpha, beta);
- /*-
- * tmp[i] < 17*((2^59 + 2^14)(2^61 + 2^60 + 2^16))
- * = 17*(2^120 + 2^75 + 2^119 + 2^74 + 2^75 + 2^30)
- * = 17*(2^120 + 2^119 + 2^76 + 2^74 + 2^30)
- * < 2^128
- */
+ /*-
+ * tmp[i] < 17*((2^59 + 2^14)(2^61 + 2^60 + 2^16))
+ * = 17*(2^120 + 2^75 + 2^119 + 2^74 + 2^75 + 2^30)
+ * = 17*(2^120 + 2^119 + 2^76 + 2^74 + 2^30)
+ * < 2^128
+ */
felem_square(tmp2, gamma);
- /*-
- * tmp2[i] < 17*(2^59 + 2^14)^2
- * = 17*(2^118 + 2^74 + 2^28)
- */
+ /*-
+ * tmp2[i] < 17*(2^59 + 2^14)^2
+ * = 17*(2^118 + 2^74 + 2^28)
+ */
felem_scalar128(tmp2, 8);
- /*-
- * tmp2[i] < 8*17*(2^118 + 2^74 + 2^28)
- * = 2^125 + 2^121 + 2^81 + 2^77 + 2^35 + 2^31
- * < 2^126
- */
+ /*-
+ * tmp2[i] < 8*17*(2^118 + 2^74 + 2^28)
+ * = 2^125 + 2^121 + 2^81 + 2^77 + 2^35 + 2^31
+ * < 2^126
+ */
felem_diff128(tmp, tmp2);
- /*-
- * tmp[i] < 2^127 - 2^69 + 17(2^120 + 2^119 + 2^76 + 2^74 + 2^30)
- * = 2^127 + 2^124 + 2^122 + 2^120 + 2^118 + 2^80 + 2^78 + 2^76 +
- * 2^74 + 2^69 + 2^34 + 2^30
- * < 2^128
- */
+ /*-
+ * tmp[i] < 2^127 - 2^69 + 17(2^120 + 2^119 + 2^76 + 2^74 + 2^30)
+ * = 2^127 + 2^124 + 2^122 + 2^120 + 2^118 + 2^80 + 2^78 + 2^76 +
+ * 2^74 + 2^69 + 2^34 + 2^30
+ * < 2^128
+ */
felem_reduce(y_out, tmp);
}
diff --git a/crypto/ec/ecp_smpl.c b/crypto/ec/ecp_smpl.c
index 52b3e35972..34ae6d5ff5 100644
--- a/crypto/ec/ecp_smpl.c
+++ b/crypto/ec/ecp_smpl.c
@@ -320,11 +320,11 @@ int ec_GFp_simple_group_check_discriminant(const EC_GROUP *group, BN_CTX *ctx)
goto err;
}
- /*-
- * check the discriminant:
- * y^2 = x^3 + a*x + b is an elliptic curve <=> 4*a^3 + 27*b^2 != 0 (mod p)
- * 0 =< a, b < p
- */
+ /*-
+ * check the discriminant:
+ * y^2 = x^3 + a*x + b is an elliptic curve <=> 4*a^3 + 27*b^2 != 0 (mod p)
+ * 0 =< a, b < p
+ */
if (BN_is_zero(a)) {
if (BN_is_zero(b))
goto err;
@@ -1033,15 +1033,15 @@ int ec_GFp_simple_is_on_curve(const EC_GROUP *group, const EC_POINT *point,
if (Z6 == NULL)
goto err;
- /*-
- * We have a curve defined by a Weierstrass equation
- * y^2 = x^3 + a*x + b.
- * The point to consider is given in Jacobian projective coordinates
- * where (X, Y, Z) represents (x, y) = (X/Z^2, Y/Z^3).
- * Substituting this and multiplying by Z^6 transforms the above equation into
- * Y^2 = X^3 + a*X*Z^4 + b*Z^6.
- * To test this, we add up the right-hand side in 'rh'.
- */
+ /*-
+ * We have a curve defined by a Weierstrass equation
+ * y^2 = x^3 + a*x + b.
+ * The point to consider is given in Jacobian projective coordinates
+ * where (X, Y, Z) represents (x, y) = (X/Z^2, Y/Z^3).
+ * Substituting this and multiplying by Z^6 transforms the above equation into
+ * Y^2 = X^3 + a*X*Z^4 + b*Z^6.
+ * To test this, we add up the right-hand side in 'rh'.
+ */
/* rh := X^2 */
if (!field_sqr(group, rh, point->X, ctx))
@@ -1151,12 +1151,12 @@ int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a,
if (Zb23 == NULL)
goto end;
- /*-
- * We have to decide whether
- * (X_a/Z_a^2, Y_a/Z_a^3) = (X_b/Z_b^2, Y_b/Z_b^3),
- * or equivalently, whether
- * (X_a*Z_b^2, Y_a*Z_b^3) = (X_b*Z_a^2, Y_b*Z_a^3).
- */
+ /*-
+ * We have to decide whether
+ * (X_a/Z_a^2, Y_a/Z_a^3) = (X_b/Z_b^2, Y_b/Z_b^3),
+ * or equivalently, whether
+ * (X_a*Z_b^2, Y_a*Z_b^3) = (X_b*Z_a^2, Y_b*Z_a^3).
+ */
if (!b->Z_is_one) {
if (!field_sqr(group, Zb23, b->Z, ctx))
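
The Jacobian forms above avoid modular inversions; in plain affine coordinates the same on-curve test is a handful of BIGNUM calls. A minimal affine sketch, an illustration rather than the Jacobian code in these hunks:

#include <openssl/bn.h>

/* Return 1 if y^2 == x^3 + a*x + b (mod p), 0 if not, -1 on error. */
static int affine_on_curve(const BIGNUM *x, const BIGNUM *y,
                           const BIGNUM *a, const BIGNUM *b,
                           const BIGNUM *p, BN_CTX *ctx)
{
    int ret = -1;
    BIGNUM *lhs = BN_new(), *rhs = BN_new();

    if (lhs == NULL || rhs == NULL)
        goto err;
    if (!BN_mod_sqr(lhs, y, p, ctx)             /* lhs = y^2           */
        || !BN_mod_sqr(rhs, x, p, ctx)          /* rhs = x^2           */
        || !BN_mod_add(rhs, rhs, a, p, ctx)     /* rhs = x^2 + a       */
        || !BN_mod_mul(rhs, rhs, x, p, ctx)     /* rhs = (x^2 + a)*x   */
        || !BN_mod_add(rhs, rhs, b, p, ctx))    /* rhs = x^3 + a*x + b */
        goto err;
    ret = BN_cmp(lhs, rhs) == 0;
 err:
    BN_free(lhs);
    BN_free(rhs);
    return ret;
}
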
diff --git a/crypto/o_time.c b/crypto/o_time.c
index 4317e6cd87..4e3dff3cbe 100644
--- a/crypto/o_time.c
+++ b/crypto/o_time.c
@@ -159,30 +159,30 @@ struct tm *OPENSSL_gmtime(const time_t *timer, struct tm *result)
* do it the hard way.
*/
{
- /*-
- * The VMS epoch is the astronomical Smithsonian date,
- if I remember correctly, which is November 17, 1858.
- Furthermore, time is measure in thenths of microseconds
- and stored in quadwords (64 bit integers). unix_epoch
- below is January 1st 1970 expressed as a VMS time. The
- following code was used to get this number:
-
- #include <stdio.h>
- #include <stdlib.h>
- #include <lib$routines.h>
- #include <starlet.h>
-
- main()
- {
- unsigned long systime[2];
- unsigned short epoch_values[7] =
- { 1970, 1, 1, 0, 0, 0, 0 };
-
- lib$cvt_vectim(epoch_values, systime);
-
- printf("%u %u", systime[0], systime[1]);
- }
- */
+ /*-
+ * The VMS epoch is the astronomical Smithsonian date,
+ if I remember correctly, which is November 17, 1858.
+ Furthermore, time is measure in thenths of microseconds
+ and stored in quadwords (64 bit integers). unix_epoch
+ below is January 1st 1970 expressed as a VMS time. The
+ following code was used to get this number:
+
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <lib$routines.h>
+ #include <starlet.h>
+
+ main()
+ {
+ unsigned long systime[2];
+ unsigned short epoch_values[7] =
+ { 1970, 1, 1, 0, 0, 0, 0 };
+
+ lib$cvt_vectim(epoch_values, systime);
+
+ printf("%u %u", systime[0], systime[1]);
+ }
+ */
unsigned long unix_epoch[2] = { 1273708544, 8164711 };
unsigned long deltatime[2];
unsigned long systime[2];
diff --git a/crypto/pem/pem.h b/crypto/pem/pem.h
index daf989f573..fa042683e5 100644
--- a/crypto/pem/pem.h
+++ b/crypto/pem/pem.h
@@ -183,9 +183,9 @@ typedef struct pem_ctx_st {
int num_recipient;
PEM_USER **recipient;
- /*-
- XXX(ben): don#t think this is used!
- STACK *x509_chain; / * certificate chain */
+/*-
+ XXX(ben): don#t think this is used!
+ STACK *x509_chain; / * certificate chain */
EVP_MD *md; /* signature type */
int md_enc; /* is the md encrypted or not? */
@@ -195,9 +195,9 @@ typedef struct pem_ctx_st {
EVP_CIPHER *dec; /* date encryption cipher */
int key_len; /* key length */
unsigned char *key; /* key */
- /*-
- unused, and wrong size
- unsigned char iv[8]; */
+ /*-
+ unused, and wrong size
+ unsigned char iv[8]; */
int data_enc; /* is the data encrypted */
int data_len;
diff --git a/crypto/rc4/rc4_enc.c b/crypto/rc4/rc4_enc.c
index 19ddd2358d..0f0a2487a7 100644
--- a/crypto/rc4/rc4_enc.c
+++ b/crypto/rc4/rc4_enc.c
@@ -80,36 +80,36 @@ void RC4(RC4_KEY *key, size_t len, const unsigned char *indata,
d = key->data;
#if defined(RC4_CHUNK) && !defined(PEDANTIC)
- /*-
- * The original reason for implementing this(*) was the fact that
- * pre-21164a Alpha CPUs don't have byte load/store instructions
- * and e.g. a byte store has to be done with 64-bit load, shift,
- * and, or and finally 64-bit store. Peaking data and operating
- * at natural word size made it possible to reduce amount of
- * instructions as well as to perform early read-ahead without
- * suffering from RAW (read-after-write) hazard. This resulted
- * in ~40%(**) performance improvement on 21064 box with gcc.
- * But it's not only Alpha users who win here:-) Thanks to the
- * early-n-wide read-ahead this implementation also exhibits
- * >40% speed-up on SPARC and 20-30% on 64-bit MIPS (depending
- * on sizeof(RC4_INT)).
- *
- * (*) "this" means code which recognizes the case when input
- * and output pointers appear to be aligned at natural CPU
- * word boundary
- * (**) i.e. according to 'apps/openssl speed rc4' benchmark,
- * crypto/rc4/rc4speed.c exhibits almost 70% speed-up...
- *
- * Cavets.
- *
- * - RC4_CHUNK="unsigned long long" should be a #1 choice for
- * UltraSPARC. Unfortunately gcc generates very slow code
- * (2.5-3 times slower than one generated by Sun's WorkShop
- * C) and therefore gcc (at least 2.95 and earlier) should
- * always be told that RC4_CHUNK="unsigned long".
- *
- * <appro@fy.chalmers.se>
- */
+ /*-
+ * The original reason for implementing this(*) was the fact that
+ * pre-21164a Alpha CPUs don't have byte load/store instructions
+ * and e.g. a byte store has to be done with 64-bit load, shift,
+ * and, or and finally 64-bit store. Peaking data and operating
+ * at natural word size made it possible to reduce amount of
+ * instructions as well as to perform early read-ahead without
+ * suffering from RAW (read-after-write) hazard. This resulted
+ * in ~40%(**) performance improvement on 21064 box with gcc.
+ * But it's not only Alpha users who win here:-) Thanks to the
+ * early-n-wide read-ahead this implementation also exhibits
+ * >40% speed-up on SPARC and 20-30% on 64-bit MIPS (depending
+ * on sizeof(RC4_INT)).
+ *
+ * (*) "this" means code which recognizes the case when input
+ * and output pointers appear to be aligned at natural CPU
+ * word boundary
+ * (**) i.e. according to 'apps/openssl speed rc4' benchmark,
+ * crypto/rc4/rc4speed.c exhibits almost 70% speed-up...
+ *
+ * Cavets.
+ *
+ * - RC4_CHUNK="unsigned long long" should be a #1 choice for
+ * UltraSPARC. Unfortunately gcc generates very slow code
+ * (2.5-3 times slower than one generated by Sun's WorkShop
+ * C) and therefore gcc (at least 2.95 and earlier) should
+ * always be told that RC4_CHUNK="unsigned long".
+ *
+ * <appro@fy.chalmers.se>
+ */
# define RC4_STEP ( \
x=(x+1) &0xff, \
@@ -131,34 +131,34 @@ void RC4(RC4_KEY *key, size_t len, const unsigned char *indata,
1
};
- /*-
- * I reckon we can afford to implement both endian
- * cases and to decide which way to take at run-time
- * because the machine code appears to be very compact
- * and redundant 1-2KB is perfectly tolerable (i.e.
- * in case the compiler fails to eliminate it:-). By
- * suggestion from Terrel Larson <terr@terralogic.net>
- * who also stands for the is_endian union:-)
- *
- * Special notes.
- *
- * - is_endian is declared automatic as doing otherwise
- * (declaring static) prevents gcc from eliminating
- * the redundant code;
- * - compilers (those I've tried) don't seem to have
- * problems eliminating either the operators guarded
- * by "if (sizeof(RC4_CHUNK)==8)" or the condition
- * expressions themselves so I've got 'em to replace
- * corresponding #ifdefs from the previous version;
- * - I chose to let the redundant switch cases when
- * sizeof(RC4_CHUNK)!=8 be (were also #ifdefed
- * before);
- * - in case you wonder "&(sizeof(RC4_CHUNK)*8-1)" in
- * [LB]ESHFT guards against "shift is out of range"
- * warnings when sizeof(RC4_CHUNK)!=8
- *
- * <appro@fy.chalmers.se>
- */
+ /*-
+ * I reckon we can afford to implement both endian
+ * cases and to decide which way to take at run-time
+ * because the machine code appears to be very compact
+ * and redundant 1-2KB is perfectly tolerable (i.e.
+ * in case the compiler fails to eliminate it:-). By
+ * suggestion from Terrel Larson <terr@terralogic.net>
+ * who also stands for the is_endian union:-)
+ *
+ * Special notes.
+ *
+ * - is_endian is declared automatic as doing otherwise
+ * (declaring static) prevents gcc from eliminating
+ * the redundant code;
+ * - compilers (those I've tried) don't seem to have
+ * problems eliminating either the operators guarded
+ * by "if (sizeof(RC4_CHUNK)==8)" or the condition
+ * expressions themselves so I've got 'em to replace
+ * corresponding #ifdefs from the previous version;
+ * - I chose to let the redundant switch cases when
+ * sizeof(RC4_CHUNK)!=8 be (were also #ifdefed
+ * before);
+ * - in case you wonder "&(sizeof(RC4_CHUNK)*8-1)" in
+ * [LB]ESHFT guards against "shift is out of range"
+ * warnings when sizeof(RC4_CHUNK)!=8
+ *
+ * <appro@fy.chalmers.se>
+ */
if (!is_endian.little) { /* BIG-ENDIAN CASE */
# define BESHFT(c) (((sizeof(RC4_CHUNK)-(c)-1)*8)&(sizeof(RC4_CHUNK)*8-1))
for (; len & (0 - sizeof(RC4_CHUNK)); len -= sizeof(RC4_CHUNK)) {
diff --git a/crypto/rsa/rsa_pss.c b/crypto/rsa/rsa_pss.c
index dd3a0e70a8..318f5b8264 100644
--- a/crypto/rsa/rsa_pss.c
+++ b/crypto/rsa/rsa_pss.c
@@ -98,12 +98,12 @@ int RSA_verify_PKCS1_PSS_mgf1(RSA *rsa, const unsigned char *mHash,
hLen = M_EVP_MD_size(Hash);
if (hLen < 0)
goto err;
- /*-
- * Negative sLen has special meanings:
- * -1 sLen == hLen
- * -2 salt length is autorecovered from signature
- * -N reserved
- */
+ /*-
+ * Negative sLen has special meanings:
+ * -1 sLen == hLen
+ * -2 salt length is autorecovered from signature
+ * -N reserved
+ */
if (sLen == -1)
sLen = hLen;
else if (sLen == -2)
@@ -202,12 +202,12 @@ int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, unsigned char *EM,
hLen = M_EVP_MD_size(Hash);
if (hLen < 0)
goto err;
- /*-
- * Negative sLen has special meanings:
- * -1 sLen == hLen
- * -2 salt length is maximized
- * -N reserved
- */
+ /*-
+ * Negative sLen has special meanings:
+ * -1 sLen == hLen
+ * -2 salt length is maximized
+ * -N reserved
+ */
if (sLen == -1)
sLen = hLen;
else if (sLen == -2)
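
A compact restatement of the sLen convention above as code; the helper is hypothetical, with hLen the digest size and emLen the encoded-message (modulus) size in bytes:

/* Resolve the negative-sLen conventions into a concrete salt length,
 * or -1 for reserved/invalid values. */
static int resolve_pss_saltlen(int sLen, int hLen, int emLen)
{
    if (sLen == -1)
        return hLen;                 /* salt length equals the hash length */
    if (sLen == -2)
        return emLen - hLen - 2;     /* the largest salt that still fits   */
    if (sLen < 0)
        return -1;                   /* -3 and below are reserved          */
    return sLen;                     /* explicit non-negative salt length  */
}
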
diff --git a/crypto/threads/mttest.c b/crypto/threads/mttest.c
index 6f3a13cb33..eea89d5994 100644
--- a/crypto/threads/mttest.c
+++ b/crypto/threads/mttest.c
@@ -798,12 +798,12 @@ void solaris_locking_callback(int mode, int type, char *file, int line)
(type & CRYPTO_READ) ? "r" : "w", file, line);
# endif
- /*-
- if (CRYPTO_LOCK_SSL_CERT == type)
- fprintf(stderr,"(t,m,f,l) %ld %d %s %d\n",
- CRYPTO_thread_id(),
- mode,file,line);
- */
+ /*-
+ if (CRYPTO_LOCK_SSL_CERT == type)
+ fprintf(stderr,"(t,m,f,l) %ld %d %s %d\n",
+ CRYPTO_thread_id(),
+ mode,file,line);
+ */
if (mode & CRYPTO_LOCK) {
/*-
if (mode & CRYPTO_READ)
diff --git a/crypto/whrlpool/wp_dgst.c b/crypto/whrlpool/wp_dgst.c
index bf27c315bf..bb99799a71 100644
--- a/crypto/whrlpool/wp_dgst.c
+++ b/crypto/whrlpool/wp_dgst.c
@@ -131,18 +131,18 @@ void WHIRLPOOL_BitUpdate(WHIRLPOOL_CTX *c, const void *_inp, size_t bits)
} else /* bit-oriented loop */
#endif
{
- /*-
- inp
- |
- +-------+-------+-------
- |||||||||||||||||||||
- +-------+-------+-------
- +-------+-------+-------+-------+-------
- |||||||||||||| c->data
- +-------+-------+-------+-------+-------
- |
- c->bitoff/8
- */
+ /*-
+ inp
+ |
+ +-------+-------+-------
+ |||||||||||||||||||||
+ +-------+-------+-------
+ +-------+-------+-------+-------+-------
+ |||||||||||||| c->data
+ +-------+-------+-------+-------+-------
+ |
+ c->bitoff/8
+ */
while (bits) {
unsigned int byteoff = bitoff / 8;
unsigned char b;
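
The picture above shows input bits being spliced into c->data at an arbitrary bit offset. The core of that splice is taking the next 8 bits of a byte stream starting at a bit boundary; a minimal sketch, which assumes the byte after byteoff is readable whenever the offset is not byte-aligned:

#include <stddef.h>

/* Return the 8 bits of inp that start at bit offset 'bitoff' (MSB first). */
static unsigned char take_byte_at_bit(const unsigned char *inp, size_t bitoff)
{
    size_t byteoff = bitoff / 8;
    unsigned int shift = (unsigned int)(bitoff % 8);
    unsigned char b = (unsigned char)(inp[byteoff] << shift);

    if (shift != 0)
        b |= inp[byteoff + 1] >> (8 - shift);
    return b;
}
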
diff --git a/crypto/x509v3/v3_scts.c b/crypto/x509v3/v3_scts.c
index 858f719147..9a4c3eba0b 100644
--- a/crypto/x509v3/v3_scts.c
+++ b/crypto/x509v3/v3_scts.c
@@ -222,14 +222,14 @@ static STACK_OF(SCT) *d2i_SCT_LIST(STACK_OF(SCT) **a,
sct->version = *p2++;
if (sct->version == 0) { /* SCT v1 */
- /*-
- * Fixed-length header:
- * struct {
- * (1 byte) Version sct_version;
- * (32 bytes) LogID id;
- * (8 bytes) uint64 timestamp;
- * (2 bytes + ?) CtExtensions extensions;
- */
+ /*-
+ * Fixed-length header:
+ * struct {
+ * (1 byte) Version sct_version;
+ * (32 bytes) LogID id;
+ * (8 bytes) uint64 timestamp;
+ * (2 bytes + ?) CtExtensions extensions;
+ */
if (sctlen < 43)
goto err;
sctlen -= 43;
@@ -248,12 +248,12 @@ static STACK_OF(SCT) *d2i_SCT_LIST(STACK_OF(SCT) **a,
p2 += fieldlen;
sctlen -= fieldlen;
- /*-
- * digitally-signed struct header:
- * (1 byte) Hash algorithm
- * (1 byte) Signature algorithm
- * (2 bytes + ?) Signature
- */
+ /*-
+ * digitally-signed struct header:
+ * (1 byte) Hash algorithm
+ * (1 byte) Signature algorithm
+ * (2 bytes + ?) Signature
+ */
if (sctlen < 4)
goto err;
sctlen -= 4;
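
Both checks above follow the usual TLS-style parsing pattern: verify the fixed-size part first, then read a 2-byte big-endian length and verify it against what remains before consuming the variable-size field. A generic sketch of that pattern (not the v3_scts.c code):

#include <stddef.h>

/* Consume a 2-byte big-endian length prefix plus the field it describes.
 * Returns 1 and sets *field/*fieldlen on success, 0 if the data is short. */
static int read_u16_prefixed(const unsigned char **p, size_t *remaining,
                             const unsigned char **field, size_t *fieldlen)
{
    if (*remaining < 2)
        return 0;
    *fieldlen = ((size_t)(*p)[0] << 8) | (*p)[1];
    *p += 2;
    *remaining -= 2;
    if (*remaining < *fieldlen)
        return 0;                    /* declared length exceeds the data */
    *field = *p;
    *p += *fieldlen;
    *remaining -= *fieldlen;
    return 1;
}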