+2020-11-14 Niels Möller <nisse@lysator.liu.se>
+
+ * ecc-mod-inv.c (ecc_mod_inv): Use passed-in scratch for all
+ scratch needs, don't use memory after the result area.
+ * ecc-ecdsa-sign.c (ecc_ecdsa_sign): Update invert call.
+ * ecc-ecdsa-verify.c (ecc_ecdsa_verify): Likewise.
+ * ecc-eh-to-a.c (ecc_eh_to_a): Likewise.
+ * ecc-j-to-a.c (ecc_j_to_a): Likewise.
+ * ecc-gostdsa-verify.c (ecc_gostdsa_verify): Likewise.
+ * ecc-internal.h (ECC_MOD_INV_ITCH, ECC_J_TO_A_ITCH)
+ (ECC_EH_TO_A_ITCH): Update accordingly, but no change in total
+ scratch need.
+
2020-11-13 Niels Möller <nisse@lysator.liu.se>
* ecc-internal.h (ECC_J_TO_A_ITCH): Generalize, and take invert
{
/* Needs 3*ecc->p.size + scratch for ecc->mul_g. Currently same for
ecc_mul_g. */
+ assert (ecc->p.size + ecc->p.invert_itch <= 3*ecc->p.size + ecc->mul_g_itch);
return ECC_ECDSA_SIGN_ITCH (ecc->p.size);
}
/* x coordinate only, modulo q */
ecc->h_to_a (ecc, 2, rp, P, P + 3*ecc->p.size);
- /* Invert k, uses 4 * ecc->p.size including scratch */
- ecc->q.invert (&ecc->q, kinv, kp, tp); /* NOTE: Also clobbers hp */
+ /* Invert k, uses up to 7 * ecc->p.size including scratch (for secp384). */
+ ecc->q.invert (&ecc->q, kinv, kp, tp);
/* Process hash digest */
ecc_hash (&ecc->q, hp, length, digest);
division, I think), and write an ecc_add_ppp. */
/* Compute sinv */
- ecc->q.invert (&ecc->q, sinv, sp, sinv + 2*ecc->p.size);
+ ecc->q.invert (&ecc->q, sinv, sp, sinv + ecc->p.size);
/* u1 = h / s, P1 = u1 * G */
ecc_hash (&ecc->q, hp, length, digest);
assert(op == 0);
- /* Needs 2*size + scratch for the invert call. */
- ecc->p.invert (&ecc->p, izp, zp, tp + ecc->p.size);
+ /* Needs size + scratch for the invert call. */
+ ecc->p.invert (&ecc->p, izp, zp, tp);
ecc_mod_mul (&ecc->p, tp, xp, izp, tp);
cy = mpn_sub_n (r, tp, ecc->p.m, ecc->p.size);
mpn_add_1 (hp, hp, ecc->p.size, 1);
/* Compute v */
- ecc->q.invert (&ecc->q, vp, hp, vp + 2*ecc->p.size);
+ ecc->q.invert (&ecc->q, vp, hp, vp + ecc->p.size);
/* z1 = s / h, P1 = z1 * G */
ecc_mod_mul (&ecc->q, z1, sp, vp, z1);
mp_limb_t *scratch);
/* Current scratch needs: */
-#define ECC_MOD_INV_ITCH(size) (2*(size))
-#define ECC_J_TO_A_ITCH(size, inv) (2*(size)+(inv))
-#define ECC_EH_TO_A_ITCH(size, inv) (2*(size)+(inv))
+#define ECC_MOD_INV_ITCH(size) (3*(size))
+#define ECC_J_TO_A_ITCH(size, inv) ((size)+(inv))
+#define ECC_EH_TO_A_ITCH(size, inv) ((size)+(inv))
#define ECC_DUP_JJ_ITCH(size) (4*(size))
#define ECC_DUP_EH_ITCH(size) (3*(size))
#define ECC_DUP_TH_ITCH(size) (3*(size))
mp_limb_t cy;
- ecc->p.invert (&ecc->p, izp, p+2*ecc->p.size, izp + 2 * ecc->p.size);
+ ecc->p.invert (&ecc->p, izp, p+2*ecc->p.size, izp + ecc->p.size);
ecc_mod_sqr (&ecc->p, iz2p, izp, iz2p);
if (ecc->use_redc)
Returns zero if a == 0 (mod m), to be consistent with a^{phi(m)-1}.
Also needs (m+1)/2, and m must be odd.
- Needs 2n limbs available at rp, and 2n additional scratch limbs.
+ Needs 3n limbs of scratch space.
*/
/* FIXME: Could use mpn_sec_invert (in GMP-6), but with a bit more
{
#define ap scratch
#define bp (scratch + n)
-#define up (vp + n)
+#define up (scratch + 2*n)
mp_size_t n = m->size;
/* Avoid the mp_bitcnt_t type for compatibility with older GMP