ec suite_b: Split scalar inversion API into variable- and constant-time.

Brian Smith 2023-12-04 11:32:56 -08:00
parent ed2b1d7054
commit c89b5c6e28
6 changed files with 30 additions and 17 deletions
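In practice the split routes the two ECDSA call sites through different entry points: signing inverts the secret nonce k via the constant-time PrivateScalarOps::scalar_inv_to_mont (which keeps the zero-check panic), while verification inverts the public s via the new PublicScalarOps::scalar_inv_to_mont_vartime. A minimal, self-contained sketch of that shape, using stand-in types rather than ring's real Scalar/ops internals:

// Simplified model of the API split; the stand-in `Scalar` and the toy
// inversion below are assumptions for illustration, not ring's real types.
type Scalar = u64;

pub struct PrivateScalarOps {
    scalar_inv_to_mont: fn(&Scalar) -> Scalar, // constant-time implementation
}

pub struct PublicScalarOps {
    scalar_inv_to_mont_vartime: fn(&Scalar) -> Scalar, // may be variable-time
}

impl PrivateScalarOps {
    // Secret-scalar inversion (e.g. the ECDSA nonce k); panics on zero,
    // mirroring the assert that this commit moves into PrivateScalarOps.
    pub fn scalar_inv_to_mont(&self, a: &Scalar) -> Scalar {
        assert!(*a != 0);
        (self.scalar_inv_to_mont)(a)
    }
}

impl PublicScalarOps {
    // Public-scalar inversion (e.g. the ECDSA signature component s);
    // timing may depend on the input, which is acceptable for public values.
    pub fn scalar_inv_to_mont_vartime(&self, s: &Scalar) -> Scalar {
        (self.scalar_inv_to_mont_vartime)(s)
    }
}

fn main() {
    // Toy "inverse" so the sketch runs; the real code computes a^-1 mod n
    // in the Montgomery domain via curve-specific function pointers.
    fn toy_inv(a: &Scalar) -> Scalar {
        *a
    }

    let private_ops = PrivateScalarOps { scalar_inv_to_mont: toy_inv };
    let public_ops = PublicScalarOps { scalar_inv_to_mont_vartime: toy_inv };

    let k: Scalar = 42; // secret nonce used during signing
    let s: Scalar = 7; // public signature component checked during verification
    let _k_inv = private_ops.scalar_inv_to_mont(&k); // constant-time path
    let _w = public_ops.scalar_inv_to_mont_vartime(&s); // variable-time path
}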

View File

@@ -102,7 +102,6 @@ pub static PUBLIC_KEY_OPS: PublicKeyOps = PublicKeyOps {
 pub static SCALAR_OPS: ScalarOps = ScalarOps {
     common: &COMMON_OPS,
-    scalar_inv_to_mont_impl: p%(bits)s_scalar_inv_to_mont,
     scalar_mul_mont: p%(bits)s_scalar_mul_mont,
 };
@@ -114,12 +113,16 @@ pub static PUBLIC_SCALAR_OPS: PublicScalarOps = PublicScalarOps {
     },
     q_minus_n: Elem::from_hex("%(q_minus_n)x"),
+    // TODO: Use an optimized variable-time implementation.
+    scalar_inv_to_mont_vartime: p%(bits)s_scalar_inv_to_mont,
 };

 pub static PRIVATE_SCALAR_OPS: PrivateScalarOps = PrivateScalarOps {
     scalar_ops: &SCALAR_OPS,
     oneRR_mod_n: Scalar::from_hex(%(oneRR_mod_n)s),
+    scalar_inv_to_mont: p%(bits)s_scalar_inv_to_mont,
 };

 fn p%(bits)s_scalar_inv_to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {

View File

@@ -240,7 +240,7 @@ impl EcdsaKeyPair {
             // XXX: iteration conut?

             // Step 1.
             let k = private_key::random_scalar(self.alg.private_key_ops, rng)?;
-            let k_inv = scalar_ops.scalar_inv_to_mont(&k);
+            let k_inv = ops.scalar_inv_to_mont(&k);

             // Step 2.
             let r = private_key_ops.point_mul_base(&k);

View File

@@ -113,7 +113,7 @@ impl EcdsaVerificationAlgorithm {
         // NSA Guide Step 4: "Compute w = s**-1 mod n, using the routine in
         // Appendix B.1."
-        let w = scalar_ops.scalar_inv_to_mont(&s);
+        let w = self.ops.scalar_inv_to_mont_vartime(&s);

         // NSA Guide Step 5: "Compute u1 = (e * w) mod n, and compute
         // u2 = (r * w) mod n."

View File

@@ -249,7 +249,6 @@ impl PublicKeyOps {
 pub struct ScalarOps {
     pub common: &'static CommonOps,
-    scalar_inv_to_mont_impl: fn(a: &Scalar) -> Scalar<R>,
     scalar_mul_mont: unsafe extern "C" fn(r: *mut Limb, a: *const Limb, b: *const Limb),
 }
@@ -263,13 +262,6 @@ impl ScalarOps {
         &s.limbs[..self.common.num_limbs]
     }

-    /// Returns the modular inverse of `a` (mod `n`). Panics of `a` is zero,
-    /// because zero isn't invertible.
-    pub fn scalar_inv_to_mont(&self, a: &Scalar) -> Scalar<R> {
-        assert!(!self.common.is_zero(a));
-        (self.scalar_inv_to_mont_impl)(a)
-    }
-
     #[inline]
     pub fn scalar_product<EA: Encoding, EB: Encoding>(
         &self,
@@ -289,6 +281,7 @@ pub struct PublicScalarOps {
     pub public_key_ops: &'static PublicKeyOps,
     pub twin_mul: fn(g_scalar: &Scalar, p_scalar: &Scalar, p_xy: &(Elem<R>, Elem<R>)) -> Point,
+    pub scalar_inv_to_mont_vartime: fn(s: &Scalar<Unencoded>) -> Scalar<R>,
     pub q_minus_n: Elem<Unencoded>,
 }
@@ -315,6 +308,10 @@ impl PublicScalarOps {
         let num_limbs = self.public_key_ops.common.num_limbs;
         limbs_less_than_limbs_vartime(&a.limbs[..num_limbs], &b.limbs[..num_limbs])
     }
+
+    pub fn scalar_inv_to_mont_vartime(&self, s: &Scalar<Unencoded>) -> Scalar<R> {
+        (self.scalar_inv_to_mont_vartime)(s)
+    }
 }

 #[allow(non_snake_case)]
@@ -322,12 +319,19 @@ pub struct PrivateScalarOps {
     pub scalar_ops: &'static ScalarOps,
     oneRR_mod_n: Scalar<RR>, // 1 * R**2 (mod n). TOOD: Use One<RR>.
+    scalar_inv_to_mont: fn(a: &Scalar) -> Scalar<R>,
 }

 impl PrivateScalarOps {
     pub fn to_mont(&self, s: &Scalar<Unencoded>) -> Scalar<R> {
         self.scalar_ops.scalar_product(s, &self.oneRR_mod_n)
     }
+
+    /// Returns the modular inverse of `a` (mod `n`). Panics if `a` is zero.
+    pub fn scalar_inv_to_mont(&self, a: &Scalar) -> Scalar<R> {
+        assert!(!self.scalar_ops.common.is_zero(a));
+        (self.scalar_inv_to_mont)(a)
+    }
 }

 // XXX: Inefficient and unnecessarily depends on `PrivateKeyOps`. TODO: implement interleaved wNAF
@@ -764,15 +768,15 @@ mod tests {
     }

     #[test]
-    #[should_panic(expected = "!self.common.is_zero(a)")]
+    #[should_panic(expected = "!self.scalar_ops.common.is_zero(a)")]
     fn p256_scalar_inv_to_mont_zero_panic_test() {
-        let _ = p256::SCALAR_OPS.scalar_inv_to_mont(&ZERO_SCALAR);
+        let _ = p256::PRIVATE_SCALAR_OPS.scalar_inv_to_mont(&ZERO_SCALAR);
     }

     #[test]
-    #[should_panic(expected = "!self.common.is_zero(a)")]
+    #[should_panic(expected = "!self.scalar_ops.common.is_zero(a)")]
     fn p384_scalar_inv_to_mont_zero_panic_test() {
-        let _ = p384::SCALAR_OPS.scalar_inv_to_mont(&ZERO_SCALAR);
+        let _ = p384::PRIVATE_SCALAR_OPS.scalar_inv_to_mont(&ZERO_SCALAR);
     }

     #[test]

View File

@@ -114,7 +114,6 @@ pub static PUBLIC_KEY_OPS: PublicKeyOps = PublicKeyOps {
 pub static SCALAR_OPS: ScalarOps = ScalarOps {
     common: &COMMON_OPS,
-    scalar_inv_to_mont_impl: p256_scalar_inv_to_mont,
     scalar_mul_mont: p256_scalar_mul_mont,
 };
@@ -131,6 +130,9 @@ pub static PUBLIC_SCALAR_OPS: PublicScalarOps = PublicScalarOps {
     },
     q_minus_n: Elem::from_hex("4319055358e8617b0c46353d039cdaae"),
+    // TODO: Use an optimized variable-time implementation.
+    scalar_inv_to_mont_vartime: p256_scalar_inv_to_mont,
 };

 #[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
@@ -160,6 +162,7 @@ pub static PRIVATE_SCALAR_OPS: PrivateScalarOps = PrivateScalarOps {
     oneRR_mod_n: Scalar::from_hex(
         "66e12d94f3d956202845b2392b6bec594699799c49bd6fa683244c95be79eea2",
     ),
+    scalar_inv_to_mont: p256_scalar_inv_to_mont,
 };

 fn p256_scalar_inv_to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {

View File

@@ -114,7 +114,6 @@ pub static PUBLIC_KEY_OPS: PublicKeyOps = PublicKeyOps {
 pub static SCALAR_OPS: ScalarOps = ScalarOps {
     common: &COMMON_OPS,
-    scalar_inv_to_mont_impl: p384_scalar_inv_to_mont,
     scalar_mul_mont: p384_scalar_mul_mont,
 };
@@ -126,12 +125,16 @@ pub static PUBLIC_SCALAR_OPS: PublicScalarOps = PublicScalarOps {
     },
     q_minus_n: Elem::from_hex("389cb27e0bc8d21fa7e5f24cb74f58851313e696333ad68c"),
+    // TODO: Use an optimized variable-time implementation.
+    scalar_inv_to_mont_vartime: p384_scalar_inv_to_mont,
 };

 pub static PRIVATE_SCALAR_OPS: PrivateScalarOps = PrivateScalarOps {
     scalar_ops: &SCALAR_OPS,
     oneRR_mod_n: Scalar::from_hex("c84ee012b39bf213fb05b7a28266895d40d49174aab1cc5bc3e483afcb82947ff3d81e5df1aa4192d319b2419b409a9"),
+    scalar_inv_to_mont: p384_scalar_inv_to_mont,
 };

 fn p384_scalar_inv_to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {