//! Scalar-operation tests for `BigUint`.
//!
//! The test modules were getting huge, and some functions expanded a large
//! amount of code through macros, making the tests slow to compile. They are
//! now split into a few separate test files, and the scalar macros in
//! particular are expanded sparingly, inside just a few small `check()`
//! functions. This brought test compile times from ~25s down to ~1.5s in
//! debug mode, and from ~300s (!) down to ~8s in release mode.
extern crate num_bigint;
extern crate num_traits;

use num_bigint::BigUint;
use num_traits::{Zero, ToPrimitive};

mod consts;
use consts::*;

#[macro_use]
mod macros;
/// Scalar addition: every `(a, b, c)` in `SUM_TRIPLES` must satisfy
/// `a + b == c` in both operand orders, since addition commutes.
#[test]
fn test_scalar_add() {
    // The heavy scalar macro is expanded exactly once, here, to keep
    // test compile times low.
    fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
        let x = x.clone();
        let y = y.clone();
        let z = z.clone();
        assert_unsigned_scalar_op!(x + y == z);
    }

    for &(a_vec, b_vec, c_vec) in SUM_TRIPLES.iter() {
        let a = BigUint::from_slice(a_vec);
        let b = BigUint::from_slice(b_vec);
        let c = BigUint::from_slice(c_vec);

        // Commutativity: check both operand orders.
        check(&a, &b, &c);
        check(&b, &a, &c);
    }
}
|
|
|
|
/// Scalar subtraction: for every sum triple `(a, b, c)` with `a + b == c`,
/// both `c - a == b` and `c - b == a` must hold.
#[test]
fn test_scalar_sub() {
    // Single expansion site for the scalar macro, for compile-time sanity.
    fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
        let x = x.clone();
        let y = y.clone();
        let z = z.clone();
        assert_unsigned_scalar_op!(x - y == z);
    }

    for &(a_vec, b_vec, c_vec) in SUM_TRIPLES.iter() {
        let a = BigUint::from_slice(a_vec);
        let b = BigUint::from_slice(b_vec);
        let c = BigUint::from_slice(c_vec);

        // Subtracting either addend from the sum recovers the other.
        check(&c, &a, &b);
        check(&c, &b, &a);
    }
}
|
|
|
|
/// Scalar multiplication: every `(a, b, c)` in `MUL_TRIPLES` must satisfy
/// `a * b == c` in both operand orders, since multiplication commutes.
#[test]
fn test_scalar_mul() {
    // Single expansion site for the scalar macro, for compile-time sanity.
    fn check(x: &BigUint, y: &BigUint, z: &BigUint) {
        let x = x.clone();
        let y = y.clone();
        let z = z.clone();
        assert_unsigned_scalar_op!(x * y == z);
    }

    for &(a_vec, b_vec, c_vec) in MUL_TRIPLES.iter() {
        let a = BigUint::from_slice(a_vec);
        let b = BigUint::from_slice(b_vec);
        let c = BigUint::from_slice(c_vec);

        // Commutativity: check both operand orders.
        check(&a, &b, &c);
        check(&b, &a, &c);
    }
}
|
|
|
|
/// Scalar division and remainder.
///
/// Exercised two ways: every multiplication triple `(a, b, c)` with
/// `a * b == c` yields exact divisions (`c / a == b`, `c / b == a`,
/// remainder zero), and `DIV_REM_QUADRUPLES` supplies `(a, b, c, d)`
/// cases where `a / b == c` and `a % b == d`.
#[test]
fn test_scalar_div_rem() {
    // The only expansion site for the heavy scalar macros in this test;
    // asserts both the quotient and the remainder for one operand set.
    fn check(x: &BigUint, y: &BigUint, z: &BigUint, r: &BigUint) {
        let (x, y, z, r) = (x.clone(), y.clone(), z.clone(), r.clone());
        assert_unsigned_scalar_op!(x / y == z);
        assert_unsigned_scalar_op!(x % y == r);
    }

    for elm in MUL_TRIPLES.iter() {
        let (a_vec, b_vec, c_vec) = *elm;
        let a = BigUint::from_slice(a_vec);
        let b = BigUint::from_slice(b_vec);
        let c = BigUint::from_slice(c_vec);

        // Division by zero is undefined, so skip zero divisors.
        if !a.is_zero() {
            check(&c, &a, &b, &Zero::zero());
        }

        if !b.is_zero() {
            check(&c, &b, &a, &Zero::zero());
        }
    }

    for elm in DIV_REM_QUADRUPLES.iter() {
        let (a_vec, b_vec, c_vec, d_vec) = *elm;
        let a = BigUint::from_slice(a_vec);
        let b = BigUint::from_slice(b_vec);
        let c = BigUint::from_slice(c_vec);
        let d = BigUint::from_slice(d_vec);

        if !b.is_zero() {
            // `check` already asserts `a / b == c` and `a % b == d`;
            // the previous inline `assert_unsigned_scalar_op!` calls here
            // duplicated those exact assertions and re-expanded the heavy
            // macro, defeating the compile-time goal of this split.
            check(&a, &b, &c, &d);
        }
    }
}
|