diff --git a/mk/generate_curves.py b/mk/generate_curves.py
index 04fa40cf4..92efecadc 100644
--- a/mk/generate_curves.py
+++ b/mk/generate_curves.py
@@ -115,7 +115,7 @@ pub static PUBLIC_SCALAR_OPS: PublicScalarOps = PublicScalarOps {
     q_minus_n: Elem::from_hex("%(q_minus_n)x"),
 
     // TODO: Use an optimized variable-time implementation.
-    scalar_inv_to_mont_vartime: p%(bits)s_scalar_inv_to_mont,
+    scalar_inv_to_mont_vartime: |s| PRIVATE_SCALAR_OPS.scalar_inv_to_mont(s),
 };
 
 pub static PRIVATE_SCALAR_OPS: PrivateScalarOps = PrivateScalarOps {
@@ -125,7 +125,7 @@ pub static PRIVATE_SCALAR_OPS: PrivateScalarOps = PrivateScalarOps {
     scalar_inv_to_mont: p%(bits)s_scalar_inv_to_mont,
 };
 
-fn p%(bits)s_scalar_inv_to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {
+fn p%(bits)s_scalar_inv_to_mont(a: Scalar<R>) -> Scalar<R> {
     // Calculate the modular inverse of scalar |a| using Fermat's Little
     // Theorem:
     //
@@ -166,22 +166,13 @@ fn p%(bits)s_scalar_inv_to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {
         binary_op_assign(p%(bits)d_scalar_mul_mont, acc, b)
     }
 
-    fn to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {
-        static N_RR: Scalar<Unencoded> = Scalar {
-            limbs: PRIVATE_SCALAR_OPS.oneRR_mod_n.limbs,
-            m: PhantomData,
-            encoding: PhantomData,
-        };
-        binary_op(p%(bits)s_scalar_mul_mont, a, &N_RR)
-    }
-
     // Indexes into `d`.
     const B_1: usize = 0;
     todo!();
     const DIGIT_COUNT: usize = todo!();
 
     let mut d = [Scalar::zero(); DIGIT_COUNT];
-    d[B_1] = to_mont(a);
+    d[B_1] = a;
     let b_10 = sqr(&d[B_1]);
     for i in B_11..DIGIT_COUNT {
         d[i] = mul(&d[i - 1], &b_10);
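
The generated p%(bits)s_scalar_inv_to_mont computes a^-1 (mod n) as a^(n-2), which Fermat's little theorem guarantees for a nonzero scalar and prime group order n; after this change its input arrives already in Montgomery form. A minimal sketch of that identity, using a generic square-and-multiply loop and a toy prime modulus rather than the fixed per-curve chain the template emits:

// Illustration only: Fermat inversion by generic square-and-multiply.
// The generated code uses a fixed, curve-specific chain of Montgomery
// operations instead; the identity a^(n-2) = a^-1 (mod n) is the same.
// Toy prime modulus, not a curve order.
const N: u64 = 65_537;

fn mul_mod(a: u64, b: u64) -> u64 {
    a * b % N
}

// base^e (mod N) by square-and-multiply, least significant bit first.
fn pow_mod(mut base: u64, mut e: u64) -> u64 {
    let mut acc = 1;
    base %= N;
    while e > 0 {
        if e & 1 == 1 {
            acc = mul_mod(acc, base);
        }
        base = mul_mod(base, base);
        e >>= 1;
    }
    acc
}

fn main() {
    let a = 12_345 % N;
    let a_inv = pow_mod(a, N - 2); // Fermat: a^(N-2) = a^-1 (mod N)
    assert_eq!(mul_mod(a, a_inv), 1);
}
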
diff --git a/src/ec/suite_b/ops.rs b/src/ec/suite_b/ops.rs
index e3de1c4d1..57e4c7aaa 100644
--- a/src/ec/suite_b/ops.rs
+++ b/src/ec/suite_b/ops.rs
@@ -319,7 +319,7 @@ pub struct PrivateScalarOps {
     pub scalar_ops: &'static ScalarOps,
 
     oneRR_mod_n: Scalar<RR>, // 1 * R**2 (mod n). TODO: Use One<RR>.
-    scalar_inv_to_mont: fn(a: &Scalar) -> Scalar<R>,
+    scalar_inv_to_mont: fn(a: Scalar<R>) -> Scalar<R>,
 }
 
 impl PrivateScalarOps {
@@ -330,6 +330,7 @@ impl PrivateScalarOps {
     /// Returns the modular inverse of `a` (mod `n`). Panics if `a` is zero.
     pub fn scalar_inv_to_mont(&self, a: &Scalar) -> Scalar<R> {
         assert!(!self.scalar_ops.common.is_zero(a));
+        let a = self.to_mont(a);
         (self.scalar_inv_to_mont)(a)
     }
 }
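
Each removed per-curve to_mont helper did one Montgomery multiplication of the unencoded scalar by oneRR_mod_n = R^2 (mod n), which yields a*R (mod n). Hoisting that step into PrivateScalarOps::scalar_inv_to_mont (the added `let a = self.to_mont(a);`) lets the function pointer take and return Scalar<R>, and the inversion chain then stays entirely in the Montgomery domain, ending at a^(n-2)*R = a^-1*R (mod n). A toy sketch of that algebra, with Montgomery multiplication modeled directly by its mathematical effect x*y*R^-1 (mod n) instead of REDC; the modulus, radix, and values are illustrative only:

// Illustration only: the encoding algebra behind to_mont and the
// Montgomery-domain inversion. mont_mul is defined by its effect,
// x * y * R^-1 (mod N); real code uses REDC. Toy parameters.
const N: u64 = 65_537;  // toy prime modulus
const R: u64 = 1 << 20; // toy Montgomery radix, coprime to N

// base^e (mod N) by square-and-multiply.
fn pow_mod(mut base: u64, mut e: u64) -> u64 {
    let mut acc = 1;
    base %= N;
    while e > 0 {
        if e & 1 == 1 {
            acc = acc * base % N;
        }
        base = base * base % N;
        e >>= 1;
    }
    acc
}

// The mathematical effect of one Montgomery multiplication.
fn mont_mul(x: u64, y: u64) -> u64 {
    let r_inv = pow_mod(R, N - 2); // R^-1 (mod N); N is prime
    x * y % N * r_inv % N
}

fn main() {
    let a = 12_345;
    let one_rr = R % N * (R % N) % N; // R^2 (mod N), the role of oneRR_mod_n

    // to_mont: multiplying by R^2 with one Montgomery multiplication
    // converts the unencoded a into a*R (mod N).
    let a_mont = mont_mul(a, one_rr);
    assert_eq!(a_mont, a * (R % N) % N);

    // The chain of Montgomery squarings/multiplications stays in that
    // domain and ends at a^(N-2)*R = a^-1*R (mod N): the inverse already
    // in Montgomery form, hence "inv_to_mont".
    let inv_mont = pow_mod(a, N - 2) * (R % N) % N;
    assert_eq!(mont_mul(a_mont, inv_mont), R % N); // R mod N encodes 1
}
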
diff --git a/src/ec/suite_b/ops/p256.rs b/src/ec/suite_b/ops/p256.rs
index deeac6097..6b0323cb4 100644
--- a/src/ec/suite_b/ops/p256.rs
+++ b/src/ec/suite_b/ops/p256.rs
@@ -132,7 +132,7 @@ pub static PUBLIC_SCALAR_OPS: PublicScalarOps = PublicScalarOps {
     q_minus_n: Elem::from_hex("4319055358e8617b0c46353d039cdaae"),
 
     // TODO: Use an optimized variable-time implementation.
-    scalar_inv_to_mont_vartime: p256_scalar_inv_to_mont,
+    scalar_inv_to_mont_vartime: |s| PRIVATE_SCALAR_OPS.scalar_inv_to_mont(s),
 };
 
 #[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
@@ -165,7 +165,7 @@ pub static PRIVATE_SCALAR_OPS: PrivateScalarOps = PrivateScalarOps {
     scalar_inv_to_mont: p256_scalar_inv_to_mont,
 };
 
-fn p256_scalar_inv_to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {
+fn p256_scalar_inv_to_mont(a: Scalar<R>) -> Scalar<R> {
     // Calculate the modular inverse of scalar |a| using Fermat's Little
     // Theorem:
     //
@@ -202,15 +202,6 @@ fn p256_scalar_inv_to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {
         binary_op_assign(p256_scalar_mul_mont, acc, b);
     }
 
-    fn to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {
-        static N_RR: Scalar<Unencoded> = Scalar {
-            limbs: PRIVATE_SCALAR_OPS.oneRR_mod_n.limbs,
-            m: PhantomData,
-            encoding: PhantomData,
-        };
-        binary_op(p256_scalar_mul_mont, a, &N_RR)
-    }
-
     // Indexes into `d`.
     const B_1: usize = 0;
     const B_10: usize = 1;
@@ -224,7 +215,7 @@ fn p256_scalar_inv_to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {
 
     let mut d = [Scalar::zero(); DIGIT_COUNT];
 
-    d[B_1] = to_mont(a);
+    d[B_1] = a;
     d[B_10] = sqr(&d[B_1]);
     d[B_11] = mul(&d[B_10], &d[B_1]);
     d[B_101] = mul(&d[B_10], &d[B_11]);
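
In the p256 chain the d[] entries are named after the binary value they hold as a power of a: d[B_1] = a, d[B_10] = a^2, d[B_11] = a^3, d[B_101] = a^5, and so on, and the rest of the function (outside this hunk) interleaves squarings of an accumulator with multiplications by those precomputed digits. A generic windowed-exponentiation sketch showing the same precompute-then-window shape; ring's chain is hand-scheduled per curve and fixed (input-independent), so this only shows the general idea:

// Illustration only: precompute small powers, then consume the exponent
// a few bits at a time with repeated squarings plus one table multiply.
// Toy prime modulus and a generic 2-bit window, not ring's fixed chain.
const N: u64 = 65_537;

fn mul_mod(a: u64, b: u64) -> u64 {
    a * b % N
}

fn pow_mod_windowed(a: u64, e: u64) -> u64 {
    // table[i] = a^i for i = 0..3, analogous to the d[] digit table.
    let mut table = [1u64; 4];
    for i in 1..4 {
        table[i] = mul_mod(table[i - 1], a);
    }
    let mut acc = 1u64;
    // Most significant window first: square twice, then multiply by the
    // table entry selected by the next two exponent bits.
    for shift in (0..64).step_by(2).rev() {
        acc = mul_mod(acc, acc);
        acc = mul_mod(acc, acc);
        let window = ((e >> shift) & 0b11) as usize;
        acc = mul_mod(acc, table[window]);
    }
    acc
}

fn main() {
    let a = 12_345 % N;
    let a_inv = pow_mod_windowed(a, N - 2); // Fermat inverse again
    assert_eq!(mul_mod(a, a_inv), 1);
}
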
diff --git a/src/ec/suite_b/ops/p384.rs b/src/ec/suite_b/ops/p384.rs
index 01ee8ae85..01a43d6af 100644
--- a/src/ec/suite_b/ops/p384.rs
+++ b/src/ec/suite_b/ops/p384.rs
@@ -127,7 +127,7 @@ pub static PUBLIC_SCALAR_OPS: PublicScalarOps = PublicScalarOps {
     q_minus_n: Elem::from_hex("389cb27e0bc8d21fa7e5f24cb74f58851313e696333ad68c"),
 
     // TODO: Use an optimized variable-time implementation.
-    scalar_inv_to_mont_vartime: p384_scalar_inv_to_mont,
+    scalar_inv_to_mont_vartime: |s| PRIVATE_SCALAR_OPS.scalar_inv_to_mont(s),
 };
 
 pub static PRIVATE_SCALAR_OPS: PrivateScalarOps = PrivateScalarOps {
@@ -137,7 +137,7 @@ pub static PRIVATE_SCALAR_OPS: PrivateScalarOps = PrivateScalarOps {
     scalar_inv_to_mont: p384_scalar_inv_to_mont,
 };
 
-fn p384_scalar_inv_to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {
+fn p384_scalar_inv_to_mont(a: Scalar<R>) -> Scalar<R> {
     // Calculate the modular inverse of scalar |a| using Fermat's Little
     // Theorem:
     //
@@ -179,15 +179,6 @@ fn p384_scalar_inv_to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {
         binary_op_assign(p384_scalar_mul_mont, acc, b)
     }
 
-    fn to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {
-        static N_RR: Scalar<Unencoded> = Scalar {
-            limbs: PRIVATE_SCALAR_OPS.oneRR_mod_n.limbs,
-            m: PhantomData,
-            encoding: PhantomData,
-        };
-        binary_op(p384_scalar_mul_mont, a, &N_RR)
-    }
-
     // Indexes into `d`.
     const B_1: usize = 0;
     const B_11: usize = 1;
@@ -200,7 +191,7 @@ fn p384_scalar_inv_to_mont(a: &Scalar<Unencoded>) -> Scalar<R> {
     const DIGIT_COUNT: usize = 8;
 
     let mut d = [Scalar::zero(); DIGIT_COUNT];
-    d[B_1] = to_mont(a);
+    d[B_1] = a;
     let b_10 = sqr(&d[B_1]);
     for i in B_11..DIGIT_COUNT {
         d[i] = mul(&d[i - 1], &b_10);
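
One Rust detail behind the scalar_inv_to_mont_vartime change: the field was previously initialized with a plain function, so it is presumably an fn-pointer field, and `|s| PRIVATE_SCALAR_OPS.scalar_inv_to_mont(s)` still fits because a closure that captures nothing coerces to a plain function pointer, and naming a `static` inside the closure body is not a capture. A stripped-down sketch of that pattern; the types and names below are made up, not ring's:

// Illustration only: a non-capturing closure coerces to `fn`, so it can
// initialize an fn-pointer field in a `static`, and reading another
// `static` inside its body is not a capture.
struct Ops {
    op: fn(u32) -> u32,
}

static BIAS: u32 = 7;

static OPS: Ops = Ops {
    op: |x| x + BIAS, // coerces to fn(u32) -> u32
};

fn main() {
    assert_eq!((OPS.op)(35), 42);
}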