AES: Remove use of ArrayEncoding.

Take a step towards reducing `unsafe` by eliminating another use of
`ArrayEncoding`.
Brian Smith 2023-10-11 11:06:05 -07:00
parent b1147381c9
commit f8cad224b5
5 changed files with 34 additions and 11 deletions
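In the diffs below, the previous code handed the `[BigEndian<u32>; 4]` counter words to `ArrayEncoding::as_byte_array` to obtain the 16 IV bytes; the new code serializes each word to its four big-endian bytes (`map(Into::into)` in the diff) and flattens the resulting `[[u8; 4]; 4]`. A minimal standalone sketch of that safe path, using plain `u32` words and a hypothetical `to_block` helper in place of ring's `BigEndian<u32>` and `Block`:

// Sketch of the replacement conversion, assuming plain `u32` counter words
// in place of ring's `BigEndian<u32>` wrapper; `to_block` is a hypothetical
// helper, not part of ring's API.
fn to_block(words: [u32; 4]) -> [u8; 16] {
    // Step 1: serialize each word to big-endian bytes; no layout assumptions.
    let bytes: [[u8; 4]; 4] = words.map(u32::to_be_bytes);

    // Step 2: flatten the 4x4 byte matrix row-major into one 16-byte block.
    let mut block = [0u8; 16];
    for (i, word) in bytes.into_iter().enumerate() {
        block[i * 4..i * 4 + 4].copy_from_slice(&word);
    }
    block
}

fn main() {
    let block = to_block([0x0001_0203, 0x0405_0607, 0x0809_0a0b, 0x0c0d_0e0f]);
    let expected: [u8; 16] = core::array::from_fn(|i| i as u8);
    assert_eq!(block, expected);
}

The commit does the flattening with the `ArrayFlatten` impl for `[[T; 4]; 4]` added at the bottom of the diff rather than with a loop; either way, no byte reinterpretation is needed.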


@@ -20,7 +20,7 @@ use super::{
 use crate::{
     bits::BitLength,
     c, cpu,
-    endian::{ArrayEncoding, BigEndian},
+    endian::BigEndian,
     error,
     polyfill::{self, ChunksFixed},
 };
@@ -332,7 +332,8 @@ impl Counter {
     }
 
     pub fn increment(&mut self) -> Iv {
-        let iv = Iv(self.0);
+        let iv: [[u8; 4]; 4] = self.0.map(Into::into);
+        let iv = Iv(Block::from(iv));
         self.increment_by_less_safe(1);
         iv
     }
@@ -346,11 +347,12 @@ impl Counter {
 /// The IV for a single block encryption.
 ///
 /// Intentionally not `Clone` to ensure each is used only once.
-pub struct Iv([BigEndian<u32>; 4]);
+pub struct Iv(Block);
 
 impl From<Counter> for Iv {
     fn from(counter: Counter) -> Self {
-        Self(counter.0)
+        let iv: [[u8; 4]; 4] = counter.0.map(Into::into);
+        Self(Block::from(iv))
     }
 }
@@ -358,7 +360,7 @@ impl Iv {
     /// "Less safe" because it defeats attempts to use the type system to prevent reuse of the IV.
     #[inline]
     pub(super) fn into_block_less_safe(self) -> Block {
-        Block::from(self.0.as_byte_array())
+        self.0
     }
 }


@@ -19,7 +19,7 @@ use super::{
 };
 use crate::{
     aead, cpu, error,
-    polyfill::{self, ArrayFlatten},
+    polyfill::{self},
 };
 use core::ops::RangeFrom;
@@ -246,9 +246,7 @@ fn finish(
     let aad_bits = polyfill::u64_from_usize(aad_len) << 3;
     let ciphertext_bits = polyfill::u64_from_usize(in_out_len) << 3;
     gcm_ctx.update_block(Block::from(
-        &[aad_bits, ciphertext_bits]
-            .map(u64::to_be_bytes)
-            .array_flatten(),
+        [aad_bits, ciphertext_bits].map(u64::to_be_bytes),
     ));
 
     // Finalize the tag and return it.


@@ -12,6 +12,7 @@
 // OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 // CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
+use crate::polyfill::ArrayFlatten;
 use core::ops::{BitXor, BitXorAssign};
 
 #[repr(transparent)]
#[repr(transparent)]
@@ -63,6 +64,16 @@ impl BitXor for Block {
     }
 }
 
+impl<T> From<T> for Block
+where
+    T: ArrayFlatten<Output = [u8; 16]>,
+{
+    #[inline]
+    fn from(bytes: T) -> Self {
+        Self(bytes.array_flatten())
+    }
+}
+
 impl From<&'_ [u8; BLOCK_LEN]> for Block {
     #[inline]
     fn from(bytes: &[u8; BLOCK_LEN]) -> Self {
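The blanket impl added above means any nested array whose `ArrayFlatten` output is `[u8; 16]` converts straight into a `Block`, so the `[[u8; 8]; 2]` produced from the two GCM length fields and the `[[u8; 4]; 4]` produced from the four counter words both go through the same constructor. A standalone sketch of that pattern with stand-in names (`Flatten`, `Block16`), since ring's `Block` and `ArrayFlatten` are internal to the crate:

// Stand-in mirror of the blanket-From-over-a-flatten-trait pattern; the
// names `Flatten` and `Block16` are illustrative, not ring's.
trait Flatten {
    type Output;
    fn flatten(self) -> Self::Output;
}

impl<T> Flatten for [[T; 8]; 2] {
    type Output = [T; 16];
    fn flatten(self) -> [T; 16] {
        let [[a0, a1, a2, a3, a4, a5, a6, a7], [b0, b1, b2, b3, b4, b5, b6, b7]] = self;
        [a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3, b4, b5, b6, b7]
    }
}

impl<T> Flatten for [[T; 4]; 4] {
    type Output = [T; 16];
    fn flatten(self) -> [T; 16] {
        let [[a0, a1, a2, a3], [b0, b1, b2, b3], [c0, c1, c2, c3], [d0, d1, d2, d3]] = self;
        [a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3]
    }
}

struct Block16([u8; 16]);

// One impl covers every array shape that flattens to exactly 16 bytes.
impl<T: Flatten<Output = [u8; 16]>> From<T> for Block16 {
    fn from(bytes: T) -> Self {
        Self(bytes.flatten())
    }
}

fn main() {
    // GCM length block: two u64 bit lengths, each as 8 big-endian bytes.
    let lengths = Block16::from([128u64, 256u64].map(u64::to_be_bytes));
    // Counter block: four u32 words, each as 4 big-endian bytes.
    let counter = Block16::from([1u32, 2, 3, 4].map(u32::to_be_bytes));
    println!("{:02x?} {:02x?}", lengths.0, counter.0);
}

The blanket impl can coexist with the reflexive `impl<T> From<T> for T` and with the `From<&[u8; BLOCK_LEN]>` impl because neither the block type nor the byte-array reference implements the flatten trait.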


@@ -23,7 +23,7 @@
 // Unlike the BearSSL notes, we use u128 in the 64-bit implementation.
 
 use super::{Block, Xi, BLOCK_LEN};
-use crate::polyfill::{ArrayFlatten, ChunksFixed};
+use crate::polyfill::ChunksFixed;
 
 #[cfg(target_pointer_width = "64")]
 fn gcm_mul64_nohw(a: u64, b: u64) -> (u64, u64) {
@@ -242,5 +242,5 @@ fn with_swapped_xi(Xi(xi): &mut Xi, f: impl FnOnce(&mut [u64; 2])) {
     let mut swapped: [u64; 2] = [unswapped[1], unswapped[0]];
     f(&mut swapped);
     let reswapped = [swapped[1], swapped[0]];
-    *xi = Block::from(&reswapped.map(u64::to_be_bytes).array_flatten())
+    *xi = Block::from(reswapped.map(u64::to_be_bytes))
 }


@@ -30,3 +30,15 @@ impl<T> ArrayFlatten for [[T; 8]; 2] {
         ]
     }
 }
+
+impl<T> ArrayFlatten for [[T; 4]; 4] {
+    type Output = [T; 16];
+
+    #[inline(always)]
+    fn array_flatten(self) -> Self::Output {
+        let [[a0, a1, a2, a3], [b0, b1, b2, b3], [c0, c1, c2, c3], [d0, d1, d2, d3]] = self;
+        [
+            a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3,
+        ]
+    }
+}
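A quick standalone check of the destructuring-based flatten added above (`flatten_4x4` here is an illustrative stand-in for calling `ArrayFlatten::array_flatten` on a `[[T; 4]; 4]`): the result is row-major, so element `(i, j)` of the input lands at index `4 * i + j` of the output, and the flatten itself is a single irrefutable pattern match with no indexing or `unsafe`.

// Illustrative stand-in for the new `ArrayFlatten` impl on [[T; 4]; 4].
fn flatten_4x4<T>(a: [[T; 4]; 4]) -> [T; 16] {
    let [[a0, a1, a2, a3], [b0, b1, b2, b3], [c0, c1, c2, c3], [d0, d1, d2, d3]] = a;
    [
        a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3,
    ]
}

fn main() {
    // Generic over the element type, not just u8.
    let input: [[u16; 4]; 4] =
        core::array::from_fn(|i| core::array::from_fn(|j| (10 * i + j) as u16));
    let flat = flatten_4x4(input);
    for i in 0..4 {
        for j in 0..4 {
            // Row-major: element (i, j) ends up at index 4 * i + j.
            assert_eq!(flat[4 * i + j], input[i][j]);
        }
    }
}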