mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
lib/crypto: arm64/aes: Remove obsolete chunking logic
Since commit aefbab8e77 ("arm64: fpsimd: Preserve/restore kernel mode NEON at context switch"), kernel-mode NEON sections have been preemptible on arm64. And since commit 7dadeaa6e8 ("sched: Further restrict the preemption modes"), voluntary preemption is no longer supported on arm64 either. Therefore, there's no longer any need to limit the length of kernel-mode NEON sections on arm64. Simplify the AES-CBC-MAC code accordingly. Reviewed-by: Ard Biesheuvel <ardb@kernel.org> Link: https://lore.kernel.org/r/20260401000548.133151-2-ebiggers@kernel.org Signed-off-by: Eric Biggers <ebiggers@kernel.org>
This commit is contained in:
parent
8aeeb5255d
commit
11d6bc70ff
|
|
@ -101,16 +101,11 @@ static u32 ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
|
|||
u32 blocks = abytes / AES_BLOCK_SIZE;
|
||||
|
||||
if (macp == AES_BLOCK_SIZE || (!macp && blocks > 0)) {
|
||||
u32 rem = ce_aes_mac_update(in, rk, rounds, blocks, mac,
|
||||
macp, enc_after);
|
||||
u32 adv = (blocks - rem) * AES_BLOCK_SIZE;
|
||||
|
||||
ce_aes_mac_update(in, rk, rounds, blocks, mac, macp,
|
||||
enc_after);
|
||||
macp = enc_after ? 0 : AES_BLOCK_SIZE;
|
||||
in += adv;
|
||||
abytes -= adv;
|
||||
|
||||
if (unlikely(rem))
|
||||
macp = 0;
|
||||
in += blocks * AES_BLOCK_SIZE;
|
||||
abytes -= blocks * AES_BLOCK_SIZE;
|
||||
} else {
|
||||
u32 l = min(AES_BLOCK_SIZE - macp, abytes);
|
||||
|
||||
|
|
|
|||
|
|
@ -230,9 +230,9 @@ asmlinkage void ce_aes_essiv_cbc_encrypt(u8 out[], u8 const in[],
|
|||
asmlinkage void ce_aes_essiv_cbc_decrypt(u8 out[], u8 const in[],
|
||||
u32 const rk1[], int rounds,
|
||||
int blocks, u8 iv[], u32 const rk2[]);
|
||||
asmlinkage size_t ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
|
||||
size_t blocks, u8 dg[], int enc_before,
|
||||
int enc_after);
|
||||
asmlinkage void ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
|
||||
size_t blocks, u8 dg[], int enc_before,
|
||||
int enc_after);
|
||||
#elif defined(CONFIG_PPC)
|
||||
void ppc_expand_key_128(u32 *key_enc, const u8 *key);
|
||||
void ppc_expand_key_192(u32 *key_enc, const u8 *key);
|
||||
|
|
|
|||
|
|
@ -817,9 +817,9 @@ AES_FUNC_END(aes_xts_decrypt)
|
|||
|
||||
#if IS_ENABLED(CONFIG_CRYPTO_LIB_AES_CBC_MACS)
|
||||
/*
|
||||
* size_t aes_mac_update(u8 const in[], u32 const rk[], int rounds,
|
||||
* size_t blocks, u8 dg[], int enc_before,
|
||||
* int enc_after);
|
||||
* void aes_mac_update(u8 const in[], u32 const rk[], int rounds,
|
||||
* size_t blocks, u8 dg[], int enc_before,
|
||||
* int enc_after);
|
||||
*/
|
||||
AES_FUNC_START(aes_mac_update)
|
||||
ld1 {v0.16b}, [x4] /* get dg */
|
||||
|
|
@ -844,7 +844,6 @@ AES_FUNC_START(aes_mac_update)
|
|||
cbz w5, .Lmacout
|
||||
encrypt_block v0, w2, x1, x7, w8
|
||||
st1 {v0.16b}, [x4] /* return dg */
|
||||
cond_yield .Lmacout, x7, x8
|
||||
b .Lmacloop4x
|
||||
.Lmac1x:
|
||||
add x3, x3, #4
|
||||
|
|
@ -863,7 +862,6 @@ AES_FUNC_START(aes_mac_update)
|
|||
|
||||
.Lmacout:
|
||||
st1 {v0.16b}, [x4] /* return dg */
|
||||
mov x0, x3
|
||||
ret
|
||||
AES_FUNC_END(aes_mac_update)
|
||||
#endif /* CONFIG_CRYPTO_LIB_AES_CBC_MACS */
|
||||
|
|
|
|||
|
|
@ -29,9 +29,9 @@ asmlinkage void __aes_ce_decrypt(const u32 inv_rk[], u8 out[AES_BLOCK_SIZE],
|
|||
asmlinkage u32 __aes_ce_sub(u32 l);
|
||||
asmlinkage void __aes_ce_invert(struct aes_block *out,
|
||||
const struct aes_block *in);
|
||||
asmlinkage size_t neon_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
|
||||
size_t blocks, u8 dg[], int enc_before,
|
||||
int enc_after);
|
||||
asmlinkage void neon_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
|
||||
size_t blocks, u8 dg[], int enc_before,
|
||||
int enc_after);
|
||||
|
||||
/*
|
||||
* Expand an AES key using the crypto extensions if supported and usable or
|
||||
|
|
@ -192,25 +192,16 @@ static bool aes_cbcmac_blocks_arch(u8 h[AES_BLOCK_SIZE],
|
|||
bool enc_after)
|
||||
{
|
||||
if (static_branch_likely(&have_neon) && likely(may_use_simd())) {
|
||||
do {
|
||||
size_t rem;
|
||||
|
||||
scoped_ksimd() {
|
||||
if (static_branch_likely(&have_aes))
|
||||
rem = ce_aes_mac_update(
|
||||
data, key->k.rndkeys,
|
||||
key->nrounds, nblocks, h,
|
||||
enc_before, enc_after);
|
||||
else
|
||||
rem = neon_aes_mac_update(
|
||||
data, key->k.rndkeys,
|
||||
key->nrounds, nblocks, h,
|
||||
enc_before, enc_after);
|
||||
}
|
||||
data += (nblocks - rem) * AES_BLOCK_SIZE;
|
||||
nblocks = rem;
|
||||
enc_before = false;
|
||||
} while (nblocks);
|
||||
scoped_ksimd() {
|
||||
if (static_branch_likely(&have_aes))
|
||||
ce_aes_mac_update(data, key->k.rndkeys,
|
||||
key->nrounds, nblocks, h,
|
||||
enc_before, enc_after);
|
||||
else
|
||||
neon_aes_mac_update(data, key->k.rndkeys,
|
||||
key->nrounds, nblocks, h,
|
||||
enc_before, enc_after);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user