lib/crypto: arm/ghash: Migrate optimized code into library

Remove the "ghash-neon" crypto_shash algorithm.  Move the corresponding
assembly code into lib/crypto/, and wire it up to the GHASH library.

This makes the GHASH library optimized on arm (though only with NEON,
not PMULL; for now the goal is just parity with crypto_shash).  It
greatly reduces the amount of arm-specific glue code that is needed, and
it fixes the issue where this optimization was disabled by default.

To integrate the assembly code correctly with the library, make the
following tweaks (see the sketch after this list):

- Change the type of 'blocks' from int to size_t.
- Change the types of 'dg' and 'h' to polyval_elem.  Note that this
  simply reflects the format that the code was already using, at least
  on little endian CPUs.  For big endian CPUs, add byte-swaps.
- Remove the 'head' argument, which is no longer needed.
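
As a sketch of the net effect on the assembly interface (assembled from
the diff below; struct polyval_elem is defined by the GHASH library):

	/* Before: int block count, raw u64 words, optional 'head' block. */
	asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[],
					      const char *src,
					      u64 const h[1][2],
					      const char *head);

	/* After: size_t block count, polyval_elem state and key, no 'head'.
	 * On big endian CPUs, the assembly byte-swaps the state and key
	 * around the computation (see the new vrev64_if_be macro).
	 */
	void pmull_ghash_update_p8(size_t blocks, struct polyval_elem *dg,
				   const u8 *src,
				   const struct polyval_elem *h);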

Acked-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20260319061723.1140720-8-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
---

diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
@@ -3,26 +3,17 @@
 menu "Accelerated Cryptographic Algorithms for CPU (arm)"
 
 config CRYPTO_GHASH_ARM_CE
-	tristate "Hash functions: GHASH (PMULL/NEON/ARMv8 Crypto Extensions)"
+	tristate "AEAD cipher: AES in GCM mode (ARMv8 Crypto Extensions)"
 	depends on KERNEL_MODE_NEON
 	select CRYPTO_AEAD
-	select CRYPTO_HASH
 	select CRYPTO_CRYPTD
 	select CRYPTO_LIB_AES
 	select CRYPTO_LIB_GF128MUL
 	help
-	  GCM GHASH function (NIST SP800-38D)
 	  AEAD cipher: AES-GCM
 	  Architecture: arm using
-	  - PMULL (Polynomial Multiply Long) instructions
-	  - NEON (Advanced SIMD) extensions
 	  - ARMv8 Crypto Extensions
-
-	  Use an implementation of GHASH (used by the GCM AEAD chaining mode)
-	  that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
-	  that is part of the ARMv8 Crypto Extensions, or a slower variant that
-	  uses the vmull.p8 instruction that is part of the basic NEON ISA.
 
 config CRYPTO_AES_ARM_BS
 	tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (bit-sliced NEON)"
 	depends on KERNEL_MODE_NEON

diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
 aes-arm-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
 aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
-ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o ghash-neon-core.o
+ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o

diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Accelerated GHASH implementation with ARMv8 vmull.p64 instructions.
+ * AES-GCM using ARMv8 Crypto Extensions
  *
  * Copyright (C) 2015 - 2018 Linaro Ltd.
  * Copyright (C) 2023 Google LLC.
@@ -14,7 +14,6 @@
 #include <crypto/gf128mul.h>
 #include <crypto/ghash.h>
 #include <crypto/internal/aead.h>
-#include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <linux/cpufeature.h>
@@ -25,20 +24,14 @@
 #include <linux/string.h>
 #include <linux/unaligned.h>
 
-MODULE_DESCRIPTION("GHASH hash function using ARMv8 Crypto Extensions");
+MODULE_DESCRIPTION("AES-GCM using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("ghash");
 MODULE_ALIAS_CRYPTO("gcm(aes)");
 MODULE_ALIAS_CRYPTO("rfc4106(gcm(aes))");
 
 #define RFC4106_NONCE_SIZE	4
 
-struct ghash_key {
-	be128	k;
-	u64	h[1][2];
-};
-
 struct gcm_key {
 	u64	h[4][2];
 	u32	rk[AES_MAX_KEYLENGTH_U32];
@@ -46,80 +39,9 @@ struct gcm_key {
 	u8	nonce[];	// for RFC4106 nonce
 };
 
-struct arm_ghash_desc_ctx {
-	u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
-};
-
-asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
-				       u64 const h[4][2], const char *head);
-
-asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
-				      u64 const h[1][2], const char *head);
-
-static int ghash_init(struct shash_desc *desc)
-{
-	struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	*ctx = (struct arm_ghash_desc_ctx){};
-	return 0;
-}
-
-static void ghash_do_update(int blocks, u64 dg[], const char *src,
-			    struct ghash_key *key, const char *head)
-{
-	kernel_neon_begin();
-	pmull_ghash_update_p8(blocks, dg, src, key->h, head);
-	kernel_neon_end();
-}
-
-static int ghash_update(struct shash_desc *desc, const u8 *src,
-			unsigned int len)
-{
-	struct ghash_key *key = crypto_shash_ctx(desc->tfm);
-	struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
-	int blocks;
-
-	blocks = len / GHASH_BLOCK_SIZE;
-	ghash_do_update(blocks, ctx->digest, src, key, NULL);
-	return len - blocks * GHASH_BLOCK_SIZE;
-}
-
-static int ghash_export(struct shash_desc *desc, void *out)
-{
-	struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
-	u8 *dst = out;
-
-	put_unaligned_be64(ctx->digest[1], dst);
-	put_unaligned_be64(ctx->digest[0], dst + 8);
-	return 0;
-}
-
-static int ghash_import(struct shash_desc *desc, const void *in)
-{
-	struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
-	const u8 *src = in;
-
-	ctx->digest[1] = get_unaligned_be64(src);
-	ctx->digest[0] = get_unaligned_be64(src + 8);
-	return 0;
-}
-
-static int ghash_finup(struct shash_desc *desc, const u8 *src,
-		       unsigned int len, u8 *dst)
-{
-	struct ghash_key *key = crypto_shash_ctx(desc->tfm);
-	struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	if (len) {
-		u8 buf[GHASH_BLOCK_SIZE] = {};
-
-		memcpy(buf, src, len);
-		ghash_do_update(1, ctx->digest, buf, key, NULL);
-		memzero_explicit(buf, sizeof(buf));
-	}
-	return ghash_export(desc, dst);
-}
-
 static void ghash_reflect(u64 h[], const be128 *k)
 {
 	u64 carry = be64_to_cpu(k->a) >> 63;
@@ -131,40 +53,6 @@ static void ghash_reflect(u64 h[], const be128 *k)
 	h[1] ^= 0xc200000000000000UL;
 }
 
-static int ghash_setkey(struct crypto_shash *tfm,
-			const u8 *inkey, unsigned int keylen)
-{
-	struct ghash_key *key = crypto_shash_ctx(tfm);
-
-	if (keylen != GHASH_BLOCK_SIZE)
-		return -EINVAL;
-
-	/* needed for the fallback */
-	memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
-
-	ghash_reflect(key->h[0], &key->k);
-	return 0;
-}
-
-static struct shash_alg ghash_alg = {
-	.digestsize		= GHASH_DIGEST_SIZE,
-	.init			= ghash_init,
-	.update			= ghash_update,
-	.finup			= ghash_finup,
-	.setkey			= ghash_setkey,
-	.export			= ghash_export,
-	.import			= ghash_import,
-	.descsize		= sizeof(struct arm_ghash_desc_ctx),
-	.statesize		= sizeof(struct ghash_desc_ctx),
-	.base.cra_name		= "ghash",
-	.base.cra_driver_name	= "ghash-neon",
-	.base.cra_priority	= 300,
-	.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY,
-	.base.cra_blocksize	= GHASH_BLOCK_SIZE,
-	.base.cra_ctxsize	= sizeof(struct ghash_key),
-	.base.cra_module	= THIS_MODULE,
-};
-
 void pmull_gcm_encrypt(int blocks, u64 dg[], const char *src,
 		       struct gcm_key const *k, char *dst,
 		       const char *iv, int rounds, u32 counter);
@@ -543,37 +431,15 @@ static struct aead_alg gcm_aes_algs[] = {{
 static int __init ghash_ce_mod_init(void)
 {
-	int err;
-
-	if (!(elf_hwcap & HWCAP_NEON))
+	if (!(elf_hwcap & HWCAP_NEON) || !(elf_hwcap2 & HWCAP2_PMULL))
 		return -ENODEV;
 
-	if (elf_hwcap2 & HWCAP2_PMULL) {
-		err = crypto_register_aeads(gcm_aes_algs,
-					    ARRAY_SIZE(gcm_aes_algs));
-		if (err)
-			return err;
-	}
-
-	err = crypto_register_shash(&ghash_alg);
-	if (err)
-		goto err_aead;
-
-	return 0;
-
-err_aead:
-	if (elf_hwcap2 & HWCAP2_PMULL)
-		crypto_unregister_aeads(gcm_aes_algs,
-					ARRAY_SIZE(gcm_aes_algs));
-	return err;
+	return crypto_register_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs));
 }
 
 static void __exit ghash_ce_mod_exit(void)
 {
-	crypto_unregister_shash(&ghash_alg);
-	if (elf_hwcap2 & HWCAP2_PMULL)
-		crypto_unregister_aeads(gcm_aes_algs,
-					ARRAY_SIZE(gcm_aes_algs));
+	crypto_unregister_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs));
 }
 
 module_init(ghash_ce_mod_init);

diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
@@ -119,6 +119,7 @@ config CRYPTO_LIB_GF128HASH
 config CRYPTO_LIB_GF128HASH_ARCH
 	bool
 	depends on CRYPTO_LIB_GF128HASH && !UML
+	default y if ARM && KERNEL_MODE_NEON
 	default y if ARM64
 	default y if X86_64

diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
@@ -158,6 +158,7 @@ obj-$(CONFIG_CRYPTO_LIB_GF128HASH) += libgf128hash.o
 libgf128hash-y := gf128hash.o
 ifeq ($(CONFIG_CRYPTO_LIB_GF128HASH_ARCH),y)
 CFLAGS_gf128hash.o += -I$(src)/$(SRCARCH)
+libgf128hash-$(CONFIG_ARM) += arm/ghash-neon-core.o
 libgf128hash-$(CONFIG_ARM64) += arm64/polyval-ce-core.o
 libgf128hash-$(CONFIG_X86) += x86/polyval-pclmul-avx.o
 endif

diff --git a/lib/crypto/arm/ghash.h b/lib/crypto/arm/ghash.h
new file mode 100644
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * GHASH, arm optimized
+ *
+ * Copyright 2026 Google LLC
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
+
+void pmull_ghash_update_p8(size_t blocks, struct polyval_elem *dg,
+			   const u8 *src, const struct polyval_elem *h);
+
+#define ghash_blocks_arch ghash_blocks_arch
+static void ghash_blocks_arch(struct polyval_elem *acc,
+			      const struct ghash_key *key,
+			      const u8 *data, size_t nblocks)
+{
+	if (static_branch_likely(&have_neon) && may_use_simd()) {
+		do {
+			/* Allow rescheduling every 4 KiB. */
+			size_t n =
+				min_t(size_t, nblocks, 4096 / GHASH_BLOCK_SIZE);
+
+			scoped_ksimd()
+				pmull_ghash_update_p8(n, acc, data, &key->h);
+			data += n * GHASH_BLOCK_SIZE;
+			nblocks -= n;
+		} while (nblocks);
+	} else {
+		ghash_blocks_generic(acc, &key->h, data, nblocks);
+	}
+}
+
+#define gf128hash_mod_init_arch gf128hash_mod_init_arch
+static void gf128hash_mod_init_arch(void)
+{
+	if (elf_hwcap & HWCAP_NEON)
+		static_branch_enable(&have_neon);
+}

diff --git a/arch/arm/crypto/ghash-neon-core.S b/lib/crypto/arm/ghash-neon-core.S
@@ -141,22 +141,21 @@
 	vshr.u64	XL, XL, #1
 	.endm
 
+	.macro		vrev64_if_be a
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	vrev64.8	\a, \a
+#endif
+	.endm
+
 	.macro		ghash_update
 	vld1.64		{XL}, [r1]
-
-	/* do the head block first, if supplied */
-	ldr		ip, [sp]
-	teq		ip, #0
-	beq		0f
-	vld1.64		{T1}, [ip]
-	teq		r0, #0
-	b		3f
+	vrev64_if_be	XL
 
 0:
 	vld1.8		{T1}, [r2]!
 	subs		r0, r0, #1
 
-3:	/* multiply XL by SHASH in GF(2^128) */
+	/* multiply XL by SHASH in GF(2^128) */
 	vrev64.8	T1, T1
 
 	vext.8		IN1, T1, T1, #8
@@ -180,11 +179,13 @@
 	.endm
 
 /*
- * void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
- *			      u64 const h[1][2], const char *head)
+ * void pmull_ghash_update_p8(size_t blocks, struct polyval_elem *dg,
+ *			      const u8 *src,
+ *			      const struct polyval_elem *h)
  */
 ENTRY(pmull_ghash_update_p8)
 	vld1.64		{SHASH}, [r3]
+	vrev64_if_be	SHASH
 	veor		SHASH2_p8, SHASH_L, SHASH_H
 
 	vext.8		s1l, SHASH_L, SHASH_L, #1
@@ -201,6 +202,7 @@ ENTRY(pmull_ghash_update_p8)
 	vmov.i64	k48, #0xffffffffffff
 
 	ghash_update
+	vrev64_if_be	XL
 	vst1.64		{XL}, [r1]
 	bx		lr