crypto: caam - Add support of paes algorithm

The PAES algorithm uses a protected (blob-encapsulated) key for encryption/decryption operations.

Signed-off-by: Gaurav Jain <gaurav.jain@nxp.com>
Signed-off-by: Meenakshi Aggarwal <meenakshi.aggarwal@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Meenakshi Aggarwal 2025-10-06 09:17:53 +02:00 committed by Herbert Xu
parent a703a4c2a3
commit 66b9a095f7
4 changed files with 220 additions and 16 deletions

View File

@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
* Copyright 2016-2019, 2023 NXP
* Copyright 2016-2019, 2023, 2025 NXP
*
* Based on talitos crypto API driver.
*
@ -61,13 +61,16 @@
#include <crypto/internal/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <keys/trusted-type.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/key-type.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <soc/fsl/caam-blob.h>
/*
* crypto alg
@ -119,12 +122,15 @@ struct caam_ctx {
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
dma_addr_t key_dma;
u8 protected_key[CAAM_MAX_KEY_SIZE];
dma_addr_t protected_key_dma;
enum dma_data_direction dir;
struct device *jrdev;
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
bool xts_key_fallback;
bool is_blob;
struct crypto_skcipher *fallback;
};
@ -751,9 +757,14 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
/* Here keylen is actual key length */
ctx->cdata.keylen = keylen;
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* Here protected key len is plain key length */
ctx->cdata.plain_keylen = keylen;
ctx->cdata.key_cmd_opt = 0;
/* skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
@ -772,6 +783,62 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
/*
 * paes_skcipher_setkey - setkey handler for the paes (protected AES) algos
 * @skcipher: skcipher transform
 * @key:      caller-supplied buffer: a struct caam_pkey_info header followed
 *            by the CAAM key blob
 * @keylen:   total length of @key, including the CAAM_PKEY_HEADER bytes
 *
 * Records the blob and plain-key lengths in the context, copies the blob into
 * the DMA-coherent key buffer and maps the buffer that will receive the
 * decapsulated protected key.
 *
 * Return: 0 on success, -EINVAL for a bad plain key length, -ENOMEM on a
 * failed DMA mapping.
 */
static int paes_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key,
				unsigned int keylen)
{
	struct caam_pkey_info *pkey_info = (struct caam_pkey_info *)key;
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *jrdev = ctx->jrdev;
	unsigned int pkey_len;
	int err;

	/* Blob is referenced by DMA address, never inlined in the descriptor */
	ctx->cdata.key_inline = false;

	/* Strip the protected-key header; the remainder is the blob */
	keylen = keylen - CAAM_PKEY_HEADER;

	/* Retrieve the length of the plain key */
	ctx->cdata.plain_keylen = pkey_info->plain_key_sz;

	/* Retrieve the length of the blob */
	ctx->cdata.keylen = keylen;

	/* Retrieve the address of the blob */
	ctx->cdata.key_virt = pkey_info->key_buf;

	/* Validate key length for AES algorithms */
	err = aes_check_keylen(ctx->cdata.plain_keylen);
	if (err) {
		dev_err(jrdev, "bad key length\n");
		return err;
	}

	/*
	 * Set the KEY command options from scratch: a previous setkey on this
	 * tfm may have left KEY_EKT set, so do not OR into a stale value.
	 */
	ctx->cdata.key_cmd_opt = KEY_ENC;

	/* check if the Protected-Key is a CCM key */
	if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM)
		ctx->cdata.key_cmd_opt |= KEY_EKT;

	memcpy(ctx->key, ctx->cdata.key_virt, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->cdata.key_dma = ctx->key_dma;

	/* CCM-protected keys carry extra MAC + nonce overhead */
	pkey_len = ctx->cdata.plain_keylen;
	if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM)
		pkey_len += CAAM_CCM_OVERHEAD;

	ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key,
						pkey_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, ctx->protected_key_dma)) {
		dev_err(jrdev, "unable to map protected key\n");
		return -ENOMEM;
	}

	ctx->cdata.protected_key_dma = ctx->protected_key_dma;
	ctx->is_blob = true;

	return 0;
}
static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
const u8 *key, unsigned int keylen)
{
@ -1254,7 +1321,9 @@ static void init_skcipher_job(struct skcipher_request *req,
struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *jrdev = ctx->jrdev;
int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc = edesc->hw_desc;
u32 *desc = !ctx->is_blob ? edesc->hw_desc :
(u32 *)((u8 *)edesc->hw_desc + CAAM_DESC_BYTES_MAX);
dma_addr_t desc_dma;
u32 *sh_desc;
u32 in_options = 0, out_options = 0;
dma_addr_t src_dma, dst_dma, ptr;
@ -1269,11 +1338,6 @@ static void init_skcipher_job(struct skcipher_request *req,
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (ivsize || edesc->mapped_src_nents > 1) {
src_dma = edesc->sec4_sg_dma;
@ -1283,8 +1347,6 @@ static void init_skcipher_job(struct skcipher_request *req,
src_dma = sg_dma_address(req->src);
}
append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
if (likely(req->src == req->dst)) {
dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
out_options = in_options;
@ -1296,7 +1358,25 @@ static void init_skcipher_job(struct skcipher_request *req,
out_options = LDST_SGF;
}
append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
if (ctx->is_blob) {
cnstr_desc_skcipher_enc_dec(desc, &ctx->cdata,
src_dma, dst_dma, req->cryptlen + ivsize,
in_options, out_options,
ivsize, encrypt);
desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
cnstr_desc_protected_blob_decap(edesc->hw_desc, &ctx->cdata, desc_dma);
} else {
sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
}
}
/*
@ -1817,6 +1897,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int ret = 0;
int len;
/*
* XTS is expected to return an error even for input length = 0
@ -1842,8 +1923,12 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
crypto_skcipher_decrypt(&rctx->fallback_req);
}
len = DESC_JOB_IO_LEN * CAAM_CMD_SZ;
if (ctx->is_blob)
len += CAAM_DESC_BYTES_MAX;
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
edesc = skcipher_edesc_alloc(req, len);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@ -1885,6 +1970,27 @@ static int skcipher_decrypt(struct skcipher_request *req)
}
static struct caam_skcipher_alg driver_algs[] = {
{
.skcipher.base = {
.base = {
.cra_name = "cbc(paes)",
.cra_driver_name = "cbc-paes-caam",
.cra_blocksize = AES_BLOCK_SIZE,
},
.setkey = paes_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE + CAAM_BLOB_OVERHEAD +
CAAM_PKEY_HEADER,
.max_keysize = AES_MAX_KEY_SIZE + CAAM_BLOB_OVERHEAD +
CAAM_PKEY_HEADER,
.ivsize = AES_BLOCK_SIZE,
},
.skcipher.op = {
.do_one_request = skcipher_do_one_req,
},
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
.skcipher.base = {
.base = {

View File

@ -2,12 +2,13 @@
/*
* Shared descriptors for aead, skcipher algorithms
*
* Copyright 2016-2019 NXP
* Copyright 2016-2019, 2025 NXP
*/
#include "compat.h"
#include "desc_constr.h"
#include "caamalg_desc.h"
#include <soc/fsl/caam-blob.h>
/*
* For aead functions, read payload and write payload,
@ -1364,6 +1365,84 @@ static inline void skcipher_append_src_dst(u32 *desc)
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
 * cnstr_desc_skcipher_enc_dec - build a skcipher encrypt/decrypt JOB
 * descriptor that uses a protected key
 * @desc:        buffer used for descriptor construction
 * @cdata:       algorithm info; protected_key_dma, plain_keylen, key_cmd_opt
 *               and algtype are consumed here
 * @src:         DMA address of the input data
 * @dst:         DMA address of the output data
 * @data_sz:     SEQ IN/OUT length (payload plus IV, per the caller)
 * @in_options:  SEQ IN PTR options (e.g. scatter/gather flag)
 * @out_options: SEQ OUT PTR options
 * @ivsize:      IV size in bytes; 0 means no IV load/store
 * @encrypt:     true for encryption, false for decryption
 *
 * The KEY command references the protected key by DMA address, so this
 * descriptor must only run after the blob-decap descriptor has written the
 * protected key to cdata->protected_key_dma.
 */
void cnstr_desc_skcipher_enc_dec(u32 * const desc, struct alginfo *cdata,
		dma_addr_t src, dma_addr_t dst, unsigned int data_sz,
		unsigned int in_options, unsigned int out_options,
		unsigned int ivsize, const bool encrypt)
{
	u32 options = cdata->algtype | OP_ALG_AS_INIT;

	if (encrypt)
		options |= OP_ALG_ENCRYPT;
	else
		options |= OP_ALG_DECRYPT;

	init_job_desc(desc, 0);

	/*
	 * NOP jump (offset 1) emitted before the KEY command — presumably a
	 * required placeholder/sync point in the descriptor; TODO confirm
	 * against the CAAM reference manual.
	 */
	append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
			JUMP_COND_NOP | JUMP_TEST_ALL | 1);

	/* Load the protected key by reference (KEY_ENC and, for CCM, KEY_EKT) */
	append_key(desc, cdata->protected_key_dma, cdata->plain_keylen,
			CLASS_1 | KEY_DEST_CLASS_REG | cdata->key_cmd_opt);

	append_seq_in_ptr(desc, src, data_sz, in_options);

	append_seq_out_ptr(desc, dst, data_sz, out_options);

	/* Load IV, if there is one */
	if (ivsize)
		append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
				LDST_CLASS_1_CCB);

	append_operation(desc, options);

	skcipher_append_src_dst(desc);

	/* Store IV */
	if (ivsize)
		append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
				LDST_CLASS_1_CCB);

	print_hex_dump_debug("skcipher_enc_dec job desc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
}
EXPORT_SYMBOL(cnstr_desc_skcipher_enc_dec);
/*
 * cnstr_desc_protected_blob_decap - build a JOB descriptor that decapsulates
 * a CAAM black blob into a protected key
 * @desc:           buffer used for descriptor construction
 * @cdata:          algorithm info; key_dma (blob), plain_keylen,
 *                  protected_key_dma and key_cmd_opt are consumed here
 * @next_desc_addr: DMA address of a descriptor to chain to after decap,
 *                  or 0 for no chaining
 *
 * SEQ IN is the blob (plain key length + CAAM_BLOB_OVERHEAD bytes), SEQ OUT
 * is the protected-key buffer. When @next_desc_addr is set, a non-local jump
 * transfers execution to that descriptor once decapsulation completes.
 */
void cnstr_desc_protected_blob_decap(u32 * const desc, struct alginfo *cdata,
				     dma_addr_t next_desc_addr)
{
	u32 protected_store;

	init_job_desc(desc, 0);

	/* Load key modifier ("SECURE_KEY", without the trailing NUL) */
	append_load_as_imm(desc, KEYMOD, sizeof(KEYMOD) - 1,
			   LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY);

	/* Input: the blob; output: the decapsulated protected key */
	append_seq_in_ptr_intlen(desc, cdata->key_dma,
				 cdata->plain_keylen + CAAM_BLOB_OVERHEAD, 0);

	append_seq_out_ptr_intlen(desc, cdata->protected_key_dma,
				  cdata->plain_keylen, 0);

	protected_store = OP_PCLID_BLOB | OP_PCL_BLOB_BLACK;

	/* Propagate the CCM (EKT) attribute recorded at setkey time */
	if ((cdata->key_cmd_opt >> KEY_EKT_OFFSET) & 1)
		protected_store |= OP_PCL_BLOB_EKT;

	append_operation(desc, OP_TYPE_DECAP_PROTOCOL | protected_store);

	/* Optionally chain to the skcipher enc/dec descriptor */
	if (next_desc_addr) {
		append_jump(desc, JUMP_TYPE_NONLOCAL | JUMP_TEST_ALL);
		append_ptr(desc, next_desc_addr);
	}

	print_hex_dump_debug("protected blob decap job desc@" __stringify(__LINE__) ":",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);
}
EXPORT_SYMBOL(cnstr_desc_protected_blob_decap);
/**
* cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
@ -1391,7 +1470,8 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
/* Load class1 key only */
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
cdata->plain_keylen, CLASS_1 | KEY_DEST_CLASS_REG
| cdata->key_cmd_opt);
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
@ -1466,7 +1546,8 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Load class1 key only */
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
cdata->plain_keylen, CLASS_1 | KEY_DEST_CLASS_REG
| cdata->key_cmd_opt);
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {

View File

@ -2,7 +2,7 @@
/*
* Shared descriptors for aead, skcipher algorithms
*
* Copyright 2016 NXP
* Copyright 2016, 2025 NXP
*/
#ifndef _CAAMALG_DESC_H_
@ -48,6 +48,9 @@
#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \
16 * CAAM_CMD_SZ)
/* Key modifier for CAAM Protected blobs */
#define KEYMOD "SECURE_KEY"
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize, int era);
@ -113,4 +116,12 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata);
void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata);
void cnstr_desc_protected_blob_decap(u32 * const desc, struct alginfo *cdata,
dma_addr_t next_desc);
void cnstr_desc_skcipher_enc_dec(u32 * const desc, struct alginfo *cdata,
dma_addr_t src, dma_addr_t dst, unsigned int data_sz,
unsigned int in_options, unsigned int out_options,
unsigned int ivsize, const bool encrypt);
#endif /* _CAAMALG_DESC_H_ */

View File

@ -3,7 +3,7 @@
* caam descriptor construction helper functions
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
* Copyright 2019 NXP
* Copyright 2019, 2025 NXP
*/
#ifndef DESC_CONSTR_H
@ -498,17 +498,23 @@ do { \
* @keylen: length of the provided algorithm key, in bytes
* @keylen_pad: padded length of the provided algorithm key, in bytes
* @key_dma: dma (bus) address where algorithm key resides
* @protected_key_dma: dma (bus) address where protected key resides
* @key_virt: virtual address where algorithm key resides
* @key_inline: true - key can be inlined in the descriptor; false - key is
* referenced by the descriptor
* @plain_keylen: size of the key to be loaded by the CAAM
* @key_cmd_opt: optional parameters for KEY command
*/
struct alginfo {
	u32 algtype;			/* OP_ALG_ALGSEL_* | OP_ALG_AAI_* selector */
	unsigned int keylen;		/* key length; for paes this is the blob length */
	unsigned int keylen_pad;
	dma_addr_t key_dma;		/* DMA address of the key (or blob, for paes) */
	dma_addr_t protected_key_dma;	/* DMA address of the decapsulated protected key */
	const void *key_virt;
	bool key_inline;		/* true: key inlined in descriptor; false: by reference */
	u32 plain_keylen;		/* plain key length loaded by the KEY command */
	u32 key_cmd_opt;		/* extra KEY command flags (KEY_ENC, KEY_EKT) */
};
/**