blk-crypto: optimize data unit alignment checking

Avoid the relatively high overhead of constructing and walking per-page
segment bio_vecs for data unit alignment checking by merging the checks
into existing loops.
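
The merged checks rely on a common kernel idiom: for a power-of-two data
unit size, OR-ing a segment's length and offset together and testing the
result once is equivalent to testing each value on its own.  A minimal
userspace sketch of the idiom (IS_ALIGNED redefined locally; this is not
kernel code):

#include <assert.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned int data_unit_size = 4096;	/* power of two */

	/* both length and offset aligned: the combined check passes */
	assert(IS_ALIGNED(8192u | 4096u, data_unit_size));

	/* if either value is misaligned, the OR is misaligned too */
	assert(!IS_ALIGNED(8192u | 512u, data_unit_size));
	assert(!IS_ALIGNED(100u, data_unit_size));

	printf("combined alignment check behaves as expected\n");
	return 0;
}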

For hardware-supported crypto, perform the check in bio_split_io_at,
which already contains a similar alignment check applied to all I/O.  This
means bio-based drivers that do not call bio_split_to_limits, should they
ever grow blk-crypto support, need to implement the check themselves,
just like all other queue limits checks.
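
A userspace sketch of the mask widening (fake_bvec and segment_ok are
illustrative names, and the values are example assumptions; the kernel
stores dma_alignment as a mask, e.g. 511 for 512-byte alignment):

#include <stdbool.h>
#include <stdio.h>

struct fake_bvec { unsigned int offset, len; };

static bool segment_ok(struct fake_bvec bv, unsigned int start_align_mask,
		       unsigned int len_align_mask)
{
	/* one test per segment covers DMA alignment and data unit size */
	return !(bv.offset & start_align_mask) && !(bv.len & len_align_mask);
}

int main(void)
{
	unsigned int dma_alignment = 511;	/* 512-byte alignment, as a mask */
	unsigned int data_unit_size = 4096;	/* crypto data unit, power of two */
	unsigned int start_align_mask = dma_alignment | (data_unit_size - 1);
	unsigned int len_align_mask = data_unit_size - 1;
	struct fake_bvec ok  = { .offset = 0,   .len = 8192 };
	struct fake_bvec bad = { .offset = 512, .len = 8192 };

	printf("aligned segment:    %s\n",
	       segment_ok(ok, start_align_mask, len_align_mask) ? "pass" : "fail");
	printf("misaligned segment: %s\n",
	       segment_ok(bad, start_align_mask, len_align_mask) ? "pass" : "fail");
	return 0;
}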

For blk-crypto-fallback, perform the check in the encryption and
decryption loops.  This means alignment errors for decryption will only
be detected after the I/O has completed, but that seems like a
worthwhile trade-off.
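
A sketch of the loop-merged check in plain C (fake_bvec and the -1 error
value are illustrative assumptions, not the kernel code): the first
misaligned segment aborts the walk, so no separate validation pass over
the bio is needed:

#include <stdio.h>

#define DATA_UNIT_SIZE 4096u

struct fake_bvec { unsigned int offset, len; };

/* returns 0 on success, -1 on the first misaligned segment */
static int process_segments(const struct fake_bvec *segs, int nr)
{
	for (int i = 0; i < nr; i++) {
		if ((segs[i].len | segs[i].offset) & (DATA_UNIT_SIZE - 1))
			return -1;	/* detected mid-walk, like BLK_STS_INVAL */
		/* ... decrypt each data unit in the segment here ... */
	}
	return 0;
}

int main(void)
{
	struct fake_bvec segs[] = {
		{ .offset = 0,   .len = 4096 },
		{ .offset = 512, .len = 4096 },	/* misaligned offset */
	};

	printf("result: %d\n", process_segments(segs, 2));
	return 0;
}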

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
3 changed files with 21 additions and 25 deletions

block/blk-crypto-fallback.c

@@ -278,6 +278,12 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
 			bio_iter_iovec(src_bio, src_bio->bi_iter);
 		struct page *enc_page = enc_pages[enc_idx];
 
+		if (!IS_ALIGNED(src_bv.bv_len | src_bv.bv_offset,
+				data_unit_size)) {
+			enc_bio->bi_status = BLK_STS_INVAL;
+			goto out_free_enc_bio;
+		}
+
 		__bio_add_page(enc_bio, enc_page, src_bv.bv_len,
 				src_bv.bv_offset);
@@ -296,8 +302,10 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
 		 */
 		for (i = 0; i < src_bv.bv_len; i += data_unit_size) {
 			blk_crypto_dun_to_iv(curr_dun, &iv);
-			if (crypto_skcipher_encrypt(ciph_req))
+			if (crypto_skcipher_encrypt(ciph_req)) {
+				enc_bio->bi_status = BLK_STS_IOERR;
 				goto out_free_enc_bio;
+			}
 			bio_crypt_dun_increment(curr_dun, 1);
 			src.offset += data_unit_size;
 			dst.offset += data_unit_size;
@@ -334,7 +342,7 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
 	 */
 	for (; enc_idx < nr_enc_pages; enc_idx++)
 		__bio_add_page(enc_bio, enc_pages[enc_idx], PAGE_SIZE, 0);
-	bio_io_error(enc_bio);
+	bio_endio(enc_bio);
 }
 
 /*
@@ -387,6 +395,9 @@ static blk_status_t __blk_crypto_fallback_decrypt_bio(struct bio *bio,
 	__bio_for_each_segment(bv, bio, iter, iter) {
 		struct page *page = bv.bv_page;
 
+		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
+			return BLK_STS_INVAL;
+
 		sg_set_page(&sg, page, data_unit_size, bv.bv_offset);
 
 		/* Decrypt each data unit in the segment */

block/blk-crypto.c

@@ -219,22 +219,6 @@ bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
 	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
 }
 
-/* Check that all I/O segments are data unit aligned. */
-static bool bio_crypt_check_alignment(struct bio *bio)
-{
-	const unsigned int data_unit_size =
-		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
-	struct bvec_iter iter;
-	struct bio_vec bv;
-
-	bio_for_each_segment(bv, bio, iter) {
-		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
-			return false;
-	}
-
-	return true;
-}
-
 blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
 {
 	return blk_crypto_get_keyslot(rq->q->crypto_profile,
@@ -287,12 +271,6 @@ bool __blk_crypto_bio_prep(struct bio *bio)
 		return false;
 	}
 
-	if (!bio_crypt_check_alignment(bio)) {
-		bio->bi_status = BLK_STS_INVAL;
-		bio_endio(bio);
-		return false;
-	}
-
 	/*
 	 * If the device does not natively support the encryption context, try to use
 	 * the fallback if available.

block/blk-merge.c

@@ -324,12 +324,19 @@ static inline unsigned int bvec_seg_gap(struct bio_vec *bvprv,
 int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
 		unsigned *segs, unsigned max_bytes, unsigned len_align_mask)
 {
+	struct bio_crypt_ctx *bc = bio_crypt_ctx(bio);
 	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	unsigned nsegs = 0, bytes = 0, gaps = 0;
 	struct bvec_iter iter;
+	unsigned start_align_mask = lim->dma_alignment;
+
+	if (bc) {
+		start_align_mask |= (bc->bc_key->crypto_cfg.data_unit_size - 1);
+		len_align_mask |= (bc->bc_key->crypto_cfg.data_unit_size - 1);
+	}
 
 	bio_for_each_bvec(bv, bio, iter) {
-		if (bv.bv_offset & lim->dma_alignment ||
+		if (bv.bv_offset & start_align_mask ||
 		    bv.bv_len & len_align_mask)
 			return -EINVAL;
 