block-7.1-20260508

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmn+FSMQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgphZLEACU3HeIpCrWGqLShqiop0x9FSzREhdAOgHC
 SKJVPyJeGGvS46dCBM3Dkhfwa8VyJvLkpfAYcg9FXQxDyvMB+40jZVxUT+tshHPm
 UtjIGoUJ8DMtLzXzvg73nEcnIZWOmTlSQFwTCfOo+h79zHzsJu+YTUoNQ4vgGO6l
 Hn+lNybPHIdPizH4hMq913OcIApQyTuA0+JsuUm5YVb0kUiJoDIvfwuOH6jIQ5aX
 Mv7MTRRvXRQ/XRgY+Er46YYSL+GA+IEUgVXLawSNiaiOj17m0G1jzNIG7ERZJVkE
 vWD+Yq9U0ECEWT9V1c0VaDnOAK8Bfd/h3ZGnYAWXtfj3aToRyIUOfw5rO0UdoT47
 /fQWrz8qS3X71FlQHmNVgB3hZWSQ+o+3TrWqhSm2oytCZ3mpzz9Rytt4qltfX6tz
 2PncyyD124m6KAaouCQpitZlTLoXQWCdYmjCbfOwFYPkBjROXYtMu0x/eoCyjusg
 e42qt8zDc1ZnpgMejto4g1IBmrQLpQK9ui1cJzwbcuFomegIzfsivtYEav4v0JYN
 Y4SmuvvwOiErJVrQzhBXR/iDUSQ/d4AEow1GGTKRrt+naip4PWx9/HPCsJQ9N+xv
 kvcpZhTZQ8G063wCNW1Nt3c5kzDQ6xjeO9FwVYrRxLGYHKjhmWRzRjaKzODEG9g4
 NY3LC7T/JA==
 =k+a/
 -----END PGP SIGNATURE-----

Merge tag 'block-7.1-20260508' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

 - Fix for ublk not doing an actual issue from the task_work fallback
   path. Any request hitting that should be canceled automatically

 - Fix for uring_cmd prep side handling, for the block side uring_cmd
   discard handling

 - Fix for missing validation of the io and physical block size shifts

 - Fix for a use-after-free in ublk's cancel command handling

* tag 'block-7.1-20260508' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  ublk: fix use-after-free in ublk_cancel_cmd()
  ublk: validate physical_bs_shift, io_min_shift and io_opt_shift
  block: only read from sqe on initial invocation of blkdev_uring_cmd()
  ublk: don't issue uring_cmd from fallback task work
This commit is contained in:
Linus Torvalds 2026-05-08 13:18:13 -07:00
commit cbf457c584
2 changed files with 50 additions and 16 deletions

View File

@ -857,6 +857,8 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
#endif
/*
 * Per-command state for block-device uring_cmd (e.g. BLOCK_URING_CMD_DISCARD),
 * stashed in the io_uring_cmd pdu so it survives reissue: the SQE is only
 * read on the initial invocation, and later (re)issues use these fields.
 */
struct blk_iou_cmd {
	u64 start;	/* byte offset, captured from sqe->addr on first issue */
	u64 len;	/* byte length, captured from sqe->addr3 on first issue */
	int res;	/* accumulated result; zeroed at issue time */
	bool nowait;	/* set when issued with IO_URING_F_NONBLOCK */
};
@ -946,23 +948,27 @@ int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
struct block_device *bdev = I_BDEV(cmd->file->f_mapping->host);
struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
const struct io_uring_sqe *sqe = cmd->sqe;
u32 cmd_op = cmd->cmd_op;
uint64_t start, len;
/* Read what we need from the SQE on the first issue */
if (!(issue_flags & IORING_URING_CMD_REISSUE)) {
const struct io_uring_sqe *sqe = cmd->sqe;
if (unlikely(sqe->ioprio || sqe->__pad1 || sqe->len ||
sqe->rw_flags || sqe->file_index))
return -EINVAL;
bic->start = READ_ONCE(sqe->addr);
bic->len = READ_ONCE(sqe->addr3);
}
bic->res = 0;
bic->nowait = issue_flags & IO_URING_F_NONBLOCK;
start = READ_ONCE(sqe->addr);
len = READ_ONCE(sqe->addr3);
switch (cmd_op) {
case BLOCK_URING_CMD_DISCARD:
return blkdev_cmd_discard(cmd, bdev, start, len, bic->nowait);
return blkdev_cmd_discard(cmd, bdev, bic->start, bic->len,
bic->nowait);
}
return -EINVAL;
}

View File

@ -900,6 +900,20 @@ static int ublk_validate_params(const struct ublk_device *ub)
if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
return -EINVAL;
/*
* 256M is a reasonable upper bound for physical block size,
* io_min and io_opt; it aligns with the maximum physical
* block size possible in NVMe.
*/
if (p->physical_bs_shift > ilog2(SZ_256M))
return -EINVAL;
if (p->io_min_shift > ilog2(SZ_256M))
return -EINVAL;
if (p->io_opt_shift > ilog2(SZ_256M))
return -EINVAL;
if (p->logical_bs_shift > p->physical_bs_shift)
return -EINVAL;
@ -2397,8 +2411,14 @@ static void ublk_reset_ch_dev(struct ublk_device *ub)
{
int i;
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_queue_reinit(ub, ublk_get_queue(ub, i));
for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
struct ublk_queue *ubq = ublk_get_queue(ub, i);
/* Sync with ublk_cancel_cmd() */
spin_lock(&ubq->cancel_lock);
ublk_queue_reinit(ub, ubq);
spin_unlock(&ubq->cancel_lock);
}
/* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
ub->mm = NULL;
@ -2739,6 +2759,7 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
{
struct ublk_io *io = &ubq->ios[tag];
struct ublk_device *ub = ubq->dev;
struct io_uring_cmd *cmd = NULL;
struct request *req;
bool done;
@ -2761,12 +2782,15 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
spin_lock(&ubq->cancel_lock);
done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
if (!done)
if (!done) {
io->flags |= UBLK_IO_FLAG_CANCELED;
cmd = io->cmd;
io->cmd = NULL;
}
spin_unlock(&ubq->cancel_lock);
if (!done)
io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, issue_flags);
if (!done && cmd)
io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, issue_flags);
}
/*
@ -3496,8 +3520,10 @@ static void ublk_ch_uring_cmd_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS;
struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
int ret = -ECANCELED;
if (!tw.cancel)
ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
if (ret != -EIOCBQUEUED)
io_uring_cmd_done(cmd, ret, issue_flags);
}
@ -4990,13 +5016,15 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
*/
ret = -EACCES;
} else if (copy_from_user(&ub->params, argp, ph.len)) {
/* zero out partial copy so no stale params survive */
memset(&ub->params, 0, sizeof(ub->params));
ret = -EFAULT;
} else {
/* clear all we don't support yet */
ub->params.types &= UBLK_PARAM_TYPE_ALL;
ret = ublk_validate_params(ub);
if (ret)
ub->params.types = 0;
memset(&ub->params, 0, sizeof(ub->params));
}
mutex_unlock(&ub->mutex);