mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
RDMA: Properly propagate the number of CQEs as unsigned int
Instead of checking whether the number of CQEs is negative or zero, fix the .resize_user_cq() declaration to use unsigned int. This better reflects the expected value range. The sanity check is then handled correctly in ib_uverbs. Link: https://patch.msgid.link/20260319-resize_cq-cqe-v1-1-b78c6efc1def@nvidia.com Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev> Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
This commit is contained in:
parent
ce68351be0
commit
dc76086a2d
|
|
@ -1138,6 +1138,9 @@ static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!cmd.cqe)
|
||||
return -EINVAL;
|
||||
|
||||
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
|
||||
if (IS_ERR(cq))
|
||||
return PTR_ERR(cq);
|
||||
|
|
|
|||
|
|
@ -3551,7 +3551,8 @@ static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
|
|||
}
|
||||
}
|
||||
|
||||
int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
|
||||
int bnxt_re_resize_cq(struct ib_cq *ibcq, unsigned int cqe,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct bnxt_qplib_sg_info sg_info = {};
|
||||
struct bnxt_qplib_dpi *orig_dpi = NULL;
|
||||
|
|
@ -3577,11 +3578,8 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
|
|||
}
|
||||
|
||||
/* Check the requested cq depth out of supported depth */
|
||||
if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
|
||||
ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
|
||||
cq->qplib_cq.id, cqe);
|
||||
if (cqe > dev_attr->max_cq_wqes)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
|
||||
entries = bnxt_re_init_depth(cqe + 1, uctx);
|
||||
|
|
|
|||
|
|
@ -255,7 +255,8 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|||
struct uverbs_attr_bundle *attrs);
|
||||
int bnxt_re_create_user_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
||||
struct uverbs_attr_bundle *attrs);
|
||||
int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
|
||||
int bnxt_re_resize_cq(struct ib_cq *ibcq, unsigned int cqe,
|
||||
struct ib_udata *udata);
|
||||
int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
|
||||
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
|
||||
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
|
||||
|
|
|
|||
|
|
@ -2012,7 +2012,7 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
|
|||
* @entries: desired cq size
|
||||
* @udata: user data
|
||||
*/
|
||||
static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
|
||||
static int irdma_resize_cq(struct ib_cq *ibcq, unsigned int entries,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
|
||||
|
|
|
|||
|
|
@ -414,7 +414,8 @@ static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
|
|||
++cq->mcq.cons_index;
|
||||
}
|
||||
|
||||
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
|
||||
int mlx4_ib_resize_cq(struct ib_cq *ibcq, unsigned int entries,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
|
||||
struct mlx4_ib_cq *cq = to_mcq(ibcq);
|
||||
|
|
@ -423,7 +424,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
|
|||
int err;
|
||||
|
||||
mutex_lock(&cq->resize_mutex);
|
||||
if (entries < 1 || entries > dev->dev->caps.max_cqes) {
|
||||
if (entries > dev->dev->caps.max_cqes) {
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -767,7 +767,8 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
|
|||
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
|
||||
unsigned int *sg_offset);
|
||||
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
|
||||
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
|
||||
int mlx4_ib_resize_cq(struct ib_cq *ibcq, unsigned int entries,
|
||||
struct ib_udata *udata);
|
||||
int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
||||
struct uverbs_attr_bundle *attrs);
|
||||
int mlx4_ib_create_user_cq(struct ib_cq *ibcq,
|
||||
|
|
|
|||
|
|
@ -1335,7 +1335,8 @@ static int copy_resize_cqes(struct mlx5_ib_cq *cq)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
|
||||
int mlx5_ib_resize_cq(struct ib_cq *ibcq, unsigned int entries,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
|
||||
struct mlx5_ib_cq *cq = to_mcq(ibcq);
|
||||
|
|
@ -1355,13 +1356,8 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
|
|||
return -ENOSYS;
|
||||
}
|
||||
|
||||
if (entries < 1 ||
|
||||
entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
|
||||
mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
|
||||
entries,
|
||||
1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
|
||||
if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
entries = roundup_pow_of_two(entries + 1);
|
||||
if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
|
||||
|
|
|
|||
|
|
@ -1309,7 +1309,8 @@ int mlx5_ib_pre_destroy_cq(struct ib_cq *cq);
|
|||
void mlx5_ib_post_destroy_cq(struct ib_cq *cq);
|
||||
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
|
||||
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
|
||||
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
|
||||
int mlx5_ib_resize_cq(struct ib_cq *ibcq, unsigned int entries,
|
||||
struct ib_udata *udata);
|
||||
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
|
||||
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
||||
u64 virt_addr, int access_flags,
|
||||
|
|
|
|||
|
|
@ -695,7 +695,8 @@ static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
|
||||
static int mthca_resize_cq(struct ib_cq *ibcq, unsigned int entries,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct mthca_dev *dev = to_mdev(ibcq->device);
|
||||
struct mthca_cq *cq = to_mcq(ibcq);
|
||||
|
|
@ -703,7 +704,7 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
|
|||
u32 lkey;
|
||||
int ret;
|
||||
|
||||
if (entries < 1 || entries > dev->limits.max_cqes)
|
||||
if (entries > dev->limits.max_cqes)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&cq->mutex);
|
||||
|
|
|
|||
|
|
@ -1013,18 +1013,16 @@ int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|||
return status;
|
||||
}
|
||||
|
||||
int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
|
||||
int ocrdma_resize_cq(struct ib_cq *ibcq, unsigned int new_cnt,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
int status = 0;
|
||||
struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
|
||||
|
||||
if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
|
||||
status = -EINVAL;
|
||||
return status;
|
||||
}
|
||||
if (new_cnt > cq->max_hw_cqe)
|
||||
return -EINVAL;
|
||||
|
||||
ibcq->cqe = new_cnt;
|
||||
return status;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ocrdma_flush_cq(struct ocrdma_cq *cq)
|
||||
|
|
|
|||
|
|
@ -71,7 +71,7 @@ int ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
|
|||
|
||||
int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
||||
struct uverbs_attr_bundle *attrs);
|
||||
int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
|
||||
int ocrdma_resize_cq(struct ib_cq *, unsigned int cqe, struct ib_udata *);
|
||||
int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
|
||||
|
||||
int ocrdma_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
|
||||
|
|
|
|||
|
|
@ -337,7 +337,7 @@ int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
|
|||
*
|
||||
* Return: 0 for success.
|
||||
*/
|
||||
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
|
||||
int rvt_resize_cq(struct ib_cq *ibcq, unsigned int cqe, struct ib_udata *udata)
|
||||
{
|
||||
struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
|
||||
u32 head, tail, n;
|
||||
|
|
@ -349,7 +349,7 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
|
|||
struct rvt_k_cq_wc *k_wc = NULL;
|
||||
struct rvt_k_cq_wc *old_k_wc = NULL;
|
||||
|
||||
if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
|
||||
if (cqe > rdi->dparms.props.max_cqe)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|||
struct uverbs_attr_bundle *attrs);
|
||||
int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
|
||||
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
|
||||
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
|
||||
int rvt_resize_cq(struct ib_cq *ibcq, unsigned int cqe, struct ib_udata *udata);
|
||||
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
|
||||
int rvt_driver_cq_init(void);
|
||||
void rvt_cq_exit(void);
|
||||
|
|
|
|||
|
|
@ -8,37 +8,6 @@
|
|||
#include "rxe_loc.h"
|
||||
#include "rxe_queue.h"
|
||||
|
||||
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
|
||||
int cqe, int comp_vector)
|
||||
{
|
||||
int count;
|
||||
|
||||
if (cqe <= 0) {
|
||||
rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe);
|
||||
goto err1;
|
||||
}
|
||||
|
||||
if (cqe > rxe->attr.max_cqe) {
|
||||
rxe_dbg_dev(rxe, "cqe(%d) > max_cqe(%d)\n",
|
||||
cqe, rxe->attr.max_cqe);
|
||||
goto err1;
|
||||
}
|
||||
|
||||
if (cq) {
|
||||
count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
|
||||
if (cqe < count) {
|
||||
rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n",
|
||||
cqe, count);
|
||||
goto err1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err1:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
|
||||
int comp_vector, struct ib_udata *udata,
|
||||
struct rxe_create_cq_resp __user *uresp)
|
||||
|
|
|
|||
|
|
@ -18,9 +18,6 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr);
|
|||
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt, struct rxe_ah **ahp);
|
||||
|
||||
/* rxe_cq.c */
|
||||
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
|
||||
int cqe, int comp_vector);
|
||||
|
||||
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
|
||||
int comp_vector, struct ib_udata *udata,
|
||||
struct rxe_create_cq_resp __user *uresp);
|
||||
|
|
|
|||
|
|
@ -1097,11 +1097,8 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|||
goto err_out;
|
||||
}
|
||||
|
||||
err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
|
||||
if (err) {
|
||||
rxe_dbg_dev(rxe, "bad init attributes, err = %d\n", err);
|
||||
goto err_out;
|
||||
}
|
||||
if (attr->cqe > rxe->attr.max_cqe)
|
||||
return -EINVAL;
|
||||
|
||||
err = rxe_add_to_pool(&rxe->cq_pool, cq);
|
||||
if (err) {
|
||||
|
|
@ -1127,7 +1124,8 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
|
|||
return err;
|
||||
}
|
||||
|
||||
static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
|
||||
static int rxe_resize_cq(struct ib_cq *ibcq, unsigned int cqe,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct rxe_cq *cq = to_rcq(ibcq);
|
||||
struct rxe_dev *rxe = to_rdev(ibcq->device);
|
||||
|
|
@ -1143,11 +1141,9 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
|
|||
uresp = udata->outbuf;
|
||||
}
|
||||
|
||||
err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
|
||||
if (err) {
|
||||
rxe_dbg_cq(cq, "bad attr, err = %d\n", err);
|
||||
goto err_out;
|
||||
}
|
||||
if (cqe > rxe->attr.max_cqe ||
|
||||
cqe < queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT))
|
||||
return -EINVAL;
|
||||
|
||||
err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
|
||||
if (err) {
|
||||
|
|
|
|||
|
|
@ -2634,7 +2634,7 @@ struct ib_device_ops {
|
|||
struct uverbs_attr_bundle *attrs);
|
||||
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
|
||||
int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
|
||||
int (*resize_user_cq)(struct ib_cq *cq, int cqe,
|
||||
int (*resize_user_cq)(struct ib_cq *cq, unsigned int cqe,
|
||||
struct ib_udata *udata);
|
||||
/*
|
||||
* pre_destroy_cq - Prevent a cq from generating any new work
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user