net/mlx5e: SHAMPO, Always calculate page size

Adapt the rx path in SHAMPO mode to calculate the page size from the
configured page_shift when handling payload data (see the sketch below).

This is necessary as an upcoming patch will add support for using
different page sizes.

No functional changes.
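
For reference, a minimal sketch (not part of the patch itself) of the
derivation the hunks below rely on; the field names follow the driver and
BIT(n) is the kernel's (1UL << (n)) helper:

    /* Illustrative only: derive the payload page size from the RQ's
     * configured page shift instead of assuming PAGE_SIZE.
     */
    u32 page_size   = BIT(rq->mpwqe.page_shift);           /* e.g. 4K when page_shift == 12 */
    u32 data_offset = wqe_offset & (page_size - 1);        /* byte offset within that page */
    u32 page_idx    = wqe_offset >> rq->mpwqe.page_shift;  /* index of the page in the MPWQE buffer */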

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20260223204155.1783580-9-tariqt@nvidia.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

@@ -1847,11 +1847,14 @@ mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
 			   struct mlx5e_frag_page *frag_page,
 			   u32 data_bcnt, u32 data_offset)
 {
+	u32 page_size = BIT(rq->mpwqe.page_shift);
+
 	net_prefetchw(skb->data);
 
 	do {
 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
-		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
+		u32 pg_consumed_bytes = min_t(u32, page_size - data_offset,
+					      data_bcnt);
 		unsigned int truesize = pg_consumed_bytes;
 
 		mlx5e_add_skb_frag(rq, skb, frag_page, data_offset,
@@ -1872,6 +1875,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
 	struct mlx5e_frag_page *head_page = frag_page;
 	struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
+	u32 page_size = BIT(rq->mpwqe.page_shift);
 	u32 frag_offset = head_offset;
 	u32 byte_cnt = cqe_bcnt;
 	struct skb_shared_info *sinfo;
@@ -1926,9 +1930,9 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 		linear_hr = skb_headroom(skb);
 		linear_data_len = headlen;
 		linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
-		if (unlikely(frag_offset >= PAGE_SIZE)) {
+		if (unlikely(frag_offset >= page_size)) {
 			frag_page++;
-			frag_offset -= PAGE_SIZE;
+			frag_offset -= page_size;
 		}
 	}
 
@@ -1940,7 +1944,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	while (byte_cnt) {
 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
 		pg_consumed_bytes =
-			min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+			min_t(u32, page_size - frag_offset, byte_cnt);
 
 		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
 			truesize += pg_consumed_bytes;
@@ -1978,7 +1982,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	nr_frags_free = old_nr_frags - sinfo->nr_frags;
 	if (unlikely(nr_frags_free)) {
 		frag_page -= nr_frags_free;
-		truesize -= (nr_frags_free - 1) * PAGE_SIZE +
+		truesize -= (nr_frags_free - 1) * page_size +
 			    ALIGN(pg_consumed_bytes,
 				  BIT(rq->mpwqe.log_stride_sz));
 	}
@@ -2166,15 +2170,16 @@ mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
 	rq->hw_gro_data->skb = NULL;
 }
 
-static bool
-mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
+static bool mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb,
+					       u16 data_bcnt,
+					       u32 page_size)
 {
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 
-	if (PAGE_SIZE >= GRO_LEGACY_MAX_SIZE)
+	if (page_size >= GRO_LEGACY_MAX_SIZE)
 		return skb->len + data_bcnt <= GRO_LEGACY_MAX_SIZE;
 	else
-		return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
+		return page_size * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
 }
 
 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
@@ -2183,18 +2188,19 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
 	u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
 	u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset);
 	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
-	u32 data_offset = wqe_offset & (PAGE_SIZE - 1);
 	u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
 	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
-	u32 page_idx = wqe_offset >> PAGE_SHIFT;
 	u16 head_size = cqe->shampo.header_size;
 	struct sk_buff **skb = &rq->hw_gro_data->skb;
 	bool flush = cqe->shampo.flush;
 	bool match = cqe->shampo.match;
+	u32 page_size = BIT(rq->mpwqe.page_shift);
 	struct mlx5e_rq_stats *stats = rq->stats;
 	struct mlx5e_rx_wqe_ll *wqe;
 	struct mlx5e_mpw_info *wi;
 	struct mlx5_wq_ll *wq;
+	u32 data_offset;
+	u32 page_idx;
 
 	wi = mlx5e_get_mpw_info(rq, wqe_id);
 	wi->consumed_strides += cstrides;
@@ -2210,7 +2216,11 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
 		goto mpwrq_cqe_out;
 	}
 
-	if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
+	data_offset = wqe_offset & (page_size - 1);
+	page_idx = wqe_offset >> rq->mpwqe.page_shift;
+	if (*skb &&
+	    !(match && mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt,
+							 page_size))) {
 		match = false;
 		mlx5e_shampo_flush_skb(rq, cqe, match);
 	}