net: remove the netif_get_rx_queue_lease_locked() helpers

The netif_get_rx_queue_lease_locked() API hides the locking
and the descent onto the leased queue, making the code
harder to follow (at least to me). Remove the API and open
code the descent a bit. Most of the code now looks like:

 if (!leased)
     return __helper(x);

 hw_rxq = ..
 netdev_lock(hw_rxq->dev);
 ret = __helper(x);
 netdev_unlock(hw_rxq->dev);

 return ret;

Of course, if we have more code paths that need the wrapping
we may need to revisit. For now, IMHO, having to know what
netif_get_rx_queue_lease_locked() does is not worth the 20 LoC
it saves.

Link: https://patch.msgid.link/20260408151251.72bd2482@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski 2026-04-08 15:12:51 -07:00
parent 1508922588
commit 581d28606c
6 changed files with 113 additions and 93 deletions

View File

@ -76,11 +76,6 @@ struct netdev_rx_queue *
__netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq,
enum netif_lease_dir dir);
struct netdev_rx_queue *
netif_get_rx_queue_lease_locked(struct net_device **dev, unsigned int *rxq);
void netif_put_rx_queue_lease_locked(struct net_device *orig_dev,
struct net_device *dev);
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
void netdev_rx_queue_lease(struct netdev_rx_queue *rxq_dst,
struct netdev_rx_queue *rxq_src);

View File

@ -101,6 +101,7 @@ int netdev_queue_config_validate(struct net_device *dev, int rxq_idx,
bool netif_rxq_has_mp(struct net_device *dev, unsigned int rxq_idx);
bool netif_rxq_is_leased(struct net_device *dev, unsigned int rxq_idx);
bool netif_is_queue_leasee(const struct net_device *dev);
void __netif_mp_uninstall_rxq(struct netdev_rx_queue *rxq,
const struct pp_memory_provider_params *p);

View File

@ -395,8 +395,7 @@ netdev_nl_queue_fill_lease(struct sk_buff *rsp, struct net_device *netdev,
struct netdev_rx_queue *rxq;
struct net *net, *peer_net;
rxq = __netif_get_rx_queue_lease(&netdev, &q_idx,
NETIF_PHYS_TO_VIRT);
rxq = __netif_get_rx_queue_lease(&netdev, &q_idx, NETIF_PHYS_TO_VIRT);
if (!rxq || orig_netdev == netdev)
return 0;
@ -436,13 +435,45 @@ netdev_nl_queue_fill_lease(struct sk_buff *rsp, struct net_device *netdev,
return -ENOMEM;
}
static int
__netdev_nl_queue_fill_mp(struct sk_buff *rsp, struct netdev_rx_queue *rxq)
{
struct pp_memory_provider_params *params = &rxq->mp_params;
if (params->mp_ops &&
params->mp_ops->nl_fill(params->mp_priv, rsp, rxq))
return -EMSGSIZE;
#ifdef CONFIG_XDP_SOCKETS
if (rxq->pool)
if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK))
return -EMSGSIZE;
#endif
return 0;
}
/* Fill memory-provider attributes for @rxq, descending onto the physical
 * device's queue when @rxq is leased. Lock order is always virtual ->
 * physical, so taking the physical dev lock here is safe.
 */
static int
netdev_nl_queue_fill_mp(struct sk_buff *rsp, struct net_device *netdev,
			struct netdev_rx_queue *rxq)
{
	struct netdev_rx_queue *lease = rxq->lease;
	int ret;

	/* No lease, or we are not on the virtual side: report directly. */
	if (!lease || !netif_is_queue_leasee(netdev))
		return __netdev_nl_queue_fill_mp(rsp, rxq);

	netdev_lock(lease->dev);
	ret = __netdev_nl_queue_fill_mp(rsp, lease);
	netdev_unlock(lease->dev);

	return ret;
}
static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
u32 q_idx, u32 q_type, const struct genl_info *info)
{
struct pp_memory_provider_params *params;
struct net_device *orig_netdev = netdev;
struct netdev_rx_queue *rxq, *rxq_lease;
struct netdev_rx_queue *rxq;
struct netdev_queue *txq;
void *hdr;
@ -462,20 +493,8 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
goto nla_put_failure;
if (netdev_nl_queue_fill_lease(rsp, netdev, q_idx, q_type))
goto nla_put_failure;
rxq_lease = netif_get_rx_queue_lease_locked(&netdev, &q_idx);
if (rxq_lease)
rxq = rxq_lease;
params = &rxq->mp_params;
if (params->mp_ops &&
params->mp_ops->nl_fill(params->mp_priv, rsp, rxq))
goto nla_put_failure_lease;
#ifdef CONFIG_XDP_SOCKETS
if (rxq->pool)
if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK))
goto nla_put_failure_lease;
#endif
netif_put_rx_queue_lease_locked(orig_netdev, netdev);
if (netdev_nl_queue_fill_mp(rsp, netdev, rxq))
goto nla_put_failure;
break;
case NETDEV_QUEUE_TYPE_TX:
txq = netdev_get_tx_queue(netdev, q_idx);
@ -493,8 +512,6 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
return 0;
nla_put_failure_lease:
netif_put_rx_queue_lease_locked(orig_netdev, netdev);
nla_put_failure:
genlmsg_cancel(rsp, hdr);
return -EMSGSIZE;

View File

@ -37,18 +37,24 @@ struct device *netdev_queue_get_dma_dev(struct net_device *dev,
unsigned int idx,
enum netdev_queue_type type)
{
struct net_device *orig_dev = dev;
struct netdev_rx_queue *hw_rxq;
struct device *dma_dev;
netdev_ops_assert_locked(dev);
/* Only RX side supports queue leasing today. */
if (type != NETDEV_QUEUE_TYPE_RX || !netif_rxq_is_leased(dev, idx))
return __netdev_queue_get_dma_dev(dev, idx);
if (!netif_get_rx_queue_lease_locked(&dev, &idx))
if (!netif_is_queue_leasee(dev))
return NULL;
dma_dev = __netdev_queue_get_dma_dev(dev, idx);
netif_put_rx_queue_lease_locked(orig_dev, dev);
hw_rxq = __netif_get_rx_queue(dev, idx)->lease;
netdev_lock(hw_rxq->dev);
idx = get_netdev_rx_queue_index(hw_rxq);
dma_dev = __netdev_queue_get_dma_dev(hw_rxq->dev, idx);
netdev_unlock(hw_rxq->dev);
return dma_dev;
}

View File

@ -57,6 +57,11 @@ static bool netif_lease_dir_ok(const struct net_device *dev,
return false;
}
/* True if @dev is on the virtual (leasee) side of a queue lease. */
bool netif_is_queue_leasee(const struct net_device *dev)
{
	bool leasee = netif_lease_dir_ok(dev, NETIF_VIRT_TO_PHYS);

	return leasee;
}
struct netdev_rx_queue *
__netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq_idx,
enum netif_lease_dir dir)
@ -74,29 +79,6 @@ __netif_get_rx_queue_lease(struct net_device **dev, unsigned int *rxq_idx,
return rxq;
}
struct netdev_rx_queue *
netif_get_rx_queue_lease_locked(struct net_device **dev, unsigned int *rxq_idx)
{
struct net_device *orig_dev = *dev;
struct netdev_rx_queue *rxq;
/* Locking order is always from the virtual to the physical device
* see netdev_nl_queue_create_doit().
*/
netdev_ops_assert_locked(orig_dev);
rxq = __netif_get_rx_queue_lease(dev, rxq_idx, NETIF_VIRT_TO_PHYS);
if (rxq && orig_dev != *dev)
netdev_lock(*dev);
return rxq;
}
void netif_put_rx_queue_lease_locked(struct net_device *orig_dev,
struct net_device *dev)
{
if (orig_dev != dev)
netdev_unlock(dev);
}
/* See also page_pool_is_unreadable() */
bool netif_rxq_has_unreadable_mp(struct net_device *dev, unsigned int rxq_idx)
{
@ -264,7 +246,6 @@ int netif_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *p,
struct netlink_ext_ack *extack)
{
struct net_device *orig_dev = dev;
int ret;
if (!netdev_need_ops_lock(dev))
@ -279,19 +260,18 @@ int netif_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
if (!netif_rxq_is_leased(dev, rxq_idx))
return __netif_mp_open_rxq(dev, rxq_idx, p, extack);
if (!netif_get_rx_queue_lease_locked(&dev, &rxq_idx)) {
if (!__netif_get_rx_queue_lease(&dev, &rxq_idx, NETIF_VIRT_TO_PHYS)) {
NL_SET_ERR_MSG(extack, "rx queue leased to a virtual netdev");
return -EBUSY;
}
if (!dev->dev.parent) {
NL_SET_ERR_MSG(extack, "rx queue belongs to a virtual netdev");
ret = -EOPNOTSUPP;
goto out;
return -EOPNOTSUPP;
}
netdev_lock(dev);
ret = __netif_mp_open_rxq(dev, rxq_idx, p, extack);
out:
netif_put_rx_queue_lease_locked(orig_dev, dev);
netdev_unlock(dev);
return ret;
}
@ -326,18 +306,18 @@ static void __netif_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
void netif_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
const struct pp_memory_provider_params *old_p)
{
struct net_device *orig_dev = dev;
if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
return;
if (!netif_rxq_is_leased(dev, ifq_idx))
return __netif_mp_close_rxq(dev, ifq_idx, old_p);
if (WARN_ON_ONCE(!netif_get_rx_queue_lease_locked(&dev, &ifq_idx)))
if (!__netif_get_rx_queue_lease(&dev, &ifq_idx, NETIF_VIRT_TO_PHYS)) {
WARN_ON_ONCE(1);
return;
}
netdev_lock(dev);
__netif_mp_close_rxq(dev, ifq_idx, old_p);
netif_put_rx_queue_lease_locked(orig_dev, dev);
netdev_unlock(dev);
}
void __netif_mp_uninstall_rxq(struct netdev_rx_queue *rxq,

View File

@ -31,6 +31,8 @@
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>
#include "../core/dev.h"
#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"
@ -117,20 +119,42 @@ struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);
/* Drop the XSK pool reference from the RX and TX queue slots for
 * @queue_id on @dev (the pool is stored on both sides when in range).
 */
static void __xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	bool in_rx = queue_id < dev->num_rx_queues;
	bool in_tx = queue_id < dev->num_tx_queues;

	if (in_rx)
		dev->_rx[queue_id].pool = NULL;
	if (in_tx)
		dev->_tx[queue_id].pool = NULL;
}
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
struct net_device *orig_dev = dev;
unsigned int id = queue_id;
struct netdev_rx_queue *hw_rxq;
if (id < dev->real_num_rx_queues)
WARN_ON_ONCE(!netif_get_rx_queue_lease_locked(&dev, &id));
if (!netif_rxq_is_leased(dev, queue_id))
return __xsk_clear_pool_at_qid(dev, queue_id);
WARN_ON_ONCE(!netif_is_queue_leasee(dev));
if (id < dev->num_rx_queues)
dev->_rx[id].pool = NULL;
if (id < dev->num_tx_queues)
dev->_tx[id].pool = NULL;
hw_rxq = __netif_get_rx_queue(dev, queue_id)->lease;
netif_put_rx_queue_lease_locked(orig_dev, dev);
netdev_lock(hw_rxq->dev);
queue_id = get_netdev_rx_queue_index(hw_rxq);
__xsk_clear_pool_at_qid(hw_rxq->dev, queue_id);
netdev_unlock(hw_rxq->dev);
}
/* Register @pool for @queue_id on @dev, storing it in both the RX and
 * TX queue slots where the id is in range.
 * Returns 0 on success, -EBUSY if a pool is already registered there.
 */
static int __xsk_reg_pool_at_qid(struct net_device *dev,
				 struct xsk_buff_pool *pool, u16 queue_id)
{
	/* Only one pool may be registered per queue id. */
	if (xsk_get_pool_from_qid(dev, queue_id))
		return -EBUSY;

	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;

	return 0;
}
/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
@ -140,29 +164,26 @@ void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
u16 queue_id)
{
struct net_device *orig_dev = dev;
unsigned int id = queue_id;
int ret = 0;
struct netdev_rx_queue *hw_rxq;
int ret;
if (id >= max(dev->real_num_rx_queues,
dev->real_num_tx_queues))
if (queue_id >= max(dev->real_num_rx_queues,
dev->real_num_tx_queues))
return -EINVAL;
if (id < dev->real_num_rx_queues) {
if (!netif_get_rx_queue_lease_locked(&dev, &id))
return -EBUSY;
if (xsk_get_pool_from_qid(dev, id)) {
ret = -EBUSY;
goto out;
}
}
if (queue_id >= dev->real_num_rx_queues ||
!netif_rxq_is_leased(dev, queue_id))
return __xsk_reg_pool_at_qid(dev, pool, queue_id);
if (!netif_is_queue_leasee(dev))
return -EBUSY;
hw_rxq = __netif_get_rx_queue(dev, queue_id)->lease;
netdev_lock(hw_rxq->dev);
queue_id = get_netdev_rx_queue_index(hw_rxq);
ret = __xsk_reg_pool_at_qid(hw_rxq->dev, pool, queue_id);
netdev_unlock(hw_rxq->dev);
if (id < dev->real_num_rx_queues)
dev->_rx[id].pool = pool;
if (id < dev->real_num_tx_queues)
dev->_tx[id].pool = pool;
out:
netif_put_rx_queue_lease_locked(orig_dev, dev);
return ret;
}