mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
net: Proxy netdev_queue_get_dma_dev for leased queues
Extend netdev_queue_get_dma_dev to return the physical device of the real rxq for DMA in case the queue was leased. This allows memory providers like io_uring zero-copy or devmem to bind to the leased physical rxq via virtual devices such as netkit. Signed-off-by: David Wei <dw@davidwei.uk> Co-developed-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Reviewed-by: Nikolay Aleksandrov <razor@blackwall.org> Link: https://patch.msgid.link/20260402231031.447597-8-daniel@iogearbox.net Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
5602ad61eb
commit
222b5566a0
|
|
@ -380,7 +380,9 @@ static inline unsigned int netif_xmit_timeout_ms(struct netdev_queue *txq)
|
|||
get_desc, start_thrs); \
|
||||
})
|
||||
|
||||
struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx);
|
||||
struct device *netdev_queue_get_dma_dev(struct net_device *dev,
|
||||
unsigned int idx,
|
||||
enum netdev_queue_type type);
|
||||
bool netdev_can_create_queue(const struct net_device *dev,
|
||||
struct netlink_ext_ack *extack);
|
||||
bool netdev_can_lease_queue(const struct net_device *dev,
|
||||
|
|
|
|||
|
|
@ -829,7 +829,8 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
|
|||
}
|
||||
netdev_hold(ifq->netdev, &ifq->netdev_tracker, GFP_KERNEL);
|
||||
|
||||
ifq->dev = netdev_queue_get_dma_dev(ifq->netdev, reg.if_rxq);
|
||||
ifq->dev = netdev_queue_get_dma_dev(ifq->netdev, reg.if_rxq,
|
||||
NETDEV_QUEUE_TYPE_RX);
|
||||
if (!ifq->dev) {
|
||||
ret = -EOPNOTSUPP;
|
||||
goto netdev_put_unlock;
|
||||
|
|
|
|||
|
|
@ -976,7 +976,8 @@ netdev_nl_get_dma_dev(struct net_device *netdev, unsigned long *rxq_bitmap,
|
|||
for_each_set_bit(rxq_idx, rxq_bitmap, netdev->real_num_rx_queues) {
|
||||
struct device *rxq_dma_dev;
|
||||
|
||||
rxq_dma_dev = netdev_queue_get_dma_dev(netdev, rxq_idx);
|
||||
rxq_dma_dev = netdev_queue_get_dma_dev(netdev, rxq_idx,
|
||||
NETDEV_QUEUE_TYPE_RX);
|
||||
if (dma_dev && rxq_dma_dev != dma_dev) {
|
||||
NL_SET_ERR_MSG_FMT(extack, "DMA device mismatch between queue %u and %u (multi-PF device?)",
|
||||
rxq_idx, prev_rxq_idx);
|
||||
|
|
@ -1153,7 +1154,7 @@ int netdev_nl_bind_tx_doit(struct sk_buff *skb, struct genl_info *info)
|
|||
goto err_unlock_netdev;
|
||||
}
|
||||
|
||||
dma_dev = netdev_queue_get_dma_dev(netdev, 0);
|
||||
dma_dev = netdev_queue_get_dma_dev(netdev, 0, NETDEV_QUEUE_TYPE_TX);
|
||||
binding = net_devmem_bind_dmabuf(netdev, dma_dev, DMA_TO_DEVICE,
|
||||
dmabuf_fd, priv, info->extack);
|
||||
if (IS_ERR(binding)) {
|
||||
|
|
|
|||
|
|
@ -6,17 +6,8 @@
|
|||
|
||||
#include "dev.h"
|
||||
|
||||
/**
|
||||
* netdev_queue_get_dma_dev() - get dma device for zero-copy operations
|
||||
* @dev: net_device
|
||||
* @idx: queue index
|
||||
*
|
||||
* Get dma device for zero-copy operations to be used for this queue.
|
||||
* When such device is not available or valid, the function will return NULL.
|
||||
*
|
||||
* Return: Device or NULL on error
|
||||
*/
|
||||
struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx)
|
||||
static struct device *
|
||||
__netdev_queue_get_dma_dev(struct net_device *dev, unsigned int idx)
|
||||
{
|
||||
const struct netdev_queue_mgmt_ops *queue_ops = dev->queue_mgmt_ops;
|
||||
struct device *dma_dev;
|
||||
|
|
@ -29,6 +20,38 @@ struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx)
|
|||
return dma_dev && dma_dev->dma_mask ? dma_dev : NULL;
|
||||
}
|
||||
|
||||
/**
 * netdev_queue_get_dma_dev() - get dma device for zero-copy operations
 * @dev: net_device
 * @idx: queue index
 * @type: queue type (RX or TX)
 *
 * Get dma device for zero-copy operations to be used for this queue. If
 * the queue is an RX queue leased from a physical queue, we retrieve the
 * physical queue's dma device. When the dma device is not available or
 * valid, the function will return NULL.
 *
 * Return: Device or NULL on error
 */
struct device *netdev_queue_get_dma_dev(struct net_device *dev,
					unsigned int idx,
					enum netdev_queue_type type)
{
	/* Keep the caller's (possibly virtual) device so the lease
	 * reference taken below can be released against it.
	 */
	struct net_device *orig_dev = dev;
	struct device *dma_dev;

	/* Only RX side supports queue leasing today. */
	if (type != NETDEV_QUEUE_TYPE_RX || !netif_rxq_is_leased(dev, idx))
		return __netdev_queue_get_dma_dev(dev, idx);

	/* Redirects @dev/@idx to the underlying physical device/queue
	 * backing the lease (passed by reference, so both may be
	 * rewritten). NOTE(review): presumably also pins the lease until
	 * the matching put below — confirm against the helper.
	 */
	if (!netif_get_rx_queue_lease_locked(&dev, &idx))
		return NULL;

	dma_dev = __netdev_queue_get_dma_dev(dev, idx);
	/* Release the lease taken above; takes both the original virtual
	 * device and the resolved physical device.
	 */
	netif_put_rx_queue_lease_locked(orig_dev, dev);
	return dma_dev;
}
|
||||
|
||||
bool netdev_can_create_queue(const struct net_device *dev,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user