mirror of
https://github.com/torvalds/linux.git
synced 2026-05-13 00:28:54 +02:00
Currently, the condition 'page->pp_magic == PP_SIGNATURE' is used to determine if a page belongs to a page pool. However, with the planned removal of @pp_magic, we should instead leverage the page_type in struct page, such as PGTY_netpp, for this purpose. Introduce and use the page type APIs e.g. PageNetpp(), __SetPageNetpp(), and __ClearPageNetpp() instead, and remove the existing APIs accessing @pp_magic e.g. page_pool_page_is_pp(), netmem_or_pp_magic(), and netmem_clear_pp_magic(). Plus, add @page_type to struct net_iov at the same offset as struct page so as to use the page_type APIs for struct net_iov as well. While at it, reorder @type and @owner in struct net_iov to avoid a hole and increasing the struct size. This work was inspired by the following link: https://lore.kernel.org/all/582f41c0-2742-4400-9c81-0d46bf4e8314@gmail.com/ While at it, move the sanity check for page pool to on the free path. [byungchul@sk.com: gate the sanity check, per Johannes] Link: https://lkml.kernel.org/r/20260316223113.20097-1-byungchul@sk.com Link: https://lkml.kernel.org/r/20260224051347.19621-1-byungchul@sk.com Co-developed-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Byungchul Park <byungchul@sk.com> Suggested-by: David Hildenbrand <david@redhat.com> Acked-by: David Hildenbrand <david@redhat.com> Acked-by: Zi Yan <ziy@nvidia.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com> Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Jakub Kicinski <kuba@kernel.org> Acked-by: Jesper Dangaard Brouer <hawk@kernel.org> Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org> Cc: Alexei Starovoitov <ast@kernel.org> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Cc: Brendan Jackman <jackmanb@google.com> Cc: Christian Brauner <brauner@kernel.org> Cc: Daniel Borkmann 
<daniel@iogearbox.net> Cc: David S. Miller <davem@davemloft.net> Cc: David Wei <dw@davidwei.uk> Cc: Dragos Tatulea <dtatulea@nvidia.com> Cc: Eric Dumazet <edumazet@google.com> Cc: John Fastabend <john.fastabend@gmail.com> Cc: Leon Romanovsky <leon@kernel.org> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org> Cc: Mark Bloch <mbloch@nvidia.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Michal Hocko <mhocko@suse.com> Cc: Mina Almasry <almasrymina@google.com> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Saeed Mahameed <saeedm@nvidia.com> Cc: Simon Horman <horms@kernel.org> Cc: Stanislav Fomichev <sdf@fomichev.me> Cc: Stephen Rothwell <sfr@canb.auug.org.au> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Taehee Yoo <ap420073@gmail.com> Cc: Tariq Toukan <tariqt@nvidia.com> Cc: Usama Arif <usamaarif642@gmail.com> Cc: Yu Zhao <yuzhao@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
60 lines
1.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
#ifndef __NETMEM_PRIV_H
|
|
#define __NETMEM_PRIV_H
|
|
|
|
/* Read the pp_magic word of @netmem with the DMA-index bits masked off. */
static inline unsigned long netmem_get_pp_magic(netmem_ref netmem)
{
	unsigned long magic = netmem_to_nmdesc(netmem)->pp_magic;

	return magic & ~PP_DMA_INDEX_MASK;
}
|
|
|
|
/* Does @netmem belong to a page pool?
 *
 * XXX: Now that the offset of page_type is shared between struct page
 * and struct net_iov, strip the NET_IOV bit (if any) and treat the
 * result as a struct page unconditionally, whichever of the two types
 * the netmem actually refers to.  This must be revisited once the
 * offset is no longer shared.
 */
static inline bool netmem_is_pp(netmem_ref netmem)
{
	unsigned long addr = (__force unsigned long)netmem;

	return PageNetpp((struct page *)(addr & ~NET_IOV));
}
|
|
|
|
/* Record @pool as the page pool that owns @netmem. */
static inline void netmem_set_pp(netmem_ref netmem, struct page_pool *pool)
{
	netmem_to_nmdesc(netmem)->pp = pool;
}
|
|
|
|
/* Store @dma_addr in the descriptor backing @netmem. */
static inline void netmem_set_dma_addr(netmem_ref netmem,
				       unsigned long dma_addr)
{
	netmem_to_nmdesc(netmem)->dma_addr = dma_addr;
}
|
|
|
|
/* Extract the DMA index encoded in the pp_magic bits of a page-backed
 * netmem.  A net_iov-backed netmem triggers a one-shot warning and
 * yields 0.
 */
static inline unsigned long netmem_get_dma_index(netmem_ref netmem)
{
	if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
		return 0;

	return (netmem_to_nmdesc(netmem)->pp_magic & PP_DMA_INDEX_MASK) >>
	       PP_DMA_INDEX_SHIFT;
}
|
|
|
|
/* Encode @id into the DMA-index bits of the pp_magic word of a
 * page-backed netmem, preserving the non-index magic bits.  A
 * net_iov-backed netmem triggers a one-shot warning and is left
 * untouched.
 */
static inline void netmem_set_dma_index(netmem_ref netmem,
					unsigned long id)
{
	unsigned long val;

	if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
		return;

	val = netmem_get_pp_magic(netmem) | (id << PP_DMA_INDEX_SHIFT);
	netmem_to_nmdesc(netmem)->pp_magic = val;
}
|
|
#endif
|