mm: introduce a new page type for page pool in page type

Currently, the condition 'page->pp_magic == PP_SIGNATURE' is used to
determine whether a page belongs to a page pool.  However, with the
planned removal of @pp_magic, we should instead leverage the page_type
field in struct page, i.e. the new PGTY_netpp type, for this purpose.

Introduce and use the page type APIs instead, e.g.  PageNetpp(),
__SetPageNetpp(), and __ClearPageNetpp(), and remove the existing APIs
that access @pp_magic, e.g.  page_pool_page_is_pp(),
netmem_or_pp_magic(), and netmem_clear_pp_magic().
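
In effect, the membership test changes from matching a magic value to
testing a dedicated page type.  As an illustrative sketch distilled
from the hunks below (PageNetpp() and friends are generated by
PAGE_TYPE_OPS()):

	/* Before: match PP_SIGNATURE in @pp_magic, a field that
	 * overlaps page->lru.next and also carries DMA index bits.
	 */
	return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;

	/* After: test PGTY_netpp encoded in page->page_type. */
	return PageNetpp(page);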

Plus, add @page_type to struct net_iov at the same offset as in struct
page so that the page_type APIs can be used for struct net_iov as well.
While at it, reorder @type and @owner in struct net_iov to avoid a hole
and an increase in the struct size.
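
For illustration, assuming a 64-bit kernel where page_type sits at byte
offset 48 in struct page and the enum takes 4 bytes (a layout sketch,
not part of the patch), the tail of struct net_iov then packs with no
holes:

	unsigned int page_type;		/* offset 48, 4 bytes */
	enum net_iov_type type;		/* offset 52, 4 bytes */
	struct net_iov_area *owner;	/* offset 56, 8 bytes; size = 64 */

Keeping @owner ahead of @type would instead force a 4-byte alignment
hole before the 8-byte-aligned pointer and grow the struct to 72 bytes.
The NET_IOV_ASSERT_OFFSET() check added below enforces the offset match
at build time.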

This work was inspired by the following link:

  https://lore.kernel.org/all/582f41c0-2742-4400-9c81-0d46bf4e8314@gmail.com/

Also move the sanity check for page pool pages to the free path.

[byungchul@sk.com: gate the sanity check, per Johannes]
  Link: https://lkml.kernel.org/r/20260316223113.20097-1-byungchul@sk.com
Link: https://lkml.kernel.org/r/20260224051347.19621-1-byungchul@sk.com
Co-developed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Byungchul Park <byungchul@sk.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Zi Yan <ziy@nvidia.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrew Lunn <andrew+netdev@lunn.ch>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: David Wei <dw@davidwei.uk>
Cc: Dragos Tatulea <dtatulea@nvidia.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Mark Bloch <mbloch@nvidia.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>
Cc: Simon Horman <horms@kernel.org>
Cc: Stanislav Fomichev <sdf@fomichev.me>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Taehee Yoo <ap420073@gmail.com>
Cc: Tariq Toukan <tariqt@nvidia.com>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Commit db359fccf2 (parent 92a9cf97a4)
Author: Byungchul Park, 2026-02-24 14:13:47 +09:00; committed by Andrew Morton
7 changed files with 64 additions and 46 deletions

@@ -707,7 +707,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
 	xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
 	page = xdpi.page.page;
 
-	/* No need to check page_pool_page_is_pp() as we
+	/* No need to check PageNetpp() as we
 	 * know this is a page_pool page.
 	 */
 	page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp,

@@ -4840,10 +4840,9 @@ int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
  * DMA mapping IDs for page_pool
  *
  * When DMA-mapping a page, page_pool allocates an ID (from an xarray) and
- * stashes it in the upper bits of page->pp_magic. We always want to be able to
- * unambiguously identify page pool pages (using page_pool_page_is_pp()). Non-PP
- * pages can have arbitrary kernel pointers stored in the same field as pp_magic
- * (since it overlaps with page->lru.next), so we must ensure that we cannot
+ * stashes it in the upper bits of page->pp_magic. Non-PP pages can have
+ * arbitrary kernel pointers stored in the same field as pp_magic (since
+ * it overlaps with page->lru.next), so we must ensure that we cannot
  * mistake a valid kernel pointer with any of the values we write into this
  * field.
  *
@@ -4878,26 +4877,6 @@ int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
 #define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \
 				  PP_DMA_INDEX_SHIFT)
 
-/* Mask used for checking in page_pool_page_is_pp() below. page->pp_magic is
- * OR'ed with PP_SIGNATURE after the allocation in order to preserve bit 0 for
- * the head page of compound page and bit 1 for pfmemalloc page, as well as the
- * bits used for the DMA index. page_is_pfmemalloc() is checked in
- * __page_pool_put_page() to avoid recycling the pfmemalloc page.
- */
-#define PP_MAGIC_MASK ~(PP_DMA_INDEX_MASK | 0x3UL)
-
-#ifdef CONFIG_PAGE_POOL
-static inline bool page_pool_page_is_pp(const struct page *page)
-{
-	return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
-}
-#else
-static inline bool page_pool_page_is_pp(const struct page *page)
-{
-	return false;
-}
-#endif
-
 #define PAGE_SNAPSHOT_FAITHFUL (1 << 0)
 #define PAGE_SNAPSHOT_PG_BUDDY (1 << 1)
 #define PAGE_SNAPSHOT_PG_IDLE (1 << 2)

@@ -923,6 +923,7 @@ enum pagetype {
 	PGTY_zsmalloc		= 0xf6,
 	PGTY_unaccepted		= 0xf7,
 	PGTY_large_kmalloc	= 0xf8,
+	PGTY_netpp		= 0xf9,
 	PGTY_mapcount_underflow = 0xff
 };
 
@@ -1055,6 +1056,11 @@ PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
 PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
 PAGE_TYPE_OPS(LargeKmalloc, large_kmalloc, large_kmalloc)
 
+/*
+ * Marks page_pool allocated pages.
+ */
+PAGE_TYPE_OPS(Netpp, netpp, netpp)
+
 /**
  * PageHuge - Determine if the page belongs to hugetlbfs
  * @page: The page to test.

@@ -110,10 +110,21 @@ struct net_iov {
 		atomic_long_t pp_ref_count;
 	};
 	};
-	struct net_iov_area *owner;
+	unsigned int page_type;
 	enum net_iov_type type;
+	struct net_iov_area *owner;
 };
 
+/* Make sure 'the offset of page_type in struct page == the offset of
+ * type in struct net_iov'.
+ */
+#define NET_IOV_ASSERT_OFFSET(pg, iov)             \
+	static_assert(offsetof(struct page, pg) == \
+		      offsetof(struct net_iov, iov))
+NET_IOV_ASSERT_OFFSET(page_type, page_type);
+#undef NET_IOV_ASSERT_OFFSET
+
 struct net_iov_area {
 	/* Array of net_iovs for this area. */
 	struct net_iov *niovs;
@@ -256,7 +267,7 @@ static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
  */
 #define pp_page_to_nmdesc(p)					\
 ({								\
-	DEBUG_NET_WARN_ON_ONCE(!page_pool_page_is_pp(p));	\
+	DEBUG_NET_WARN_ON_ONCE(!PageNetpp(p));			\
 	__pp_page_to_nmdesc(p);					\
 })

@@ -1043,7 +1043,6 @@ static inline bool page_expected_state(struct page *page,
 #ifdef CONFIG_MEMCG
 			page->memcg_data |
 #endif
-			page_pool_page_is_pp(page) |
 			(page->flags.f & check_flags)))
 		return false;
 
@@ -1070,8 +1069,6 @@ static const char *page_bad_reason(struct page *page, unsigned long flags)
 	if (unlikely(page->memcg_data))
 		bad_reason = "page still charged to cgroup";
 #endif
-	if (unlikely(page_pool_page_is_pp(page)))
-		bad_reason = "page_pool leak";
 	return bad_reason;
 }
 
@@ -1380,9 +1377,17 @@ __always_inline bool __free_pages_prepare(struct page *page,
 		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
 		folio->mapping = NULL;
 	}
-	if (unlikely(page_has_type(page)))
+	if (unlikely(page_has_type(page))) {
+		/* networking expects to clear its page type before releasing */
+		if (is_check_pages_enabled()) {
+			if (unlikely(PageNetpp(page))) {
+				bad_page(page, "page_pool leak");
+				return false;
+			}
+		}
 		/* Reset the page_type (which overlays _mapcount) */
 		page->page_type = UINT_MAX;
+	}
 
 	if (is_check_pages_enabled()) {
 		if (free_page_is_bad(page))

@@ -8,21 +8,18 @@ static inline unsigned long netmem_get_pp_magic(netmem_ref netmem)
 	return netmem_to_nmdesc(netmem)->pp_magic & ~PP_DMA_INDEX_MASK;
 }
 
-static inline void netmem_or_pp_magic(netmem_ref netmem, unsigned long pp_magic)
-{
-	netmem_to_nmdesc(netmem)->pp_magic |= pp_magic;
-}
-
-static inline void netmem_clear_pp_magic(netmem_ref netmem)
-{
-	WARN_ON_ONCE(netmem_to_nmdesc(netmem)->pp_magic & PP_DMA_INDEX_MASK);
-
-	netmem_to_nmdesc(netmem)->pp_magic = 0;
-}
-
 static inline bool netmem_is_pp(netmem_ref netmem)
 {
-	return (netmem_get_pp_magic(netmem) & PP_MAGIC_MASK) == PP_SIGNATURE;
+	struct page *page;
+
+	/* XXX: Now that the offset of page_type is shared between
+	 * struct page and net_iov, just cast the netmem to struct page
+	 * unconditionally by clearing NET_IOV if any, no matter whether
+	 * it comes from struct net_iov or struct page. This should be
+	 * adjusted once the offset is no longer shared.
+	 */
+	page = (struct page *)((__force unsigned long)netmem & ~NET_IOV);
+	return PageNetpp(page);
 }
 
 static inline void netmem_set_pp(netmem_ref netmem, struct page_pool *pool)

@@ -702,8 +702,18 @@ s32 page_pool_inflight(const struct page_pool *pool, bool strict)
 
 void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
 {
+	struct page *page;
+
 	netmem_set_pp(netmem, pool);
-	netmem_or_pp_magic(netmem, PP_SIGNATURE);
+
+	/* XXX: Now that the offset of page_type is shared between
+	 * struct page and net_iov, just cast the netmem to struct page
+	 * unconditionally by clearing NET_IOV if any, no matter whether
+	 * it comes from struct net_iov or struct page. This should be
+	 * adjusted once the offset is no longer shared.
+	 */
+	page = (struct page *)((__force unsigned long)netmem & ~NET_IOV);
+	__SetPageNetpp(page);
 
 	/* Ensuring all pages have been split into one fragment initially:
 	 * page_pool_set_pp_info() is only called once for every page when it
@@ -718,7 +728,17 @@ void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
 
 void page_pool_clear_pp_info(netmem_ref netmem)
 {
-	netmem_clear_pp_magic(netmem);
+	struct page *page;
+
+	/* XXX: Now that the offset of page_type is shared between
+	 * struct page and net_iov, just cast the netmem to struct page
+	 * unconditionally by clearing NET_IOV if any, no matter whether
+	 * it comes from struct net_iov or struct page. This should be
+	 * adjusted once the offset is no longer shared.
+	 */
+	page = (struct page *)((__force unsigned long)netmem & ~NET_IOV);
+	__ClearPageNetpp(page);
+
 	netmem_set_pp(netmem, NULL);
 }