mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
mm: prepare to move subsection_map_init() to mm/sparse-vmemmap.c
We want to move subsection_map_init() to mm/sparse-vmemmap.c. To prepare for getting rid of subsection_map_init() in mm/sparse.c completely, use a static inline function for !CONFIG_SPARSEMEM_VMEMMAP. While at it, move the declaration to internal.h and rename it to "sparse_init_subsection_map()". Link: https://lkml.kernel.org/r/20260320-sparsemem_cleanups-v2-11-096addc8800d@kernel.org Signed-off-by: David Hildenbrand (Arm) <david@kernel.org> Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org> Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Cc: Axel Rasmussen <axelrasmussen@google.com> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Oscar Salvador <osalvador@suse.de> Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Vlastimil Babka <vbabka@kernel.org> Cc: Wei Xu <weixugc@google.com> Cc: Yuanchu Xie <yuanchu@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
dac89b150b
commit
fead6dcff8
@@ -1982,8 +1982,6 @@ struct mem_section_usage {
 	unsigned long pageblock_flags[0];
 };
 
-void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
-
 struct page;
 struct page_ext;
 struct mem_section {
@@ -2376,7 +2374,6 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
 #define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
 #define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
 #define pfn_in_present_section pfn_valid
-#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
 #endif /* CONFIG_SPARSEMEM */
 
 /*
@@ -959,12 +959,24 @@ void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
 		unsigned long, enum meminit_context, struct vmem_altmap *, int,
 		bool);
 
+/*
+ * mm/sparse.c
+ */
 #ifdef CONFIG_SPARSEMEM
 void sparse_init(void);
 #else
 static inline void sparse_init(void) {}
 #endif /* CONFIG_SPARSEMEM */
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+void sparse_init_subsection_map(unsigned long pfn, unsigned long nr_pages);
+#else
+static inline void sparse_init_subsection_map(unsigned long pfn,
+		unsigned long nr_pages)
+{
+}
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
 
 /*
@@ -1896,7 +1896,7 @@ static void __init free_area_init(void)
 		pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
 			(u64)start_pfn << PAGE_SHIFT,
 			((u64)end_pfn << PAGE_SHIFT) - 1);
-		subsection_map_init(start_pfn, end_pfn - start_pfn);
+		sparse_init_subsection_map(start_pfn, end_pfn - start_pfn);
 	}
 
 	/* Initialise every node */
@@ -185,7 +185,7 @@ static void subsection_mask_set(unsigned long *map, unsigned long pfn,
 	bitmap_set(map, idx, end - idx + 1);
 }
 
-void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
+void __init sparse_init_subsection_map(unsigned long pfn, unsigned long nr_pages)
 {
 	int end_sec_nr = pfn_to_section_nr(pfn + nr_pages - 1);
 	unsigned long nr, start_sec_nr = pfn_to_section_nr(pfn);
@@ -207,10 +207,6 @@ void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
 		nr_pages -= pfns;
 	}
 }
-#else
-void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
-{
-}
 #endif
 
 /* Record a memory area against a node. */
Loading…
Reference in New Issue
Block a user