memblock: updates for 7.1-rc1

 * Improve debuggability of reserve_mem kernel parameter handling with
   printouts in case of a failure and debugfs info showing what was
   actually reserved
 * Make memblock_free_late() and free_reserved_area() use the same core
   logic for freeing the memory to buddy and ensure it takes care of
   updating memblock arrays when ARCH_KEEP_MEMBLOCK is enabled.
 -----BEGIN PGP SIGNATURE-----
 
 iQFEBAABCgAuFiEEeOVYVaWZL5900a/pOQOGJssO/ZEFAmnjRmsQHHJwcHRAa2Vy
 bmVsLm9yZwAKCRA5A4Ymyw79kYh0CAC4NpZGFqpEBep1eQcfqsPH05dvp1LUXDNk
 i5GwS2ht/F5D9GcD+EyoYRQjRM8k+XZyOe3sqEF01Uav/rHAv3XrITg/pfiA92AR
 K7CvQv4NvyQqUNcv/mEb+P8niriJ4oHRXCag9inop1jo/x3Mym07oEy73rknAx9r
 ZQKwoFNOM/QQGVb9hZUANKCkE8cAsUXG89yEOH0n17FOahC0PZbK/vxjeO+br3IL
 HxEoC5l1j4cUauf8XEhsVXXdch0iqit/fB3ROePYFNCx7koVYHk6Yl1w++AM0RUA
 ypOmfPsSiqLY2ciuTIAnpTeMfQkkhEmMI3mp6T5BUBwSKJxLRaSM
 =c1xd
 -----END PGP SIGNATURE-----

Merge tag 'memblock-v7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock

Pull memblock updates from Mike Rapoport:

 - Improve debuggability of reserve_mem kernel parameter handling with
   printouts in case of a failure and debugfs info showing what was
   actually reserved

 - Make memblock_free_late() and free_reserved_area() use the same core
   logic for freeing the memory to buddy and ensure it takes care of
   updating memblock arrays when ARCH_KEEP_MEMBLOCK is enabled (a sketch
   of the resulting calling convention follows below).
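
Editor's sketch of that calling convention (not code from the series;
"early_buf" and both helpers are made up): memblock_free() now covers the
job memblock_free_late() used to do.

    #include <linux/init.h>
    #include <linux/memblock.h>
    #include <linux/sizes.h>

    static void *early_buf;	/* hypothetical boot-time buffer */

    static void __init early_buf_alloc(void)
    {
    	early_buf = memblock_alloc(SZ_64K, SZ_4K);
    }

    static int __init early_buf_free(void)
    {
    	/*
    	 * Before this series a caller running this late had to switch
    	 * APIs and call memblock_free_late(__pa(early_buf), SZ_64K).
    	 * Now memblock_free() drops the range from memblock.reserved
    	 * and, once slab_is_available(), also releases the pages to
    	 * the buddy allocator.
    	 */
    	memblock_free(early_buf, SZ_64K);
    	return 0;
    }
    late_initcall(early_buf_free);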

* tag 'memblock-v7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock:
  x86/alternative: delay freeing of smp_locks section
  memblock: warn when freeing reserved memory before memory map is initialized
  memblock, treewide: make memblock_free() handle late freeing
  memblock: make free_reserved_area() update memblock if ARCH_KEEP_MEMBLOCK=y
  memblock: extract page freeing from free_reserved_area() into a helper
  memblock: make free_reserved_area() more robust
  mm: move free_reserved_area() to mm/memblock.c
  powerpc: opal-core: pair alloc_pages_exact() with free_pages_exact()
  powerpc: fadump: pair alloc_pages_exact() with free_pages_exact()
  memblock: reserve_mem: fix end calculation in reserve_mem_release_by_name()
  memblock: move reserve_bootmem_range() to memblock.c and make it static
  memblock: Add reserve_mem debugfs info
  memblock: Print out errors on reserve_mem parser
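
The last two commits are the reserve_mem debuggability items from the
summary above; a worked illustration (the 12M size and the "oops" name are
invented, the error string and the debugfs path come from the diff below):

    # illustrative kernel command line with one named reservation
    reserve_mem=12M:4096:oops ramoops.mem_name=oops

    # a malformed triplet (e.g. a missing name) is no longer silent:
    #   reserve_mem: empty or malformed parameter

    # after boot, what was actually reserved:
    $ cat /sys/kernel/debug/memblock/reserve_mem_param
    oops		(12.0 MiB)
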
commit 9055c64567
Author: Linus Torvalds
Date:   2026-04-18 11:29:14 -07:00

25 changed files with 272 additions and 199 deletions


@@ -397,9 +397,6 @@ void free_initmem(void)
WARN_ON(!IS_ALIGNED((unsigned long)lm_init_begin, PAGE_SIZE));
WARN_ON(!IS_ALIGNED((unsigned long)lm_init_end, PAGE_SIZE));
/* Delete __init region from memblock.reserved. */
memblock_free(lm_init_begin, lm_init_end - lm_init_begin);
free_reserved_area(lm_init_begin, lm_init_end,
POISON_FREE_INITMEM, "unused kernel");
/*


@@ -775,24 +775,12 @@ void __init fadump_update_elfcore_header(char *bufp)
static void *__init fadump_alloc_buffer(unsigned long size)
{
unsigned long count, i;
struct page *page;
void *vaddr;
vaddr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
if (!vaddr)
return NULL;
count = PAGE_ALIGN(size) / PAGE_SIZE;
page = virt_to_page(vaddr);
for (i = 0; i < count; i++)
mark_page_reserved(page + i);
return vaddr;
return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
}
static void fadump_free_buffer(unsigned long vaddr, unsigned long size)
{
free_reserved_area((void *)vaddr, (void *)(vaddr + size), -1, NULL);
free_pages_exact((void *)vaddr, size);
}
s32 __init fadump_setup_cpu_notes_buf(u32 num_cpus)
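
This hunk and the two opal-core hunks below apply the same rule: a buffer
obtained from alloc_pages_exact() is ordinary buddy-allocator memory, so it
must be returned with free_pages_exact() instead of being marked reserved
by hand and later routed through free_reserved_area(). A minimal sketch of
the pairing (buffer and helpers are hypothetical):

    #include <linux/errno.h>
    #include <linux/gfp.h>

    static void *notes_buf;	/* hypothetical buffer */

    static int __init notes_buf_alloc(size_t size)
    {
    	notes_buf = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
    	return notes_buf ? 0 : -ENOMEM;
    }

    static void notes_buf_free(size_t size)
    {
    	free_pages_exact(notes_buf, size);	/* pairs with alloc_pages_exact() */
    	notes_buf = NULL;
    }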


@@ -303,7 +303,6 @@ static int __init create_opalcore(void)
struct device_node *dn;
struct opalcore *new;
loff_t opalcore_off;
struct page *page;
Elf64_Phdr *phdr;
Elf64_Ehdr *elf;
int i, ret;
@@ -328,11 +327,6 @@ static int __init create_opalcore(void)
oc_conf->opalcorebuf_sz = 0;
return -ENOMEM;
}
count = oc_conf->opalcorebuf_sz / PAGE_SIZE;
page = virt_to_page(oc_conf->opalcorebuf);
for (i = 0; i < count; i++)
mark_page_reserved(page + i);
pr_debug("opalcorebuf = 0x%llx\n", (u64)oc_conf->opalcorebuf);
/* Read OPAL related device-tree entries */
@@ -437,10 +431,7 @@ static void opalcore_cleanup(void)
/* free the buffer used for setting up OPAL core */
if (oc_conf->opalcorebuf) {
void *end = (void *)((u64)oc_conf->opalcorebuf +
oc_conf->opalcorebuf_sz);
free_reserved_area(oc_conf->opalcorebuf, end, -1, NULL);
free_pages_exact(oc_conf->opalcorebuf, oc_conf->opalcorebuf_sz);
oc_conf->opalcorebuf = NULL;
oc_conf->opalcorebuf_sz = 0;
}


@@ -183,14 +183,12 @@ static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size
static void __init mdesc_memblock_free(struct mdesc_handle *hp)
{
unsigned int alloc_size;
unsigned long start;
BUG_ON(refcount_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
alloc_size = PAGE_ALIGN(hp->handle_size);
start = __pa(hp);
memblock_free_late(start, alloc_size);
memblock_free(hp, alloc_size);
}
static struct mdesc_mem_ops memblock_mdesc_ops = {


@@ -2448,12 +2448,6 @@ void __init alternative_instructions(void)
__smp_locks, __smp_locks_end,
_text, _etext);
}
if (!uniproc_patched || num_possible_cpus() == 1) {
free_init_pages("SMP alternatives",
(unsigned long)__smp_locks,
(unsigned long)__smp_locks_end);
}
#endif
restart_nmi();
@@ -2462,6 +2456,24 @@ void __init alternative_instructions(void)
alt_reloc_selftest();
}
#ifdef CONFIG_SMP
/*
* With CONFIG_DEFERRED_STRUCT_PAGE_INIT enabled we can free_init_pages() only
* after the deferred initialization of the memory map is complete.
*/
static int __init free_smp_locks(void)
{
if (!uniproc_patched || num_possible_cpus() == 1) {
free_init_pages("SMP alternatives",
(unsigned long)__smp_locks,
(unsigned long)__smp_locks_end);
}
return 0;
}
arch_initcall(free_smp_locks);
#endif
/**
* text_poke_early - Update instructions on a live kernel at boot time
* @addr: address to modify

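An editor's note on why an arch_initcall is late enough for the deferred
freeing above: with CONFIG_DEFERRED_STRUCT_PAGE_INIT, the remainder of the
memory map is initialized in page_alloc_init_late(), which runs before the
initcalls. A simplified sketch of the ordering, paraphrased from
init/main.c:

    /*
     * kernel_init_freeable()
     *   -> page_alloc_init_late()   // completes deferred struct-page init
     *   -> do_basic_setup()
     *        -> do_initcalls()      // arch_initcall(free_smp_locks) runs here
     */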

@@ -426,7 +426,7 @@ int __init ima_free_kexec_buffer(void)
if (!ima_kexec_buffer_size)
return -ENOENT;
memblock_free_late(ima_kexec_buffer_phys,
memblock_phys_free(ima_kexec_buffer_phys,
ima_kexec_buffer_size);
ima_kexec_buffer_phys = 0;


@@ -34,10 +34,7 @@ static
void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
{
if (flags & EFI_MEMMAP_MEMBLOCK) {
if (slab_is_available())
memblock_free_late(phys, size);
else
memblock_phys_free(phys, size);
memblock_phys_free(phys, size);
} else if (flags & EFI_MEMMAP_SLAB) {
struct page *p = pfn_to_page(PHYS_PFN(phys));
unsigned int order = get_order(size);


@@ -372,7 +372,7 @@ void __init efi_reserve_boot_services(void)
* doesn't make sense as far as the firmware is
* concerned, but it does provide us with a way to tag
* those regions that must not be paired with
* memblock_free_late().
* memblock_phys_free().
*/
md->attribute |= EFI_MEMORY_RUNTIME;
}


@@ -226,7 +226,7 @@ static int __init map_properties(void)
*/
data->len = 0;
memunmap(data);
memblock_free_late(pa_data + sizeof(*data), data_len);
memblock_phys_free(pa_data + sizeof(*data), data_len);
return ret;
}


@@ -175,7 +175,7 @@ int __init ima_free_kexec_buffer(void)
if (ret)
return ret;
memblock_free_late(addr, size);
memblock_phys_free(addr, size);
return 0;
}
#endif


@@ -44,10 +44,6 @@ static inline void free_bootmem_page(struct page *page)
{
enum bootmem_type type = bootmem_type(page);
/*
* The reserve_bootmem_region sets the reserved flag on bootmem
* pages.
*/
VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
if (type == SECTION_INFO || type == MIX_SECTION_INFO)


@@ -173,8 +173,6 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
void memblock_free_late(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
phys_addr_t *out_start,


@@ -3928,9 +3928,6 @@ extern unsigned long free_reserved_area(void *start, void *end,
extern void adjust_managed_page_count(struct page *page, long count);
extern void reserve_bootmem_region(phys_addr_t start,
phys_addr_t end, int nid);
/* Free the reserved page into the buddy system, so it gets managed. */
void free_reserved_page(struct page *page);


@@ -652,13 +652,6 @@ void __init reserve_initrd_mem(void)
void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
unsigned long aligned_end = ALIGN(end, PAGE_SIZE);
memblock_free((void *)aligned_start, aligned_end - aligned_start);
#endif
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
"initrd");
}


@@ -547,10 +547,10 @@ void __init swiotlb_exit(void)
free_pages(tbl_vaddr, get_order(tbl_size));
free_pages((unsigned long)mem->slots, get_order(slots_size));
} else {
memblock_free_late(__pa(mem->areas),
memblock_free(mem->areas,
array_size(sizeof(*mem->areas), mem->nareas));
memblock_free_late(mem->start, tbl_size);
memblock_free_late(__pa(mem->slots), slots_size);
memblock_phys_free(mem->start, tbl_size);
memblock_free(mem->slots, slots_size);
}
memset(mem, 0, sizeof(*mem));
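
This swiotlb hunk shows both directions of the conversion at once; the
general mapping applied across the series (an editor's reading aid with
placeholder arguments, not text from the patch):

    #include <linux/memblock.h>

    static void __init conversion_rule(void *ptr, phys_addr_t phys, size_t size)
    {
    	/* was: memblock_free_late(__pa(ptr), size); */
    	memblock_free(ptr, size);

    	/* was: memblock_free_late(phys, size); */
    	memblock_phys_free(phys, size);
    }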


@@ -66,7 +66,7 @@ static inline void __init xbc_free_mem(void *addr, size_t size, bool early)
if (early)
memblock_free(addr, size);
else if (addr)
memblock_free_late(__pa(addr), size);
memblock_free(addr, size);
}
#else /* !__KERNEL__ */


@@ -1322,7 +1322,17 @@ static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);
static inline bool deferred_pages_enabled(void)
{
return static_branch_unlikely(&deferred_pages);
}
bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#else
static inline bool deferred_pages_enabled(void)
{
return false;
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
void init_deferred_page(unsigned long pfn, int nid);


@@ -736,10 +736,10 @@ static bool __init kfence_init_pool_early(void)
* fails for the first page, and therefore expect addr==__kfence_pool in
* most failure cases.
*/
memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
memblock_free((void *)addr, KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
__kfence_pool = NULL;
memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
memblock_free(kfence_metadata_init, KFENCE_METADATA_SIZE);
kfence_metadata_init = NULL;
return false;


@@ -17,6 +17,7 @@
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#ifdef CONFIG_KEXEC_HANDOVER
#include <linux/libfdt.h>
@@ -384,26 +385,27 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
*/
void __init memblock_discard(void)
{
phys_addr_t addr, size;
phys_addr_t size;
void *addr;
if (memblock.reserved.regions != memblock_reserved_init_regions) {
addr = __pa(memblock.reserved.regions);
addr = memblock.reserved.regions;
size = PAGE_ALIGN(sizeof(struct memblock_region) *
memblock.reserved.max);
if (memblock_reserved_in_slab)
kfree(memblock.reserved.regions);
kfree(addr);
else
memblock_free_late(addr, size);
memblock_free(addr, size);
}
if (memblock.memory.regions != memblock_memory_init_regions) {
addr = __pa(memblock.memory.regions);
addr = memblock.memory.regions;
size = PAGE_ALIGN(sizeof(struct memblock_region) *
memblock.memory.max);
if (memblock_memory_in_slab)
kfree(memblock.memory.regions);
kfree(addr);
else
memblock_free_late(addr, size);
memblock_free(addr, size);
}
memblock_memory = NULL;
@@ -893,13 +895,81 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
return memblock_remove_range(&memblock.memory, base, size);
}
static unsigned long __free_reserved_area(phys_addr_t start, phys_addr_t end,
int poison)
{
unsigned long pages = 0, pfn;
if (deferred_pages_enabled()) {
WARN(1, "Cannot free reserved memory because of deferred initialization of the memory map");
return 0;
}
for_each_valid_pfn(pfn, PFN_UP(start), PFN_DOWN(end)) {
struct page *page = pfn_to_page(pfn);
void *direct_map_addr;
/*
* 'direct_map_addr' might be different from the kernel virtual
* address because some architectures use aliases.
* Going via physical address, pfn_to_page() and page_address()
* ensures that we get a _writeable_ alias for the memset().
*/
direct_map_addr = page_address(page);
/*
* Perform a kasan-unchecked memset() since this memory
* has not been initialized.
*/
direct_map_addr = kasan_reset_tag(direct_map_addr);
if ((unsigned int)poison <= 0xFF)
memset(direct_map_addr, poison, PAGE_SIZE);
free_reserved_page(page);
pages++;
}
return pages;
}
unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
{
phys_addr_t start_pa, end_pa;
unsigned long pages;
/*
* end is the first address past the region and it may be beyond what
* __pa() or __pa_symbol() can handle.
* Use the address included in the range for the conversion and add back
* 1 afterwards.
*/
if (__is_kernel((unsigned long)start)) {
start_pa = __pa_symbol(start);
end_pa = __pa_symbol(end - 1) + 1;
} else {
start_pa = __pa(start);
end_pa = __pa(end - 1) + 1;
}
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
if (start_pa < end_pa)
memblock_remove_range(&memblock.reserved,
start_pa, end_pa - start_pa);
}
pages = __free_reserved_area(start_pa, end_pa, poison);
if (pages && s)
pr_info("Freeing %s memory: %ldK\n", s, K(pages));
return pages;
}
/**
* memblock_free - free boot memory allocation
* @ptr: starting address of the boot memory allocation
* @size: size of the boot memory block in bytes
*
* Free boot memory block previously allocated by memblock_alloc_xx() API.
* The freeing memory will not be released to the buddy allocator.
* If called after the buddy allocator is available, the memory is released to
* the buddy allocator.
*/
void __init_memblock memblock_free(void *ptr, size_t size)
{
@@ -913,17 +983,24 @@ void __init_memblock memblock_free(void *ptr, size_t size)
* @size: size of the boot memory block in bytes
*
* Free boot memory block previously allocated by memblock_phys_alloc_xx() API.
* The freeing memory will not be released to the buddy allocator.
* If called after the buddy allocator is available, the memory is released to
* the buddy allocator.
*/
int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
{
phys_addr_t end = base + size - 1;
int ret;
memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
&base, &end, (void *)_RET_IP_);
kmemleak_free_part_phys(base, size);
return memblock_remove_range(&memblock.reserved, base, size);
ret = memblock_remove_range(&memblock.reserved, base, size);
if (slab_is_available())
__free_reserved_area(base, base + size, -1);
return ret;
}
int __init_memblock __memblock_reserve(phys_addr_t base, phys_addr_t size,
@@ -973,7 +1050,7 @@ __init void memmap_init_kho_scratch_pages(void)
/*
* Initialize struct pages for free scratch memory.
* The struct pages for reserved scratch memory will be set up in
* reserve_bootmem_region()
* memmap_init_reserved_pages()
*/
__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) {
@@ -1766,32 +1843,6 @@ void *__init __memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
return addr;
}
/**
* memblock_free_late - free pages directly to buddy allocator
* @base: phys starting address of the boot memory block
* @size: size of the boot memory block in bytes
*
* This is only useful when the memblock allocator has already been torn
* down, but we are still initializing the system. Pages are released directly
* to the buddy allocator.
*/
void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
phys_addr_t cursor, end;
end = base + size - 1;
memblock_dbg("%s: [%pa-%pa] %pS\n",
__func__, &base, &end, (void *)_RET_IP_);
kmemleak_free_part_phys(base, size);
cursor = PFN_UP(base);
end = PFN_DOWN(base + size);
for (; cursor < end; cursor++) {
memblock_free_pages(cursor, 0);
totalram_pages_inc();
}
}
/*
* Remaining API functions
*/
@@ -2255,6 +2306,31 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
return end_pfn - start_pfn;
}
/*
* Initialised pages do not have PageReserved set. This function is called
* for each reserved range and marks the pages PageReserved.
* When deferred initialization of struct pages is enabled it also ensures
* that struct pages are properly initialised.
*/
static void __init memmap_init_reserved_range(phys_addr_t start,
phys_addr_t end, int nid)
{
unsigned long pfn;
for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
struct page *page = pfn_to_page(pfn);
init_deferred_page(pfn, nid);
/*
* no need for atomic set_bit because the struct
* page is not visible yet so nobody should
* access it yet.
*/
__SetPageReserved(page);
}
}
static void __init memmap_init_reserved_pages(void)
{
struct memblock_region *region;
@@ -2274,7 +2350,7 @@ static void __init memmap_init_reserved_pages(void)
end = start + region->size;
if (memblock_is_nomap(region))
reserve_bootmem_region(start, end, nid);
memmap_init_reserved_range(start, end, nid);
memblock_set_node(start, region->size, &memblock.reserved, nid);
}
@@ -2299,7 +2375,7 @@ static void __init memmap_init_reserved_pages(void)
if (!numa_valid_node(nid))
nid = early_pfn_to_nid(PFN_DOWN(start));
reserve_bootmem_region(start, end, nid);
memmap_init_reserved_range(start, end, nid);
}
}
}
@@ -2449,7 +2525,7 @@ int reserve_mem_release_by_name(const char *name)
return 0;
start = phys_to_virt(map->start);
end = start + map->size - 1;
end = start + map->size;
snprintf(buf, sizeof(buf), "reserve_mem:%s", name);
free_reserved_area(start, end, 0, buf);
map->size = 0;
@@ -2657,23 +2733,25 @@ static int __init reserve_mem(char *p)
int len;
if (!p)
return -EINVAL;
goto err_param;
/* Check if there's room for more reserved memory */
if (reserved_mem_count >= RESERVE_MEM_MAX_ENTRIES)
if (reserved_mem_count >= RESERVE_MEM_MAX_ENTRIES) {
pr_err("reserve_mem: no more room for reserved memory\n");
return -EBUSY;
}
oldp = p;
size = memparse(p, &p);
if (!size || p == oldp)
return -EINVAL;
goto err_param;
if (*p != ':')
return -EINVAL;
goto err_param;
align = memparse(p+1, &p);
if (*p != ':')
return -EINVAL;
goto err_param;
/*
* memblock_phys_alloc() doesn't like a zero size align,
@@ -2687,7 +2765,7 @@ static int __init reserve_mem(char *p)
/* name needs to have length but not too big */
if (!len || len >= RESERVE_MEM_NAME_SIZE)
return -EINVAL;
goto err_param;
/* Make sure that name has text */
for (p = name; *p; p++) {
@@ -2695,11 +2773,13 @@ static int __init reserve_mem(char *p)
break;
}
if (!*p)
return -EINVAL;
goto err_param;
/* Make sure the name is not already used */
if (reserve_mem_find_by_name(name, &start, &tmp))
if (reserve_mem_find_by_name(name, &start, &tmp)) {
pr_err("reserve_mem: name \"%s\" was already used\n", name);
return -EBUSY;
}
/* Pick previous allocations up from KHO if available */
if (reserve_mem_kho_revive(name, size, align))
@@ -2707,16 +2787,22 @@ static int __init reserve_mem(char *p)
/* TODO: Allocation must be outside of scratch region */
start = memblock_phys_alloc(size, align);
if (!start)
if (!start) {
pr_err("reserve_mem: memblock allocation failed\n");
return -ENOMEM;
}
reserved_mem_add(start, size, name);
return 1;
err_param:
pr_err("reserve_mem: empty or malformed parameter\n");
return -EINVAL;
}
__setup("reserve_mem=", reserve_mem);
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
#ifdef CONFIG_DEBUG_FS
#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
static const char * const flagname[] = {
[ilog2(MEMBLOCK_HOTPLUG)] = "HOTPLUG",
[ilog2(MEMBLOCK_MIRROR)] = "MIRROR",
@@ -2763,10 +2849,8 @@ static int memblock_debug_show(struct seq_file *m, void *private)
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);
static int __init memblock_init_debugfs(void)
static inline void memblock_debugfs_expose_arrays(struct dentry *root)
{
struct dentry *root = debugfs_create_dir("memblock", NULL);
debugfs_create_file("memory", 0444, root,
&memblock.memory, &memblock_debug_fops);
debugfs_create_file("reserved", 0444, root,
@@ -2775,7 +2859,48 @@ static int __init memblock_init_debugfs(void)
debugfs_create_file("physmem", 0444, root, &physmem,
&memblock_debug_fops);
#endif
}
#else
static inline void memblock_debugfs_expose_arrays(struct dentry *root) { }
#endif /* CONFIG_ARCH_KEEP_MEMBLOCK */
static int memblock_reserve_mem_show(struct seq_file *m, void *private)
{
struct reserve_mem_table *map;
char txtsz[16];
guard(mutex)(&reserve_mem_lock);
for (int i = 0; i < reserved_mem_count; i++) {
map = &reserved_mem_table[i];
if (!map->size)
continue;
memset(txtsz, 0, sizeof(txtsz));
string_get_size(map->size, 1, STRING_UNITS_2, txtsz, sizeof(txtsz));
seq_printf(m, "%s\t\t(%s)\n", map->name, txtsz);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_reserve_mem);
static int __init memblock_init_debugfs(void)
{
struct dentry *root;
if (!IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !reserved_mem_count)
return 0;
root = debugfs_create_dir("memblock", NULL);
if (reserved_mem_count)
debugfs_create_file("reserve_mem_param", 0444, root, NULL,
&memblock_reserve_mem_fops);
memblock_debugfs_expose_arrays(root);
return 0;
}
__initcall(memblock_init_debugfs);
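
Net effect of the debugfs rework, with paths inferred from the calls above
(the memblock directory is only created when it will have content):

    /sys/kernel/debug/memblock/
        memory              (CONFIG_ARCH_KEEP_MEMBLOCK=y)
        reserved            (CONFIG_ARCH_KEEP_MEMBLOCK=y)
        physmem             (plus CONFIG_HAVE_MEMBLOCK_PHYS_MAP=y)
        reserve_mem_param   (only when reserve_mem= reservations exist)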


@@ -783,31 +783,6 @@ void __meminit init_deferred_page(unsigned long pfn, int nid)
__init_deferred_page(pfn, nid);
}
/*
* Initialised pages do not have PageReserved set. This function is
* called for each range allocated by the bootmem allocator and
* marks the pages PageReserved. The remaining valid pages are later
* sent to the buddy page allocator.
*/
void __meminit reserve_bootmem_region(phys_addr_t start,
phys_addr_t end, int nid)
{
unsigned long pfn;
for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
struct page *page = pfn_to_page(pfn);
__init_deferred_page(pfn, nid);
/*
* no need for atomic set_bit because the struct
* page is not visible yet so nobody should
* access it yet.
*/
__SetPageReserved(page);
}
}
/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
static bool __meminit
overlap_memmap_init(unsigned long zone, unsigned long *pfn)


@@ -297,11 +297,6 @@ int page_group_by_mobility_disabled __read_mostly;
*/
DEFINE_STATIC_KEY_TRUE(deferred_pages);
static inline bool deferred_pages_enabled(void)
{
return static_branch_unlikely(&deferred_pages);
}
/*
* deferred_grow_zone() is __init, but it is called from
* get_page_from_freelist() during early boot until deferred_pages permanently
@@ -314,11 +309,6 @@ _deferred_grow_zone(struct zone *zone, unsigned int order)
return deferred_grow_zone(zone, order);
}
#else
static inline bool deferred_pages_enabled(void)
{
return false;
}
static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
{
return false;
@@ -6211,42 +6201,6 @@ void adjust_managed_page_count(struct page *page, long count)
}
EXPORT_SYMBOL(adjust_managed_page_count);
unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
{
void *pos;
unsigned long pages = 0;
start = (void *)PAGE_ALIGN((unsigned long)start);
end = (void *)((unsigned long)end & PAGE_MASK);
for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
struct page *page = virt_to_page(pos);
void *direct_map_addr;
/*
* 'direct_map_addr' might be different from 'pos'
* because some architectures' virt_to_page()
* work with aliases. Getting the direct map
* address ensures that we get a _writeable_
* alias for the memset().
*/
direct_map_addr = page_address(page);
/*
* Perform a kasan-unchecked memset() since this memory
* has not been initialized.
*/
direct_map_addr = kasan_reset_tag(direct_map_addr);
if ((unsigned int)poison <= 0xFF)
memset(direct_map_addr, poison, PAGE_SIZE);
free_reserved_page(page);
}
if (pages && s)
pr_info("Freeing %s memory: %ldK\n", s, K(pages));
return pages;
}
void free_reserved_page(struct page *page)
{
clear_page_tag_ref(page);


@@ -17,6 +17,7 @@
#define __va(x) ((void *)((unsigned long)(x)))
#define __pa(x) ((unsigned long)(x))
#define __pa_symbol(x) ((unsigned long)(x))
#define pfn_to_page(pfn) ((void *)((pfn) * PAGE_SIZE))
@@ -32,8 +33,6 @@ static inline phys_addr_t virt_to_phys(volatile void *address)
return (phys_addr_t)address;
}
void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid);
static inline void totalram_pages_inc(void)
{
}


@@ -11,9 +11,22 @@ static int memblock_debug = 1;
#define pr_warn_ratelimited(fmt, ...) printf(fmt, ##__VA_ARGS__)
#define K(x) ((x) << (PAGE_SHIFT-10))
bool mirrored_kernelcore = false;
struct page {};
static inline void *page_address(struct page *page)
{
BUG();
return page;
}
static inline struct page *virt_to_page(void *virt)
{
BUG();
return virt;
}
void memblock_free_pages(unsigned long pfn, unsigned int order)
{
@@ -23,10 +36,34 @@ static inline void accept_memory(phys_addr_t start, unsigned long size)
{
}
static inline unsigned long free_reserved_area(void *start, void *end,
int poison, const char *s)
unsigned long free_reserved_area(void *start, void *end, int poison, const char *s);
void free_reserved_page(struct page *page);
static inline bool deferred_pages_enabled(void)
{
return 0;
return false;
}
#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \
for ((pfn) = (start_pfn); (pfn) < (end_pfn); (pfn)++)
static inline void *kasan_reset_tag(const void *addr)
{
return (void *)addr;
}
static inline bool __is_kernel(unsigned long addr)
{
return false;
}
#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \
for ((pfn) = (start_pfn); (pfn) < (end_pfn); (pfn)++)
static inline void init_deferred_page(unsigned long pfn, int nid)
{
}
#define __SetPageReserved(p) ((void)(p))
#endif


@@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_STRING_HELPERS_H_
#define _LINUX_STRING_HELPERS_H_
/*
* Header stub to avoid test build breakage; we don't need to
* actually implement string_get_size() as it's not used in the tests.
*/
#endif


@@ -11,10 +11,6 @@ struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
return NULL;
}
void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid)
{
}
void atomic_long_set(atomic_long_t *v, long i)
{
}