memblock, treewide: make memblock_free() handle late freeing

It shouldn't be the responsibility of memblock users to detect whether
they free memory allocated from memblock late, after the buddy allocator
is up, and to call memblock_free_late() in that case.

Make memblock_free() and memblock_phys_free() take care of late memory
freeing and drop memblock_free_late().
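
For example, __efi_memmap_free() no longer has to pick the right
function depending on how far boot has progressed:

        if (slab_is_available())
                memblock_free_late(phys, size);
        else
                memblock_phys_free(phys, size);

collapses into an unconditional

        memblock_phys_free(phys, size);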

Link: https://patch.msgid.link/20260323074836.3653702-9-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
commit 87ce9e83ab
parent b2129a3951
Author: Mike Rapoport (Microsoft) <rppt@kernel.org>
Date:   2026-03-23 09:48:35 +02:00

11 changed files with 31 additions and 49 deletions


@@ -183,14 +183,12 @@ static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size
 static void __init mdesc_memblock_free(struct mdesc_handle *hp)
 {
         unsigned int alloc_size;
-        unsigned long start;
 
         BUG_ON(refcount_read(&hp->refcnt) != 0);
         BUG_ON(!list_empty(&hp->list));
 
         alloc_size = PAGE_ALIGN(hp->handle_size);
-        start = __pa(hp);
-        memblock_free_late(start, alloc_size);
+        memblock_free(hp, alloc_size);
 }
 
 static struct mdesc_mem_ops memblock_mdesc_ops = {


@@ -426,7 +426,7 @@ int __init ima_free_kexec_buffer(void)
         if (!ima_kexec_buffer_size)
                 return -ENOENT;
 
-        memblock_free_late(ima_kexec_buffer_phys,
+        memblock_phys_free(ima_kexec_buffer_phys,
                            ima_kexec_buffer_size);
 
         ima_kexec_buffer_phys = 0;


@@ -34,10 +34,7 @@ static
 void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
 {
         if (flags & EFI_MEMMAP_MEMBLOCK) {
-                if (slab_is_available())
-                        memblock_free_late(phys, size);
-                else
-                        memblock_phys_free(phys, size);
+                memblock_phys_free(phys, size);
         } else if (flags & EFI_MEMMAP_SLAB) {
                 struct page *p = pfn_to_page(PHYS_PFN(phys));
                 unsigned int order = get_order(size);


@@ -372,7 +372,7 @@ void __init efi_reserve_boot_services(void)
                  * doesn't make sense as far as the firmware is
                  * concerned, but it does provide us with a way to tag
                  * those regions that must not be paired with
-                 * memblock_free_late().
+                 * memblock_phys_free().
                  */
                 md->attribute |= EFI_MEMORY_RUNTIME;
         }


@@ -226,7 +226,7 @@ static int __init map_properties(void)
          */
         data->len = 0;
         memunmap(data);
-        memblock_free_late(pa_data + sizeof(*data), data_len);
+        memblock_phys_free(pa_data + sizeof(*data), data_len);
 
         return ret;
 }


@@ -175,7 +175,7 @@ int __init ima_free_kexec_buffer(void)
         if (ret)
                 return ret;
 
-        memblock_free_late(addr, size);
+        memblock_phys_free(addr, size);
         return 0;
 }
 #endif


@@ -172,8 +172,6 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
                           struct memblock_type *type_b, phys_addr_t *out_start,
                           phys_addr_t *out_end, int *out_nid);
 
-void memblock_free_late(phys_addr_t base, phys_addr_t size);
-
 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
 static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
                                         phys_addr_t *out_start,


@@ -546,10 +546,10 @@ void __init swiotlb_exit(void)
                 free_pages(tbl_vaddr, get_order(tbl_size));
                 free_pages((unsigned long)mem->slots, get_order(slots_size));
         } else {
-                memblock_free_late(__pa(mem->areas),
-                                   array_size(sizeof(*mem->areas), mem->nareas));
-                memblock_free_late(mem->start, tbl_size);
-                memblock_free_late(__pa(mem->slots), slots_size);
+                memblock_free(mem->areas,
+                              array_size(sizeof(*mem->areas), mem->nareas));
+                memblock_phys_free(mem->start, tbl_size);
+                memblock_free(mem->slots, slots_size);
         }
 
         memset(mem, 0, sizeof(*mem));


@@ -64,7 +64,7 @@ static inline void __init xbc_free_mem(void *addr, size_t size, bool early)
         if (early)
                 memblock_free(addr, size);
         else if (addr)
-                memblock_free_late(__pa(addr), size);
+                memblock_free(addr, size);
 }
 
 #else /* !__KERNEL__ */


@@ -731,10 +731,10 @@ static bool __init kfence_init_pool_early(void)
          * fails for the first page, and therefore expect addr==__kfence_pool in
          * most failure cases.
          */
-        memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
+        memblock_free((void *)addr, KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
         __kfence_pool = NULL;
 
-        memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
+        memblock_free(kfence_metadata_init, KFENCE_METADATA_SIZE);
         kfence_metadata_init = NULL;
 
         return false;


@@ -385,26 +385,27 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
  */
 void __init memblock_discard(void)
 {
-        phys_addr_t addr, size;
+        phys_addr_t size;
+        void *addr;
 
         if (memblock.reserved.regions != memblock_reserved_init_regions) {
-                addr = __pa(memblock.reserved.regions);
+                addr = memblock.reserved.regions;
                 size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                   memblock.reserved.max);
                 if (memblock_reserved_in_slab)
-                        kfree(memblock.reserved.regions);
+                        kfree(addr);
                 else
-                        memblock_free_late(addr, size);
+                        memblock_free(addr, size);
         }
 
         if (memblock.memory.regions != memblock_memory_init_regions) {
-                addr = __pa(memblock.memory.regions);
+                addr = memblock.memory.regions;
                 size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                   memblock.memory.max);
                 if (memblock_memory_in_slab)
-                        kfree(memblock.memory.regions);
+                        kfree(addr);
                 else
-                        memblock_free_late(addr, size);
+                        memblock_free(addr, size);
         }
 
         memblock_memory = NULL;
@@ -962,7 +963,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
  * @size: size of the boot memory block in bytes
  *
  * Free boot memory block previously allocated by memblock_alloc_xx() API.
- * The freeing memory will not be released to the buddy allocator.
+ * If called after the buddy allocator is available, the memory is released to
+ * the buddy allocator.
  */
 void __init_memblock memblock_free(void *ptr, size_t size)
 {
@@ -976,17 +978,24 @@ void __init_memblock memblock_free(void *ptr, size_t size)
  * @size: size of the boot memory block in bytes
  *
  * Free boot memory block previously allocated by memblock_phys_alloc_xx() API.
- * The freeing memory will not be released to the buddy allocator.
+ * If called after the buddy allocator is available, the memory is released to
+ * the buddy allocator.
  */
 int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
 {
         phys_addr_t end = base + size - 1;
+        int ret;
 
         memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
                      &base, &end, (void *)_RET_IP_);
 
         kmemleak_free_part_phys(base, size);
-        return memblock_remove_range(&memblock.reserved, base, size);
+        ret = memblock_remove_range(&memblock.reserved, base, size);
+
+        if (slab_is_available())
+                __free_reserved_area(base, base + size, -1);
+
+        return ret;
 }
 
 int __init_memblock __memblock_reserve(phys_addr_t base, phys_addr_t size,
@@ -1814,26 +1823,6 @@ void *__init __memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
         return addr;
 }
 
-/**
- * memblock_free_late - free pages directly to buddy allocator
- * @base: phys starting address of the boot memory block
- * @size: size of the boot memory block in bytes
- *
- * This is only useful when the memblock allocator has already been torn
- * down, but we are still initializing the system. Pages are released directly
- * to the buddy allocator.
- */
-void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
-{
-        phys_addr_t end = base + size - 1;
-
-        memblock_dbg("%s: [%pa-%pa] %pS\n",
-                     __func__, &base, &end, (void *)_RET_IP_);
-
-        kmemleak_free_part_phys(base, size);
-        __free_reserved_area(base, base + size, -1);
-}
-
 /*
  * Remaining API functions
  */