kasan_free_pxd() assumes that a page table is always struct page
aligned, but that's not the case on every architecture. For example,
on powerpc with a 64K page size, the PUD table (of size 4096) comes
from a slab cache named pgtable-2^9. Hence, instead of
page_to_virt(pxd_page()), just pass the start of the pxd table
directly, which is already passed in as the 1st argument.
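
A sketch of the substitution at the pud level (illustrative only; the
"before" line reconstructs the old page-based lookup from the
description above, and the same change applies at each pxd level):

	static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
	{
		...
	-	pud_free(&init_mm, page_to_virt(p4d_page(*p4d)));
	+	pud_free(&init_mm, pud_start);
		p4d_clear(p4d);
	}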
This fixes the below double-free KASAN report seen with PMEM:
radix-mmu: Mapped 0x0000047d10000000-0x0000047f90000000 with 2.00 MiB pages
==================================================================
BUG: KASAN: double-free in kasan_remove_zero_shadow+0x9c4/0xa20
Free of addr c0000003c38e0000 by task ndctl/2164
CPU: 34 UID: 0 PID: 2164 Comm: ndctl Not tainted 6.19.0-rc1-00048-gea1013c15392 #157 VOLUNTARY
Hardware name: IBM,9080-HEX POWER10 (architected) 0x800200 0xf000006 of:IBM,FW1060.00 (NH1060_012) hv:phyp pSeries
Call Trace:
dump_stack_lvl+0x88/0xc4 (unreliable)
print_report+0x214/0x63c
kasan_report_invalid_free+0xe4/0x110
check_slab_allocation+0x100/0x150
kmem_cache_free+0x128/0x6e0
kasan_remove_zero_shadow+0x9c4/0xa20
memunmap_pages+0x2b8/0x5c0
devm_action_release+0x54/0x70
release_nodes+0xc8/0x1a0
devres_release_all+0xe0/0x140
device_unbind_cleanup+0x30/0x120
device_release_driver_internal+0x3e4/0x450
unbind_store+0xfc/0x110
drv_attr_store+0x78/0xb0
sysfs_kf_write+0x114/0x140
kernfs_fop_write_iter+0x264/0x3f0
vfs_write+0x3bc/0x7d0
ksys_write+0xa4/0x190
system_call_exception+0x190/0x480
system_call_vectored_common+0x15c/0x2ec
---- interrupt: 3000 at 0x7fff93b3d3f4
NIP: 00007fff93b3d3f4 LR: 00007fff93b3d3f4 CTR: 0000000000000000
REGS: c0000003f1b07e80 TRAP: 3000 Not tainted (6.19.0-rc1-00048-gea1013c15392)
MSR: 800000000280f033 <SF,VEC,VSX,EE,PR,FP,ME,IR,DR,RI,LE> CR: 48888208 XER: 00000000
<...>
NIP [00007fff93b3d3f4] 0x7fff93b3d3f4
LR [00007fff93b3d3f4] 0x7fff93b3d3f4
---- interrupt: 3000
The buggy address belongs to the object at c0000003c38e0000
which belongs to the cache pgtable-2^9 of size 4096
The buggy address is located 0 bytes inside of
4096-byte region [c0000003c38e0000, c0000003c38e1000)
The buggy address belongs to the physical page:
page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x3c38c
head: order:2 mapcount:0 entire_mapcount:0 nr_pages_mapped:0 pincount:0
memcg:c0000003bfd63e01
flags: 0x63ffff800000040(head|node=6|zone=0|lastcpupid=0x7ffff)
page_type: f5(slab)
raw: 063ffff800000040 c000000140058980 5deadbeef0000122 0000000000000000
raw: 0000000000000000 0000000080200020 00000000f5000000 c0000003bfd63e01
head: 063ffff800000040 c000000140058980 5deadbeef0000122 0000000000000000
head: 0000000000000000 0000000080200020 00000000f5000000 c0000003bfd63e01
head: 063ffff800000002 c00c000000f0e301 00000000ffffffff 00000000ffffffff
head: ffffffffffffffff 0000000000000000 00000000ffffffff 0000000000000004
page dumped because: kasan: bad access detected
Memory state around the buggy address:
 c0000003c38dff00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
 c0000003c38dff80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
>c0000003c38e0000: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
                   ^
 c0000003c38e0080: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
 c0000003c38e0100: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
==================================================================
Disabling lock debugging due to kernel taint
Link: https://lkml.kernel.org/r/2f9135c7866c6e0d06e960993b8a5674a9ebc7ec.1771938394.git.ritesh.list@gmail.com
Fixes: 0207df4fa1 ("kernel/memremap, kasan: make ZONE_DEVICE with work with KASAN")
Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reported-by: Venkat Rao Bagalkote <venkat88@linux.ibm.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN shadow initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/pgalloc.h>

#include <asm/page.h>

#include "kasan.h"

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory: the entire shadow region is
 *     populated with this page before we are able to set up the normal
 *     shadow memory.
 *   - Later it is reused as the zero shadow to cover large ranges of
 *     memory that are allowed to be accessed but are not handled by
 *     kasan (vmalloc/vmemmap ...).
 */
unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;

#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
	return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
	return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
	return false;
}
#endif
pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS]
	__page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
	return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
}

static inline bool kasan_early_shadow_page_entry(pte_t pte)
{
	return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));
}

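/*
 * Pre-slab allocator: hands out naturally aligned (align == size)
 * memory from memblock, or panics.
 */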
static __init void *early_alloc(size_t size, int node)
{
	void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
					MEMBLOCK_ALLOC_ACCESSIBLE, node);

	if (!ptr)
		panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
		      __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));

	return ptr;
}

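/*
 * Point every pte in [addr, end) at the shared, write-protected early
 * shadow page, so the covered region reads as zero shadow.
 */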
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_early_shadow_page)),
				PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}

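/*
 * In each populate helper below, a chunk that covers a whole aligned
 * PMD/PUD/P4D/PGD entry is wired to the shared early shadow tables;
 * smaller chunks get a freshly allocated lower-level table and recurse.
 */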
static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pmd_none(*pmd)) {
			pte_t *p;

			if (slab_is_available())
				p = pte_alloc_one_kernel(&init_mm);
			else {
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
				kernel_pte_init(p);
			}
			if (!p)
				return -ENOMEM;

			pmd_populate_kernel(&init_mm, pmd, p);
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pud_none(*pud)) {
			pmd_t *p;

			if (slab_is_available()) {
				p = pmd_alloc(&init_mm, pud, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
				pmd_init(p);
				pud_populate(&init_mm, pud, p);
			}
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return 0;
}

static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			p4d_populate_kernel(addr, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (p4d_none(*p4d)) {
			pud_t *p;

			if (slab_is_available()) {
				p = pud_alloc(&init_mm, p4d, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
				pud_init(p);
				p4d_populate_kernel(addr, p4d, p);
			}
		}
		zero_pud_populate(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	return 0;
}

/**
 * kasan_populate_early_shadow - populate shadow memory region with
 *                               kasan_early_shadow_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 */
int __ref kasan_populate_early_shadow(const void *shadow_start,
					const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			/*
			 * kasan_early_shadow_pud should be populated with pmds
			 * at this point. The [pud,pmd]_populate*() calls below
			 * are needed only for 3- and 2-level page tables,
			 * where there are no real puds/pmds and
			 * pgd_populate()/pud_populate() are no-ops.
			 */
			pgd_populate_kernel(addr, pgd,
					lm_alias(kasan_early_shadow_p4d));
			p4d = p4d_offset(pgd, addr);
			p4d_populate_kernel(addr, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pgd_none(*pgd)) {
			if (slab_is_available()) {
				if (!p4d_alloc(&init_mm, pgd, addr))
					return -ENOMEM;
			} else {
				pgd_populate_kernel(addr, pgd,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);

	return 0;
}

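/*
 * The kasan_free_*() helpers below free a page-table page once every
 * entry in it is none, then clear the upper-level entry that pointed
 * to it. The table start pointer is passed in directly because the
 * table may not span a whole page (e.g. the 4096-byte PUD table from
 * the pgtable-2^9 slab cache on powerpc with 64K pages), so it cannot
 * be derived via page_to_virt(pxd_page()).
 */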
static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(ptep_get(pte)))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, pud_start);
	p4d_clear(p4d);
}

static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
{
	p4d_t *p4d;
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		p4d = p4d_start + i;
		if (!p4d_none(*p4d))
			return;
	}

	p4d_free(&init_mm, p4d_start);
	pgd_clear(pgd);
}

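/*
 * The kasan_remove_*_table() walkers below undo kasan_add_zero_shadow():
 * they clear the zero-shadow mappings in [addr, end) and free any
 * page-table pages that end up empty.
 */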
static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
				unsigned long end)
{
	unsigned long next;
	pte_t ptent;

	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		ptent = ptep_get(pte);

		if (!pte_present(ptent))
			continue;

		if (WARN_ON(!kasan_early_shadow_page_entry(ptent)))
			continue;
		pte_clear(&init_mm, addr, pte);
	}
}

static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pmd++) {
		pte_t *pte;

		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (kasan_pte_table(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				pmd_clear(pmd);
				continue;
			}
		}
		pte = pte_offset_kernel(pmd, addr);
		kasan_remove_pte_table(pte, addr, next);
		kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
	}
}

static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pud++) {
		pmd_t *pmd, *pmd_base;

		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (kasan_pmd_table(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				pud_clear(pud);
				continue;
			}
		}
		pmd = pmd_offset(pud, addr);
		pmd_base = pmd_offset(pud, 0);
		kasan_remove_pmd_table(pmd, addr, next);
		kasan_free_pmd(pmd_base, pud);
	}
}

static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, p4d++) {
		pud_t *pud;

		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		if (kasan_pud_table(*p4d)) {
			if (IS_ALIGNED(addr, P4D_SIZE) &&
			    IS_ALIGNED(next, P4D_SIZE)) {
				p4d_clear(p4d);
				continue;
			}
		}
		pud = pud_offset(p4d, addr);
		kasan_remove_pud_table(pud, addr, next);
		kasan_free_pud(pud_offset(p4d, 0), p4d);
	}
}

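/*
 * Both start and size must be aligned to KASAN_MEMORY_PER_SHADOW_PAGE,
 * i.e. the amount of memory covered by a single shadow page.
 */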
void kasan_remove_zero_shadow(void *start, unsigned long size)
{
	unsigned long addr, end, next;
	pgd_t *pgd;

	addr = (unsigned long)kasan_mem_to_shadow(start);
	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
	    WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
		return;

	for (; addr < end; addr = next) {
		p4d_t *p4d;

		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (kasan_p4d_table(*pgd)) {
			if (IS_ALIGNED(addr, PGDIR_SIZE) &&
			    IS_ALIGNED(next, PGDIR_SIZE)) {
				pgd_clear(pgd);
				continue;
			}
		}

		p4d = p4d_offset(pgd, addr);
		kasan_remove_p4d_table(p4d, addr, next);
		kasan_free_p4d(p4d_offset(pgd, 0), pgd);
	}
}

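/*
 * Map the zero shadow for [start, start + size); on failure, tear down
 * whatever was mapped so far. Same alignment rules as above.
 */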
int kasan_add_zero_shadow(void *start, unsigned long size)
{
	int ret;
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
	    WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
		return -EINVAL;

	ret = kasan_populate_early_shadow(shadow_start, shadow_end);
	if (ret)
		kasan_remove_zero_shadow(start, size);
	return ret;
}