mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
mm: rename zap_vma_ptes() to zap_special_vma_range()
zap_vma_ptes() is the only zapping function we export to modules. It's essentially a wrapper around zap_vma_range(), however, with some safety checks: * That the passed range fits fully into the VMA * That it's only used for VM_PFNMAP We will add support for VM_MIXEDMAP next, so use the more-generic term "special vma", although "special" is a bit overloaded. Maybe we'll later just support any VM_SPECIAL flag. While at it, improve the kerneldoc. Link: https://lkml.kernel.org/r/20260227200848.114019-16-david@kernel.org Signed-off-by: David Hildenbrand (Arm) <david@kernel.org> Acked-by: Leon Romanovsky <leon@kernel.org> [drivers/infiniband] Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Alexei Starovoitov <ast@kernel.org> Cc: Alice Ryhl <aliceryhl@google.com> Cc: Andrii Nakryiko <andrii@kernel.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Arnaldo Carvalho de Melo <acme@kernel.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Arve <arve@android.com> Cc: "Borislav Petkov (AMD)" <bp@alien8.de> Cc: Carlos Llamas <cmllamas@google.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christian Brauner <brauner@kernel.org> Cc: Claudio Imbrenda <imbrenda@linux.ibm.com> Cc: Daniel Borkmann <daniel@iogearbox.net> Cc: Dave Airlie <airlied@gmail.com> Cc: David Ahern <dsahern@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: David S. 
Miller <davem@davemloft.net> Cc: Dimitri Sivanich <dimitri.sivanich@hpe.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Hartley Sweeten <hsweeten@visionengravers.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ian Abbott <abbotti@mev.co.uk> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: Jani Nikula <jani.nikula@linux.intel.com> Cc: Jann Horn <jannh@google.com> Cc: Janosch Frank <frankja@linux.ibm.com> Cc: Jarkko Sakkinen <jarkko@kernel.org> Cc: Jason Gunthorpe <jgg@ziepe.ca> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Hocko <mhocko@suse.com> Cc: Miguel Ojeda <ojeda@kernel.org> Cc: Mike Rapoport <rppt@kernel.org> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Neal Cardwell <ncardwell@google.com> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Pedro Falcato <pfalcato@suse.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Shakeel Butt <shakeel.butt@linux.dev> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Todd Kjos <tkjos@android.com> Cc: Tvrtko Ursulin <tursulin@ursulin.net> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Vincenzo Frascino <vincenzo.frascino@arm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
0326440c35
commit
52a9e9cd18
|
|
@ -1220,7 +1220,7 @@ void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr)
|
|||
|
||||
ret = sgx_encl_find(encl_mm->mm, addr, &vma);
|
||||
if (!ret && encl == vma->vm_private_data)
|
||||
zap_vma_ptes(vma, addr, PAGE_SIZE);
|
||||
zap_special_vma_range(vma, addr, PAGE_SIZE);
|
||||
|
||||
mmap_read_unlock(encl_mm->mm);
|
||||
|
||||
|
|
|
|||
|
|
@ -2588,7 +2588,7 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
|
|||
* remap_pfn_range() because we call remap_pfn_range() in a loop.
|
||||
*/
|
||||
if (retval)
|
||||
zap_vma_ptes(vma, vma->vm_start, size);
|
||||
zap_special_vma_range(vma, vma->vm_start, size);
|
||||
#endif
|
||||
|
||||
if (retval == 0) {
|
||||
|
|
|
|||
|
|
@ -108,7 +108,7 @@ int remap_io_mapping(struct vm_area_struct *vma,
|
|||
|
||||
err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
|
||||
if (unlikely(err)) {
|
||||
zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
|
||||
zap_special_vma_range(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
@ -156,7 +156,7 @@ int remap_io_sg(struct vm_area_struct *vma,
|
|||
|
||||
err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
|
||||
if (unlikely(err)) {
|
||||
zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
|
||||
zap_special_vma_range(vma, addr, r.pfn << PAGE_SHIFT);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -756,7 +756,7 @@ static void rdma_umap_open(struct vm_area_struct *vma)
|
|||
* point, so zap it.
|
||||
*/
|
||||
vma->vm_private_data = NULL;
|
||||
zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
|
||||
zap_special_vma_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
|
||||
}
|
||||
|
||||
static void rdma_umap_close(struct vm_area_struct *vma)
|
||||
|
|
@ -782,7 +782,7 @@ static void rdma_umap_close(struct vm_area_struct *vma)
|
|||
}
|
||||
|
||||
/*
|
||||
* Once the zap_vma_ptes has been called touches to the VMA will come here and
|
||||
* Once the zap_special_vma_range has been called touches to the VMA will come here and
|
||||
* we return a dummy writable zero page for all the pfns.
|
||||
*/
|
||||
static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
|
||||
|
|
@ -878,7 +878,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
|
|||
continue;
|
||||
list_del_init(&priv->list);
|
||||
|
||||
zap_vma_ptes(vma, vma->vm_start,
|
||||
zap_special_vma_range(vma, vma->vm_start,
|
||||
vma->vm_end - vma->vm_start);
|
||||
|
||||
if (priv->entry) {
|
||||
|
|
|
|||
|
|
@ -542,7 +542,7 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
|
|||
int ctxnum = gts->ts_ctxnum;
|
||||
|
||||
if (!is_kernel_context(gts))
|
||||
zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
|
||||
zap_special_vma_range(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
|
||||
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
|
||||
|
||||
gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
|
||||
|
|
|
|||
|
|
@ -2802,7 +2802,7 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
|
|||
struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
|
||||
pud_t pud);
|
||||
|
||||
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
|
||||
void zap_special_vma_range(struct vm_area_struct *vma, unsigned long address,
|
||||
unsigned long size);
|
||||
void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
|
||||
unsigned long size);
|
||||
|
|
|
|||
16
mm/memory.c
16
mm/memory.c
|
|
@ -2233,17 +2233,15 @@ void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
|
|||
}
|
||||
|
||||
/**
|
||||
* zap_vma_ptes - remove ptes mapping the vma
|
||||
* @vma: vm_area_struct holding ptes to be zapped
|
||||
* @address: starting address of pages to zap
|
||||
* zap_special_vma_range - zap all page table entries in a special vma range
|
||||
* @vma: the vma covering the range to zap
|
||||
* @address: starting address of the range to zap
|
||||
* @size: number of bytes to zap
|
||||
*
|
||||
* This function only unmaps ptes assigned to VM_PFNMAP vmas.
|
||||
*
|
||||
* The entire address range must be fully contained within the vma.
|
||||
*
|
||||
* This function does nothing when the provided address range is not fully
|
||||
* contained in @vma, or when the @vma is not VM_PFNMAP.
|
||||
*/
|
||||
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
|
||||
void zap_special_vma_range(struct vm_area_struct *vma, unsigned long address,
|
||||
unsigned long size)
|
||||
{
|
||||
if (!range_in_vma(vma, address, address + size) ||
|
||||
|
|
@ -2252,7 +2250,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
|
|||
|
||||
zap_vma_range(vma, address, size);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(zap_vma_ptes);
|
||||
EXPORT_SYMBOL_GPL(zap_special_vma_range);
|
||||
|
||||
static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
|
||||
{
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user