mm: rename zap_page_range_single_batched() to zap_vma_range_batched()
Let's make the naming more consistent with our new naming scheme.  While
at it, polish the kerneldoc a bit.

Link: https://lkml.kernel.org/r/20260227200848.114019-14-david@kernel.org
Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Carlos Llamas <cmllamas@google.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Dave Airlie <airlied@gmail.com>
Cc: David Ahern <dsahern@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dimitri Sivanich <dimitri.sivanich@hpe.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hartley Sweeten <hsweeten@visionengravers.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ian Abbott <abbotti@mev.co.uk>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Todd Kjos <tkjos@android.com>
Cc: Tvrtko Ursulin <tursulin@ursulin.net>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 32bc7fe4a6
commit 784a742e7b
@@ -536,7 +536,7 @@ static inline void sync_with_folio_pmd_zap(struct mm_struct *mm, pmd_t *pmdp)
 }
 
 struct zap_details;
-void zap_page_range_single_batched(struct mmu_gather *tlb,
+void zap_vma_range_batched(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long addr,
 		unsigned long size, struct zap_details *details);
 int zap_vma_for_reaping(struct vm_area_struct *vma);
@@ -855,9 +855,8 @@ static long madvise_dontneed_single_vma(struct madvise_behavior *madv_behavior)
 		.reclaim_pt = true,
 	};
 
-	zap_page_range_single_batched(
-			madv_behavior->tlb, madv_behavior->vma, range->start,
-			range->end - range->start, &details);
+	zap_vma_range_batched(madv_behavior->tlb, madv_behavior->vma,
+			range->start, range->end - range->start, &details);
 	return 0;
 }
 
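The madvise call site above is where the "_batched" suffix earns its keep:
the caller owns the mmu_gather (madv_behavior->tlb), so several zaps can
share one TLB-flush batch. A minimal sketch of that pattern, assuming only
the renamed function plus tlb_gather_mmu()/tlb_finish_mmu() as they appear
elsewhere in this patch; the function and its range parameters are
hypothetical:

/*
 * Sketch only: batch two zaps within one VMA under a single
 * mmu_gather, so the TLB is flushed once in tlb_finish_mmu().
 */
static void zap_two_ranges(struct vm_area_struct *vma,
		unsigned long a, unsigned long a_size,
		unsigned long b, unsigned long b_size)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, vma->vm_mm);
	/* Each range must lie entirely within @vma. */
	zap_vma_range_batched(&tlb, vma, a, a_size, NULL);
	zap_vma_range_batched(&tlb, vma, b, b_size, NULL);
	tlb_finish_mmu(&tlb);
}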
mm/memory.c | 23 +++++++++++++----------
@@ -2167,17 +2167,20 @@ void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
 }
 
 /**
- * zap_page_range_single_batched - remove user pages in a given range
+ * zap_vma_range_batched - zap page table entries in a vma range
  * @tlb: pointer to the caller's struct mmu_gather
- * @vma: vm_area_struct holding the applicable pages
- * @address: starting address of pages to remove
- * @size: number of bytes to remove
- * @details: details of shared cache invalidation
+ * @vma: the vma covering the range to zap
+ * @address: starting address of the range to zap
+ * @size: number of bytes to zap
+ * @details: details specifying zapping behavior
  *
- * @tlb shouldn't be NULL.  The range must fit into one VMA.  If @vma is for
- * hugetlb, @tlb is flushed and re-initialized by this function.
+ * @tlb must not be NULL.  The provided address range must be fully
+ * contained within @vma.  If @vma is for hugetlb, @tlb is flushed and
+ * re-initialized by this function.
+ *
+ * If @details is NULL, this function will zap all page table entries.
  */
-void zap_page_range_single_batched(struct mmu_gather *tlb,
+void zap_vma_range_batched(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
 {
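The polished kerneldoc makes the @details contract explicit: NULL means
"zap everything in the range", while a populated struct zap_details steers
the zap. A hedged sketch of the two call styles, borrowing the .reclaim_pt
initializer visible in the madvise hunk above; the surrounding function and
its arguments are hypothetical:

/* Sketch only: the two @details call styles permitted by the kerneldoc. */
static void zap_details_example(struct mmu_gather *tlb,
		struct vm_area_struct *vma,
		unsigned long address, unsigned long size)
{
	struct zap_details details = {
		.reclaim_pt = true,	/* same field the madvise caller sets */
	};

	/* NULL @details: zap all page table entries in the range. */
	zap_vma_range_batched(tlb, vma, address, size, NULL);

	/* Populated @details: zapping behavior driven by its fields. */
	zap_vma_range_batched(tlb, vma, address, size, &details);
}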
@@ -2225,7 +2228,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
 	struct mmu_gather tlb;
 
 	tlb_gather_mmu(&tlb, vma->vm_mm);
-	zap_page_range_single_batched(&tlb, vma, address, size, NULL);
+	zap_vma_range_batched(&tlb, vma, address, size, NULL);
 	tlb_finish_mmu(&tlb);
 }
 
@@ -4251,7 +4254,7 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 		size = (end_idx - start_idx) << PAGE_SHIFT;
 
 		tlb_gather_mmu(&tlb, vma->vm_mm);
-		zap_page_range_single_batched(&tlb, vma, start, size, details);
+		zap_vma_range_batched(&tlb, vma, start, size, details);
 		tlb_finish_mmu(&tlb);
 	}
 }