linux/arch/um/include/asm/pgtable.h
Linus Torvalds 065c4e67cc

Merge tag 'uml-for-7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/uml/linux

Pull uml updates from Johannes Berg:
 "Mostly cleanups and small things, notably:

   - musl libc compatibility

   - vDSO installation fix

   - TLB sync race fix for recent SMP support

   - build fix for 32-bit with Clang 20/21"

* tag 'uml-for-7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/uml/linux:
  um: Disable GCOV_PROFILE_ALL on 32-bit UML with Clang 20/21
  um: drivers: call kernel_strrchr() explicitly in cow_user.c
  um: Replace strncpy() with strnlen()+memcpy_and_pad() in strncpy_chunk_from_user()
  x86/um: fix vDSO installation
  um: Remove CONFIG_FRAME_WARN from x86_64_defconfig
  um: Fix pte_read() and pte_exec() for kernel mappings
  um: Fix potential race condition in TLB sync
  um: time-travel: clean up kernel-doc warnings
  um: avoid struct sigcontext redefinition with musl
  um: fix address-of CMSG_DATA() rvalue in stub
2026-04-20 16:36:46 -07:00

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 */
#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H
#include <asm/page.h>
#include <linux/mm_types.h>
#define _PAGE_PRESENT 0x001
#define _PAGE_NEEDSYNC 0x002
#define _PAGE_RW 0x020
#define _PAGE_USER 0x040
#define _PAGE_ACCESSED 0x080
#define _PAGE_DIRTY 0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
                                pte_present gives true */
/* We borrow bit 10 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE 0x400
#if CONFIG_PGTABLE_LEVELS == 4
#include <asm/pgtable-4level.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm/pgtable-2level.h>
#else
#error "Unsupported number of page table levels"
#endif
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#ifndef COMPILE_OFFSETS
#include <as-layout.h> /* for high_physmem */
#endif
#define VMALLOC_OFFSET (__va_space)
#define VMALLOC_START ((high_physmem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END (TASK_SIZE-2*PAGE_SIZE)
#define MODULES_VADDR VMALLOC_START
#define MODULES_END VMALLOC_END
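/*
 * Worked example (illustrative, with assumed values rather than a real
 * configuration): if high_physmem is 0x60100000 and __va_space is 8MiB
 * (0x800000), then VMALLOC_START is
 * (0x60100000 + 0x800000) & ~0x7fffff == 0x60800000, i.e. vmalloc space
 * starts at the first 8MiB boundary past the end of physical memory.
 */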
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
/*
 * The i386 can't do page protection for execute, and considers that the
 * same as a read. Also, write permissions imply read permissions. This is
 * the closest we can get..
 */
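/*
 * Editorial note: PAGE_COPY and PAGE_READONLY above carry identical bits.
 * A copy-on-write mapping is write-protected just like a read-only one;
 * the write-fault path tells them apart from the VMA flags, not from the
 * PTE.
 */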
#define pte_clear(mm, addr, xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEEDSYNC))
#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEEDSYNC))
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEEDSYNC; } while (0)
#define pmd_needsync(x) (pmd_val(x) & _PAGE_NEEDSYNC)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEEDSYNC)
#define pud_needsync(x) (pud_val(x) & _PAGE_NEEDSYNC)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEEDSYNC)
#define p4d_needsync(x) (p4d_val(x) & _PAGE_NEEDSYNC)
#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEEDSYNC)
#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
/*
 * =================================
 * Flags checking section.
 * =================================
 */
static inline int pte_none(pte_t pte)
{
        return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
        return !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_exec(pte_t pte)
{
        return !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_write(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_RW) &&
               !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_dirty(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_needsync(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_NEEDSYNC);
}
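/*
 * Illustrative sketch (hypothetical helper, not part of this header): per
 * the comment above, callers must check pte_present() before using the
 * accessors in this section.
 */
#if 0
static inline int example_pte_user_writable(pte_t pte)
{
        if (!pte_present(pte))  /* the accessors are undefined here */
                return 0;

        return pte_write(pte) && pte_get_bits(pte, _PAGE_USER);
}
#endif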
/*
 * =================================
 * Flags setting section.
 * =================================
 */
static inline pte_t pte_mkclean(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_DIRTY);
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_ACCESSED);
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_RW);
        return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
        pte_set_bits(pte, _PAGE_USER);
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_set_bits(pte, _PAGE_DIRTY);
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_set_bits(pte, _PAGE_ACCESSED);
        return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
        pte_set_bits(pte, _PAGE_RW);
        return pte;
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_NEEDSYNC);
        return pte;
}

static inline pte_t pte_mkneedsync(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEEDSYNC);
        return pte;
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
        pte_copy(*pteptr, pteval);

        /* If it's a swap entry, it needs to be marked _PAGE_NEEDSYNC so
         * update_pte_range knows to unmap it.
         */
        *pteptr = pte_mkneedsync(*pteptr);
}
#define PFN_PTE_SHIFT PAGE_SHIFT
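/*
 * Record [start, end) as needing a sync into the host address space,
 * widening any previously recorded range. The lock guard (part of the TLB
 * sync race fix for the recent SMP support, see the merge log above) keeps
 * concurrent updaters from losing each other's ranges.
 */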
static inline void um_tlb_mark_sync(struct mm_struct *mm, unsigned long start,
                                    unsigned long end)
{
        guard(spinlock_irqsave)(&mm->context.sync_tlb_lock);

        if (!mm->context.sync_tlb_range_to) {
                mm->context.sync_tlb_range_from = start;
                mm->context.sync_tlb_range_to = end;
        } else {
                if (start < mm->context.sync_tlb_range_from)
                        mm->context.sync_tlb_range_from = start;
                if (end > mm->context.sync_tlb_range_to)
                        mm->context.sync_tlb_range_to = end;
        }
}
#define set_ptes set_ptes
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep, pte_t pte, int nr)
{
        /* Basically the default implementation */
        size_t length = nr * PAGE_SIZE;

        for (;;) {
                set_pte(ptep, pte);
                if (--nr == 0)
                        break;
                ptep++;
                /* Advance by one page frame (PFN_PTE_SHIFT == PAGE_SHIFT) */
                pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
        }

        um_tlb_mark_sync(mm, addr, addr + length);
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        /* Two PTEs compare equal even if their _PAGE_NEEDSYNC bits differ. */
        return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEEDSYNC);
}
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
        pte_t pte;

        pte_set_val(pte, pfn_to_phys(pfn), pgprot);
        return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
        return pte;
}
/*
 * The pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This macro returns the kernel virtual address of the page table page
 * referenced by the given pmd entry.
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
#define update_mmu_cache(vma,address,ptep) do {} while (0)
#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do {} while (0)
/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ----------------> E < type -> 0 0 0 1 0
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   _PAGE_NEEDSYNC (bit 1) is always set to 1 in set_pte().
 */
#define __swp_type(x) (((x).val >> 5) & 0x1f)
#define __swp_offset(x) ((x).val >> 11)
#define __swp_entry(type, offset) \
        ((swp_entry_t) { (((type) & 0x1f) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
        ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
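/*
 * Worked example (illustrative, values assumed): __swp_entry(3, 0x1234)
 * packs to (0x1234 << 11) | (3 << 5) == 0x91a060. Bits 0-4 stay clear, so
 * the resulting PTE is !pte_present(), and __swp_type()/__swp_offset()
 * recover 3 and 0x1234, matching the diagram above.
 */
#if 0 /* hypothetical self-check, not built */
static inline void example_swp_roundtrip(void)
{
        swp_entry_t e = __swp_entry(3, 0x1234); /* e.val == 0x91a060 */

        WARN_ON(__swp_type(e) != 3 || __swp_offset(e) != 0x1234);
}
#endif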
static inline bool pte_swp_exclusive(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
        pte_set_bits(pte, _PAGE_SWP_EXCLUSIVE);
        return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_SWP_EXCLUSIVE);
        return pte;
}
#endif