Miscellaneous x86 fixes:

- Prevent deadlock during shstk sigreturn (Rick Edgecombe)
 
  - Disable FRED when PTI is forced on (Dave Hansen)
 
  - Revert a CPA INVLPGB optimization that did not properly handle
    discontiguous virtual addresses (Dave Hansen)
 
 Signed-off-by: Ingo Molnar <mingo@kernel.org>
 -----BEGIN PGP SIGNATURE-----
 
 iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmnrdcARHG1pbmdvQGtl
 cm5lbC5vcmcACgkQEnMQ0APhK1jdgg//Rc1HsR9foqgOZLunF7no1jiscoFqHSF2
 0sa+Hdmujw3JLBNzTRWRC4PFs1rBuxXSRl/rgZboH2QJa675xsjiHbnieN0XEymM
 gd7m03ljbLRHS/JY52CtKC1DqoAY0YWMl+qSnQVBNS1uegHO8eyNkByVIL0SyALe
 aiBgd0gDoABFxbKvfKI6pKgr7NfdpGxeIhn/cl52FObCjk4d5Eq0Fu2PfCTWZjPG
 xXDdqWyi/HWUCbCUkLtnar/CYO6gbIjC8TSVY4awRZ0rAYMDiuY/H4mp6QtCakm7
 c+82NZFxDjFWGQBlN+XZW8sePS0AECNL6jnRzPmBZSdn82jazyyKAyTodChlvXF4
 1UROkOCR2UZs6iXxLIweS32CU8u9YiHPKslbXw+fYIPL4JSsUwxkrsLrjO3FkGap
 ke/Mn4W9hG6L/drRY6PW7j2728+2Kb0nQFefACMepxozRfKbuKJIeQ7Saji8/KKB
 ga3f1PECRbzD5YgUkifIqUUV21phyw8zvw4x/s8mWkXCezOwxMbheaG7DX5f7tdw
 jaR0SCS+cikYNATj69LMHs+x08AcITtFglV18DVTujVcpYSX09BVWA/jPWyGX+eC
 qzX7wnxbY/MkJvirrcZPa+ZL8tsrcEy9ZYQhpj5Bj859R8qLew3DvxhmbyRVEkk6
 B93tXZmyx7M=
 =hABk
 -----END PGP SIGNATURE-----

Merge tag 'x86-urgent-2026-04-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:

 - Prevent deadlock during shstk sigreturn (Rick Edgecombe)

 - Disable FRED when PTI is forced on (Dave Hansen)

 - Revert a CPA INVLPGB optimization that did not properly handle
   discontiguous virtual addresses (Dave Hansen)

* tag 'x86-urgent-2026-04-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Revert INVLPGB optimization for set_memory code
  x86/cpu: Disable FRED when PTI is forced on
  x86/shstk: Prevent deadlock during shstk sigreturn
This commit is contained in:
Linus Torvalds 2026-04-24 10:05:42 -07:00
commit 8f4e8687c8
4 changed files with 42 additions and 26 deletions

View File

@@ -1885,6 +1885,7 @@ config X86_USER_SHADOW_STACK
bool "X86 userspace shadow stack"
depends on AS_WRUSS
depends on X86_64
depends on PER_VMA_LOCK
select ARCH_USES_HIGH_VMA_FLAGS
select ARCH_HAS_USER_SHADOW_STACK
select X86_CET

View File

@@ -326,10 +326,8 @@ static int shstk_push_sigframe(unsigned long *ssp)
static int shstk_pop_sigframe(unsigned long *ssp)
{
struct vm_area_struct *vma;
unsigned long token_addr;
bool need_to_check_vma;
int err = 1;
unsigned int seq;
/*
* It is possible for the SSP to be off the end of a shadow stack by 4
@@ -340,25 +338,35 @@ static int shstk_pop_sigframe(unsigned long *ssp)
if (!IS_ALIGNED(*ssp, 8))
return -EINVAL;
need_to_check_vma = PAGE_ALIGN(*ssp) == *ssp;
do {
struct vm_area_struct *vma;
bool valid_vma;
int err;
if (need_to_check_vma)
if (mmap_read_lock_killable(current->mm))
return -EINTR;
err = get_shstk_data(&token_addr, (unsigned long __user *)*ssp);
if (unlikely(err))
goto out_err;
if (need_to_check_vma) {
vma = find_vma(current->mm, *ssp);
if (!vma || !(vma->vm_flags & VM_SHADOW_STACK)) {
err = -EFAULT;
goto out_err;
}
valid_vma = vma && (vma->vm_flags & VM_SHADOW_STACK);
/*
* VMAs can change between get_shstk_data() and find_vma().
* Watch for changes and ensure that 'token_addr' comes from
* 'vma' by recording a seqcount.
*
* Ignore the return value of mmap_lock_speculate_try_begin()
* because the mmap lock excludes the possibility of writers.
*/
mmap_lock_speculate_try_begin(current->mm, &seq);
mmap_read_unlock(current->mm);
}
if (!valid_vma)
return -EINVAL;
err = get_shstk_data(&token_addr, (unsigned long __user *)*ssp);
if (err)
return err;
} while (mmap_lock_speculate_retry(current->mm, seq));
/* Restore SSP aligned? */
if (unlikely(!IS_ALIGNED(token_addr, 8)))
@@ -371,10 +379,6 @@ static int shstk_pop_sigframe(unsigned long *ssp)
*ssp = token_addr;
return 0;
out_err:
if (need_to_check_vma)
mmap_read_unlock(current->mm);
return err;
}
int setup_signal_shadow_stack(struct ksignal *ksig)

View File

@@ -399,6 +399,15 @@ static void cpa_flush_all(unsigned long cache)
on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}
/*
 * Per-CPU TLB flush callback for a change-page-attribute (CPA) operation.
 *
 * 'data' is a struct cpa_data describing the modified range; flushes one
 * kernel TLB entry per page in that range on the CPU this runs on.
 * Invoked via on_each_cpu() from cpa_flush() (see hunk below).
 *
 * NOTE(review): diff context suggests pages are addressed individually via
 * __cpa_addr() precisely because the range may be discontiguous — confirm
 * against the full set_memory.c.
 */
static void __cpa_flush_tlb(void *data)
{
struct cpa_data *cpa = data;
unsigned int i;
/* One single-page flush per page; fix_addr() canonicalizes the address. */
for (i = 0; i < cpa->numpages; i++)
flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
}
static int collapse_large_pages(unsigned long addr, struct list_head *pgtables);
static void cpa_collapse_large_pages(struct cpa_data *cpa)
@@ -435,7 +444,6 @@ static void cpa_collapse_large_pages(struct cpa_data *cpa)
static void cpa_flush(struct cpa_data *cpa, int cache)
{
unsigned long start, end;
unsigned int i;
BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
@@ -445,12 +453,10 @@ static void cpa_flush(struct cpa_data *cpa, int cache)
goto collapse_large_pages;
}
start = fix_addr(__cpa_addr(cpa, 0));
end = start + cpa->numpages * PAGE_SIZE;
if (cpa->force_flush_all)
end = TLB_FLUSH_ALL;
flush_tlb_kernel_range(start, end);
if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling)
flush_tlb_all();
else
on_each_cpu(__cpa_flush_tlb, cpa, 1);
if (!cache)
goto collapse_large_pages;

View File

@@ -105,6 +105,11 @@ void __init pti_check_boottime_disable(void)
pr_debug("PTI enabled, disabling INVLPGB\n");
setup_clear_cpu_cap(X86_FEATURE_INVLPGB);
}
if (cpu_feature_enabled(X86_FEATURE_FRED)) {
pr_debug("PTI enabled, disabling FRED\n");
setup_clear_cpu_cap(X86_FEATURE_FRED);
}
}
static int __init pti_parse_cmdline(char *arg)