Miscellaneous x86 cleanups for v7.1:

  - Consolidate AMD and Hygon cases in parse_topology() (Wei Wang)
  - asm constraints cleanups in __iowrite32_copy() (Uros Bizjak)
  - Drop AMD Extended Interrupt LVT macros (Naveen N Rao)
  - Don't use REALLY_SLOW_IO for delays (Juergen Gross)
  - paravirt cleanups (Juergen Gross)
  - FPU code cleanups (Borislav Petkov)
  - split-lock handling code cleanups (Borislav Petkov, Ronan Pigott)
 
 Signed-off-by: Ingo Molnar <mingo@kernel.org>
 -----BEGIN PGP SIGNATURE-----
 
 iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmncsF0RHG1pbmdvQGtl
 cm5lbC5vcmcACgkQEnMQ0APhK1gZ1A//TmQrq/spNIRx7KqSjT9u9166OaQaBeA/
 r535C9n1d/rZxw0l10vIoWeSOpDIXEPLpMguvs463pvgLVfdrNOXABn1Kw1RR6dv
 yTHV47KUE1j9FIuJ4Y2fQeHhdTC8cdddrC06fEFOezftTMiAMIR/GMaeVA5ExzQd
 9tcyocH10gjhtKCF+ILFGt7OdPn75YDIc8ysJAAPrsF6Dw222K5E7p4XedmEYL54
 W7WVknLK2jP/BdXp17wDVunQP/Hl7huiM9DMgNlv6eliWV6nyH/hONRm5NrgBUEG
 s2URPPEu30thveMHQ1qv31P6ZY6lVFi0VylubJ+OdPofUJDCdCINRk22Bc6kXurZ
 Y8ZV93UyuIgVfvlI9J5UoHSkpi3owMjvrQShquxH2hDbCzzBvwpI7/+KHwWjgVsH
 9+xdOkjR40UrlmwhyyzqTzmB10mg2SM1/YK5Ca2DcneibIkQRlfXdNXQqNikWqhN
 COAEX6U5ayKEu/TjbiNH4zNInJCEQMI65Jiz+oTmdnf+iCQ1L2sp+zSOB6SoyQtp
 rTyubHDDGu6pq9IEATx3hn5BYO7t6Ly4KJksWCAJ0G8lnP3HRESD9l6QvjqipMWB
 JToVwWsuqgL3zWqCpuvBOErpHslgzN6Usbym6blyrp8ERKVIb2elDt9lDAWyz5X3
 7hS8sNulqDw=
 =Ox7s
 -----END PGP SIGNATURE-----

Merge tag 'x86-cleanups-2026-04-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cleanups from Ingo Molnar:

 - Consolidate AMD and Hygon cases in parse_topology() (Wei Wang)

 - asm constraints cleanups in __iowrite32_copy() (Uros Bizjak)

 - Drop AMD Extended Interrupt LVT macros (Naveen N Rao)

 - Don't use REALLY_SLOW_IO for delays (Juergen Gross)

 - paravirt cleanups (Juergen Gross)

 - FPU code cleanups (Borislav Petkov)

 - split-lock handling code cleanups (Borislav Petkov, Ronan Pigott)

* tag 'x86-cleanups-2026-04-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu: Correct the comment explaining what xfeatures_in_use() does
  x86/split_lock: Don't warn about unknown split_lock_detect parameter
  x86/fpu: Correct misspelled xfeaures_to_write local var
  x86/apic: Drop AMD Extended Interrupt LVT macros
  x86/cpu/topology: Consolidate AMD and Hygon cases in parse_topology()
  block/floppy: Don't use REALLY_SLOW_IO for delays
  x86/paravirt: Replace io_delay() hook with a bool
  x86/irqflags: Preemptively move include paravirt.h directive where it belongs
  x86/split_lock: Restructure the unwieldy switch-case in sld_state_show()
  x86/local: Remove trailing semicolon from _ASM_XADD in local_add_return()
  x86/asm: Use inout "+" asm constraint modifiers in __iowrite32_copy()
This commit is contained in:
Linus Torvalds 2026-04-14 14:03:27 -07:00
commit ac633ba77c
20 changed files with 95 additions and 105 deletions

View File

@ -1748,7 +1748,7 @@ EXPORT_SYMBOL(get_ibs_caps);
static inline int get_eilvt(int offset)
{
return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
return !setup_APIC_eilvt(offset, 0, APIC_DELIVERY_MODE_NMI, 1);
}
static inline int put_eilvt(int offset)
@ -1897,7 +1897,7 @@ static void setup_APIC_ibs(void)
if (offset < 0)
goto failed;
if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
if (!setup_APIC_eilvt(offset, 0, APIC_DELIVERY_MODE_NMI, 0))
return;
failed:
pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
@ -1910,7 +1910,7 @@ static void clear_APIC_ibs(void)
offset = get_ibs_lvt_offset();
if (offset >= 0)
setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
setup_APIC_eilvt(offset, 0, APIC_DELIVERY_MODE_FIXED, 1);
}
static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)

View File

@ -138,15 +138,8 @@
#define APIC_SEOI 0x420
#define APIC_IER 0x480
#define APIC_EILVTn(n) (0x500 + 0x10 * n)
#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */
#define APIC_EILVT_NR_AMD_10H 4
#define APIC_EILVT_NR_MAX APIC_EILVT_NR_AMD_10H
#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF)
#define APIC_EILVT_MSG_FIX 0x0
#define APIC_EILVT_MSG_SMI 0x2
#define APIC_EILVT_MSG_NMI 0x4
#define APIC_EILVT_MSG_EXT 0x7
#define APIC_EILVT_MASKED (1 << 16)
#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
#define APIC_BASE_MSR 0x800

View File

@ -29,9 +29,6 @@
#define CSW fd_routine[can_use_virtual_dma & 1]
#define fd_inb(base, reg) inb_p((base) + (reg))
#define fd_outb(value, base, reg) outb_p(value, (base) + (reg))
#define fd_request_dma() CSW._request_dma(FLOPPY_DMA, "floppy")
#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
@ -49,6 +46,26 @@ static char *virtual_dma_addr;
static int virtual_dma_mode;
static int doing_pdma;
static inline u8 fd_inb(u16 base, u16 reg)
{
u8 ret = inb_p(base + reg);
native_io_delay();
native_io_delay();
native_io_delay();
return ret;
}
static inline void fd_outb(u8 value, u16 base, u16 reg)
{
outb_p(value, base + reg);
native_io_delay();
native_io_delay();
native_io_delay();
}
static irqreturn_t floppy_hardint(int irq, void *dev_id)
{
unsigned char st;
@ -79,9 +96,9 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id)
if (st != (STATUS_DMA | STATUS_READY))
break;
if (virtual_dma_mode)
outb_p(*lptr, virtual_dma_port + FD_DATA);
fd_outb(*lptr, virtual_dma_port, FD_DATA);
else
*lptr = inb_p(virtual_dma_port + FD_DATA);
*lptr = fd_inb(virtual_dma_port, FD_DATA);
}
virtual_dma_count = lcount;
virtual_dma_addr = lptr;

View File

@ -23,7 +23,7 @@ static inline void xsetbv(u32 index, u64 value)
/*
* Return a mask of xfeatures which are currently being tracked
* by the processor as being in the initial configuration.
* by the processor as being not in the initial configuration.
*
* Callers should check X86_FEATURE_XGETBV1.
*/

View File

@ -218,9 +218,8 @@ static inline void __iowrite32_copy(void __iomem *to, const void *from,
size_t count)
{
asm volatile("rep movsl"
: "=&c"(count), "=&D"(to), "=&S"(from)
: "0"(count), "1"(to), "2"(from)
: "memory");
: "+D"(to), "+S"(from), "+c"(count)
: : "memory");
}
#define __iowrite32_copy __iowrite32_copy
#endif
@ -243,20 +242,18 @@ extern int io_delay_type;
extern void io_delay_init(void);
#if defined(CONFIG_PARAVIRT)
#include <asm/paravirt.h>
#include <asm/paravirt-base.h>
#else
#define call_io_delay() true
#endif
static inline void slow_down_io(void)
{
native_io_delay();
#ifdef REALLY_SLOW_IO
native_io_delay();
native_io_delay();
native_io_delay();
#endif
}
if (!call_io_delay())
return;
#endif
native_io_delay();
}
#define BUILDIO(bwl, type) \
static inline void out##bwl##_p(type value, u16 port) \

View File

@ -96,11 +96,11 @@ static __always_inline void halt(void)
native_halt();
}
#endif /* __ASSEMBLER__ */
#else
#include <asm/paravirt.h>
#endif /* CONFIG_PARAVIRT */
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef CONFIG_PARAVIRT_XXL
#ifndef __ASSEMBLER__
#include <linux/types.h>

View File

@ -106,7 +106,7 @@ static inline bool local_add_negative(long i, local_t *l)
static inline long local_add_return(long i, local_t *l)
{
long __i = i;
asm volatile(_ASM_XADD "%0, %1;"
asm volatile(_ASM_XADD "%0, %1"
: "+r" (i), "+m" (l->a.counter)
: : "memory");
return i + __i;

View File

@ -15,6 +15,8 @@ struct pv_info {
#ifdef CONFIG_PARAVIRT_XXL
u16 extra_user_64bit_cs; /* __USER_CS if none */
#endif
bool io_delay;
const char *name;
};
@ -26,6 +28,10 @@ u64 _paravirt_ident_64(u64);
#endif
#define paravirt_nop ((void *)nop_func)
#ifdef CONFIG_PARAVIRT
#define call_io_delay() pv_info.io_delay
#endif
#ifdef CONFIG_PARAVIRT_SPINLOCKS
void paravirt_set_cap(void);
#else

View File

@ -19,17 +19,6 @@
#include <linux/cpumask.h>
#include <asm/frame.h>
/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
PVOP_VCALL0(pv_ops, cpu.io_delay);
#ifdef REALLY_SLOW_IO
PVOP_VCALL0(pv_ops, cpu.io_delay);
PVOP_VCALL0(pv_ops, cpu.io_delay);
PVOP_VCALL0(pv_ops, cpu.io_delay);
#endif
}
void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);

View File

@ -30,8 +30,6 @@ struct pv_lazy_ops {
struct pv_cpu_ops {
/* hooks for various privileged instructions */
void (*io_delay)(void);
#ifdef CONFIG_PARAVIRT_XXL
unsigned long (*get_debugreg)(int regno);
void (*set_debugreg)(int regno, unsigned long value);

View File

@ -332,7 +332,7 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
* Since the offsets must be consistent for all cores, we keep track
* of the LVT offsets in software and reserve the offset for the same
* vector also to be used on other cores. An offset is freed by
* setting the entry to APIC_EILVT_MASKED.
* setting the entry to APIC_LVT_MASKED.
*
* If the BIOS is right, there should be no conflicts. Otherwise a
* "[Firmware Bug]: ..." error message is generated. However, if
@ -344,9 +344,9 @@ static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];
static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
{
return (old & APIC_EILVT_MASKED)
|| (new == APIC_EILVT_MASKED)
|| ((new & ~APIC_EILVT_MASKED) == old);
return (old & APIC_LVT_MASKED)
|| (new == APIC_LVT_MASKED)
|| ((new & ~APIC_LVT_MASKED) == old);
}
static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
@ -358,13 +358,13 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
rsvd = atomic_read(&eilvt_offsets[offset]);
do {
vector = rsvd & ~APIC_EILVT_MASKED; /* 0: unassigned */
vector = rsvd & ~APIC_LVT_MASKED; /* 0: unassigned */
if (vector && !eilvt_entry_is_changeable(vector, new))
/* may not change if vectors are different */
return rsvd;
} while (!atomic_try_cmpxchg(&eilvt_offsets[offset], &rsvd, new));
rsvd = new & ~APIC_EILVT_MASKED;
rsvd = new & ~APIC_LVT_MASKED;
if (rsvd && rsvd != vector)
pr_info("LVT offset %d assigned for vector 0x%02x\n",
offset, rsvd);

View File

@ -132,6 +132,12 @@ static void __init sld_state_setup(void)
sld_state = state;
}
static __init int setup_split_lock_detect(char *arg)
{
return 1;
}
__setup("split_lock_detect=", setup_split_lock_detect);
static void __init __split_lock_setup(void)
{
if (!split_lock_verify_msr(false)) {
@ -391,34 +397,35 @@ static void __init split_lock_setup(struct cpuinfo_x86 *c)
static void sld_state_show(void)
{
if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
const char *action = "warning";
if ((!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) ||
(sld_state == sld_off))
return;
switch (sld_state) {
case sld_off:
pr_info("disabled\n");
break;
case sld_warn:
if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n");
if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"x86/splitlock", NULL, splitlock_cpu_offline) < 0)
pr_warn("No splitlock CPU offline handler\n");
} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
pr_info("#DB: warning on user-space bus_locks\n");
}
break;
case sld_fatal:
if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
pr_info("#AC: crashing the kernel on kernel split_locks and sending SIGBUS on user-space split_locks\n");
else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
pr_info("#DB: sending SIGBUS on user-space bus_locks\n");
break;
case sld_ratelimit:
if (sld_state == sld_ratelimit) {
if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
pr_info("#DB: setting system wide bus lock rate limit to %u/sec\n", bld_ratelimit.burst);
break;
return;
} else if (sld_state == sld_fatal) {
action = "sending SIGBUS";
}
if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
pr_info("#AC: crashing the kernel on kernel split_locks and %s on user-space split_locks\n", action);
/*
* This is handling the case where a CPU goes offline at the
* moment where split lock detection is disabled in the warn
* setting, see split_lock_warn(). It doesn't have any effect
* in the fatal case.
*/
if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/splitlock", NULL, splitlock_cpu_offline) < 0)
pr_warn("No splitlock CPU offline handler\n");
} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
pr_info("#DB: %s on user-space bus_locks\n", action);
}
}

View File

@ -521,7 +521,7 @@ static void mce_threshold_block_init(struct threshold_block *b, int offset)
static int setup_APIC_mce_threshold(int reserved, int new)
{
if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
APIC_EILVT_MSG_FIX, 0))
APIC_DELIVERY_MODE_FIXED, 0))
return new;
return reserved;
@ -712,11 +712,11 @@ static void smca_enable_interrupt_vectors(void)
return;
offset = (mca_intr_cfg & SMCA_THR_LVT_OFF) >> 12;
if (!setup_APIC_eilvt(offset, THRESHOLD_APIC_VECTOR, APIC_EILVT_MSG_FIX, 0))
if (!setup_APIC_eilvt(offset, THRESHOLD_APIC_VECTOR, APIC_DELIVERY_MODE_FIXED, 0))
data->thr_intr_en = 1;
offset = (mca_intr_cfg & MASK_DEF_LVTOFF) >> 4;
if (!setup_APIC_eilvt(offset, DEFERRED_ERROR_VECTOR, APIC_EILVT_MSG_FIX, 0))
if (!setup_APIC_eilvt(offset, DEFERRED_ERROR_VECTOR, APIC_DELIVERY_MODE_FIXED, 0))
data->dfr_intr_en = 1;
}

View File

@ -157,8 +157,8 @@ static void parse_topology(struct topo_scan *tscan, bool early)
switch (c->x86_vendor) {
case X86_VENDOR_AMD:
if (IS_ENABLED(CONFIG_CPU_SUP_AMD))
cpu_parse_topology_amd(tscan);
case X86_VENDOR_HYGON:
cpu_parse_topology_amd(tscan);
break;
case X86_VENDOR_CENTAUR:
case X86_VENDOR_ZHAOXIN:
@ -170,10 +170,6 @@ static void parse_topology(struct topo_scan *tscan, bool early)
if (c->cpuid_level >= 0x1a)
c->topo.cpu_type = cpuid_eax(0x1a);
break;
case X86_VENDOR_HYGON:
if (IS_ENABLED(CONFIG_CPU_SUP_HYGON))
cpu_parse_topology_amd(tscan);
break;
}
}

View File

@ -339,7 +339,7 @@ arch_initcall(activate_jump_labels);
static void __init vmware_paravirt_ops_setup(void)
{
pv_info.name = "VMware hypervisor";
pv_ops.cpu.io_delay = paravirt_nop;
pv_info.io_delay = false;
if (vmware_tsc_khz == 0)
return;

View File

@ -267,16 +267,16 @@ static inline void os_xrstor_supervisor(struct fpstate *fpstate)
*/
static inline u64 xfeatures_need_sigframe_write(void)
{
u64 xfeaures_to_write;
u64 xfeatures_to_write;
/* In-use features must be written: */
xfeaures_to_write = xfeatures_in_use();
xfeatures_to_write = xfeatures_in_use();
/* Also write all non-optimizable sigframe features: */
xfeaures_to_write |= XFEATURE_MASK_USER_SUPPORTED &
xfeatures_to_write |= XFEATURE_MASK_USER_SUPPORTED &
~XFEATURE_MASK_SIGFRAME_INITOPT;
return xfeaures_to_write;
return xfeatures_to_write;
}
/*

View File

@ -75,12 +75,6 @@ DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visi
static int has_steal_clock = 0;
static int has_guest_poll = 0;
/*
* No need for any "IO delay" on KVM
*/
static void kvm_io_delay(void)
{
}
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
@ -327,7 +321,7 @@ static void __init paravirt_ops_setup(void)
pv_info.name = "KVM";
if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
pv_ops.cpu.io_delay = kvm_io_delay;
pv_info.io_delay = false;
#ifdef CONFIG_X86_IO_APIC
no_timer_check = 1;

View File

@ -94,6 +94,7 @@ struct pv_info pv_info = {
#ifdef CONFIG_PARAVIRT_XXL
.extra_user_64bit_cs = __USER_CS,
#endif
.io_delay = true,
};
/* 64-bit pagetable entries */
@ -101,8 +102,6 @@ struct pv_info pv_info = {
struct paravirt_patch_template pv_ops = {
/* Cpu ops. */
.cpu.io_delay = native_io_delay,
#ifdef CONFIG_PARAVIRT_XXL
.cpu.cpuid = native_cpuid,
.cpu.get_debugreg = pv_native_get_debugreg,

View File

@ -1045,10 +1045,6 @@ static void xen_update_io_bitmap(void)
}
#endif
static void xen_io_delay(void)
{
}
static DEFINE_PER_CPU(unsigned long, xen_cr0_value);
static unsigned long xen_read_cr0(void)
@ -1208,6 +1204,7 @@ void __init xen_setup_vcpu_info_placement(void)
static const struct pv_info xen_info __initconst = {
.extra_user_64bit_cs = FLAT_USER_CS64,
.io_delay = false,
.name = "Xen",
};
@ -1391,7 +1388,6 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
pv_ops.cpu.invalidate_io_bitmap = xen_invalidate_io_bitmap;
pv_ops.cpu.update_io_bitmap = xen_update_io_bitmap;
#endif
pv_ops.cpu.io_delay = xen_io_delay;
pv_ops.cpu.start_context_switch = xen_start_context_switch;
pv_ops.cpu.end_context_switch = xen_end_context_switch;

View File

@ -145,8 +145,6 @@
* Better audit of register_blkdev.
*/
#define REALLY_SLOW_IO
#define DEBUGT 2
#define DPRINT(format, args...) \