From 09fbb775f1d01945119c4a0be4afacf30cc86796 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Tue, 16 Dec 2025 11:51:20 +0100 Subject: [PATCH 01/11] x86/asm: Use inout "+" asm constraint modifiers in __iowrite32_copy() Use inout "+" asm constraint modifiers to simplify asm operands. No functional changes intended. Signed-off-by: Uros Bizjak Signed-off-by: Borislav Petkov (AMD) Reviewed-by: H. Peter Anvin (Intel) Link: https://patch.msgid.link/20251216105134.248196-1-ubizjak@gmail.com --- arch/x86/include/asm/io.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index ca309a3227c7..2ea25745e059 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -218,9 +218,8 @@ static inline void __iowrite32_copy(void __iomem *to, const void *from, size_t count) { asm volatile("rep movsl" - : "=&c"(count), "=&D"(to), "=&S"(from) - : "0"(count), "1"(to), "2"(from) - : "memory"); + : "+D"(to), "+S"(from), "+c"(count) + : : "memory"); } #define __iowrite32_copy __iowrite32_copy #endif From ceea7868b594ccf376562af40b9463d9f2fb7dd0 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Sun, 8 Mar 2026 18:12:35 +0100 Subject: [PATCH 02/11] x86/local: Remove trailing semicolon from _ASM_XADD in local_add_return() Remove the trailing semicolon from the inline assembly statement in local_add_return(). The _ASM_XADD macro already expands to a complete instruction, making the extra semicolon unnecessary. More importantly, the stray semicolon causes GCC to treat the inline asm as containing multiple instructions, which can skew its internal instruction count estimation and affect optimization heuristics. No functional change intended. 
Signed-off-by: Uros Bizjak Signed-off-by: Borislav Petkov (AMD) Link: https://patch.msgid.link/20260308171250.7278-1-ubizjak@gmail.com --- arch/x86/include/asm/local.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h index 59aa966dc212..4957018fef3e 100644 --- a/arch/x86/include/asm/local.h +++ b/arch/x86/include/asm/local.h @@ -106,7 +106,7 @@ static inline bool local_add_negative(long i, local_t *l) static inline long local_add_return(long i, local_t *l) { long __i = i; - asm volatile(_ASM_XADD "%0, %1;" + asm volatile(_ASM_XADD "%0, %1" : "+r" (i), "+m" (l->a.counter) : : "memory"); return i + __i; From 04e43ec9f002ed1041b41a6df4c645ef3148da9f Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Thu, 26 Feb 2026 15:50:33 +0100 Subject: [PATCH 03/11] x86/split_lock: Restructure the unwieldy switch-case in sld_state_show() Split the handling in two parts: 1. handle the sld_state option first 2. handle X86_FEATURE flag-based printing afterwards This splits the function nicely into two, separate logical things which are easier to parse and understand. Also, zap the printing in the disabled case. 
Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Xiaoyao Li Link: https://patch.msgid.link/20260226145033.GAaaBduQ0rWXydOkAm@fat_crate.local --- arch/x86/kernel/cpu/bus_lock.c | 49 +++++++++++++++++----------------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c index fb166662bc0d..660aa9aa8bec 100644 --- a/arch/x86/kernel/cpu/bus_lock.c +++ b/arch/x86/kernel/cpu/bus_lock.c @@ -391,34 +391,35 @@ static void __init split_lock_setup(struct cpuinfo_x86 *c) static void sld_state_show(void) { - if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) && - !boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) + const char *action = "warning"; + + if ((!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) && + !boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) || + (sld_state == sld_off)) return; - switch (sld_state) { - case sld_off: - pr_info("disabled\n"); - break; - case sld_warn: - if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) { - pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n"); - if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, - "x86/splitlock", NULL, splitlock_cpu_offline) < 0) - pr_warn("No splitlock CPU offline handler\n"); - } else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) { - pr_info("#DB: warning on user-space bus_locks\n"); - } - break; - case sld_fatal: - if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) - pr_info("#AC: crashing the kernel on kernel split_locks and sending SIGBUS on user-space split_locks\n"); - else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) - pr_info("#DB: sending SIGBUS on user-space bus_locks\n"); - break; - case sld_ratelimit: + if (sld_state == sld_ratelimit) { if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) pr_info("#DB: setting system wide bus lock rate limit to %u/sec\n", bld_ratelimit.burst); - break; + return; + } else if (sld_state == sld_fatal) { + action = "sending SIGBUS"; + } + + if 
(boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) { + pr_info("#AC: crashing the kernel on kernel split_locks and %s on user-space split_locks\n", action); + + /* + * This is handling the case where a CPU goes offline at the + * moment where split lock detection is disabled in the warn + * setting, see split_lock_warn(). It doesn't have any effect + * in the fatal case. + */ + if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/splitlock", NULL, splitlock_cpu_offline) < 0) + pr_warn("No splitlock CPU offline handler\n"); + + } else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) { + pr_info("#DB: %s on user-space bus_locks\n", action); } } From 36c1eb9531e0c9bdcb3494142123f1c1e128367b Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 19 Jan 2026 19:26:27 +0100 Subject: [PATCH 04/11] x86/irqflags: Preemptively move include paravirt.h directive where it belongs Commit 22cc5ca5de52 ("x86/paravirt: Move halt paravirt calls under CONFIG_PARAVIRT") moved some paravirt hooks from the CONFIG_PARAVIRT_XXL umbrella to CONFIG_PARAVIRT, but missed to move the associated "#include " in irqflags.h from CONFIG_PARAVIRT_XXL to CONFIG_PARAVIRT. This hasn't resulted in build failures yet, as all use cases of irqflags.h had paravirt.h included via other header files, even without CONFIG_PARAVIRT_XXL being set. In order to allow changing those other header files, e.g. by no longer including paravirt.h, fix irqflags.h by moving inclusion of paravirt.h under the CONFIG_PARAVIRT umbrella. [ bp: Massage commit message. 
] Fixes: 22cc5ca5de52 ("x86/paravirt: Move halt paravirt calls under CONFIG_PARAVIRT") Closes: https://lore.kernel.org/oe-kbuild-all/202601152203.plJOoOEF-lkp@intel.com/ Reported-by: kernel test robot Signed-off-by: Juergen Gross Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Ingo Molnar Link: https://patch.msgid.link/20260119182632.596369-2-jgross@suse.com --- arch/x86/include/asm/irqflags.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 462754b0bf8a..6f25de05ed58 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -96,11 +96,11 @@ static __always_inline void halt(void) native_halt(); } #endif /* __ASSEMBLER__ */ +#else +#include #endif /* CONFIG_PARAVIRT */ -#ifdef CONFIG_PARAVIRT_XXL -#include -#else +#ifndef CONFIG_PARAVIRT_XXL #ifndef __ASSEMBLER__ #include From 9eece498565c3fd5f37efe58498779efd39f2269 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 19 Jan 2026 19:26:28 +0100 Subject: [PATCH 05/11] x86/paravirt: Replace io_delay() hook with a bool The io_delay() paravirt hook is in no way performance critical and all users setting it to a different function than native_io_delay() are using an empty function as replacement. Allow replacing the hook with a bool indicating whether native_io_delay() should be called. [ bp: Massage commit message. 
] Signed-off-by: Juergen Gross Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Ingo Molnar Link: https://patch.msgid.link/20260119182632.596369-3-jgross@suse.com --- arch/x86/include/asm/io.h | 9 ++++++--- arch/x86/include/asm/paravirt-base.h | 6 ++++++ arch/x86/include/asm/paravirt.h | 11 ----------- arch/x86/include/asm/paravirt_types.h | 2 -- arch/x86/kernel/cpu/vmware.c | 2 +- arch/x86/kernel/kvm.c | 8 +------- arch/x86/kernel/paravirt.c | 3 +-- arch/x86/xen/enlighten_pv.c | 6 +----- 8 files changed, 16 insertions(+), 31 deletions(-) diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 2ea25745e059..4179a2ebe777 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -242,11 +242,16 @@ extern int io_delay_type; extern void io_delay_init(void); #if defined(CONFIG_PARAVIRT) -#include +#include #else +#define call_io_delay() true +#endif static inline void slow_down_io(void) { + if (!call_io_delay()) + return; + native_io_delay(); #ifdef REALLY_SLOW_IO native_io_delay(); @@ -255,8 +260,6 @@ static inline void slow_down_io(void) #endif } -#endif - #define BUILDIO(bwl, type) \ static inline void out##bwl##_p(type value, u16 port) \ { \ diff --git a/arch/x86/include/asm/paravirt-base.h b/arch/x86/include/asm/paravirt-base.h index 982a0b93bc76..3b9e7772d196 100644 --- a/arch/x86/include/asm/paravirt-base.h +++ b/arch/x86/include/asm/paravirt-base.h @@ -15,6 +15,8 @@ struct pv_info { #ifdef CONFIG_PARAVIRT_XXL u16 extra_user_64bit_cs; /* __USER_CS if none */ #endif + bool io_delay; + const char *name; }; @@ -26,6 +28,10 @@ u64 _paravirt_ident_64(u64); #endif #define paravirt_nop ((void *)nop_func) +#ifdef CONFIG_PARAVIRT +#define call_io_delay() pv_info.io_delay +#endif + #ifdef CONFIG_PARAVIRT_SPINLOCKS void paravirt_set_cap(void); #else diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index fcf8ab50948a..cdfe4007443e 100644 --- a/arch/x86/include/asm/paravirt.h +++ 
b/arch/x86/include/asm/paravirt.h @@ -19,17 +19,6 @@ #include #include -/* The paravirtualized I/O functions */ -static inline void slow_down_io(void) -{ - PVOP_VCALL0(pv_ops, cpu.io_delay); -#ifdef REALLY_SLOW_IO - PVOP_VCALL0(pv_ops, cpu.io_delay); - PVOP_VCALL0(pv_ops, cpu.io_delay); - PVOP_VCALL0(pv_ops, cpu.io_delay); -#endif -} - void native_flush_tlb_local(void); void native_flush_tlb_global(void); void native_flush_tlb_one_user(unsigned long addr); diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 9bcf6bce88f6..4f5ae0068aab 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -30,8 +30,6 @@ struct pv_lazy_ops { struct pv_cpu_ops { /* hooks for various privileged instructions */ - void (*io_delay)(void); - #ifdef CONFIG_PARAVIRT_XXL unsigned long (*get_debugreg)(int regno); void (*set_debugreg)(int regno, unsigned long value); diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index a3e6936839b1..eee0d1a48802 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -339,7 +339,7 @@ arch_initcall(activate_jump_labels); static void __init vmware_paravirt_ops_setup(void) { pv_info.name = "VMware hypervisor"; - pv_ops.cpu.io_delay = paravirt_nop; + pv_info.io_delay = false; if (vmware_tsc_khz == 0) return; diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 3bc062363814..29226d112029 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -75,12 +75,6 @@ DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visi static int has_steal_clock = 0; static int has_guest_poll = 0; -/* - * No need for any "IO delay" on KVM - */ -static void kvm_io_delay(void) -{ -} #define KVM_TASK_SLEEP_HASHBITS 8 #define KVM_TASK_SLEEP_HASHSIZE (1< Date: Mon, 19 Jan 2026 19:26:29 +0100 Subject: [PATCH 06/11] block/floppy: Don't use REALLY_SLOW_IO for delays Instead of defining REALLY_SLOW_IO before 
including io.h, add the required additional calls of native_io_delay() to the related functions in arch/x86/include/asm/floppy.h. Drop REALLY_SLOW_IO now too as it has no users. [ bp: Merge the REALLY_SLOW_IO removal into this patch. ] Signed-off-by: Juergen Gross Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Ingo Molnar Link: https://patch.msgid.link/20260119182632.596369-4-jgross@suse.com --- arch/x86/include/asm/floppy.h | 27 ++++++++++++++++++++++----- arch/x86/include/asm/io.h | 5 ----- drivers/block/floppy.c | 2 -- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h index e7a244051c62..8d1e86687b98 100644 --- a/arch/x86/include/asm/floppy.h +++ b/arch/x86/include/asm/floppy.h @@ -29,9 +29,6 @@ #define CSW fd_routine[can_use_virtual_dma & 1] -#define fd_inb(base, reg) inb_p((base) + (reg)) -#define fd_outb(value, base, reg) outb_p(value, (base) + (reg)) - #define fd_request_dma() CSW._request_dma(FLOPPY_DMA, "floppy") #define fd_free_dma() CSW._free_dma(FLOPPY_DMA) #define fd_enable_irq() enable_irq(FLOPPY_IRQ) @@ -49,6 +46,26 @@ static char *virtual_dma_addr; static int virtual_dma_mode; static int doing_pdma; +static inline u8 fd_inb(u16 base, u16 reg) +{ + u8 ret = inb_p(base + reg); + + native_io_delay(); + native_io_delay(); + native_io_delay(); + + return ret; +} + +static inline void fd_outb(u8 value, u16 base, u16 reg) +{ + outb_p(value, base + reg); + + native_io_delay(); + native_io_delay(); + native_io_delay(); +} + static irqreturn_t floppy_hardint(int irq, void *dev_id) { unsigned char st; @@ -79,9 +96,9 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id) if (st != (STATUS_DMA | STATUS_READY)) break; if (virtual_dma_mode) - outb_p(*lptr, virtual_dma_port + FD_DATA); + fd_outb(*lptr, virtual_dma_port, FD_DATA); else - *lptr = inb_p(virtual_dma_port + FD_DATA); + *lptr = fd_inb(virtual_dma_port, FD_DATA); } virtual_dma_count = lcount; virtual_dma_addr = lptr; 
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 4179a2ebe777..7f4847b2b904 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -253,11 +253,6 @@ static inline void slow_down_io(void) return; native_io_delay(); -#ifdef REALLY_SLOW_IO - native_io_delay(); - native_io_delay(); - native_io_delay(); -#endif } #define BUILDIO(bwl, type) \ diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 92e446a64371..0509746f8aed 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -145,8 +145,6 @@ * Better audit of register_blkdev. */ -#define REALLY_SLOW_IO - #define DEBUGT 2 #define DPRINT(format, args...) \ From 6a9fe1ad908df12ef71dea12373f25826ea29a8d Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Mon, 23 Mar 2026 16:07:19 +0800 Subject: [PATCH 07/11] x86/cpu/topology: Consolidate AMD and Hygon cases in parse_topology() Merge the two separate switch cases for AMD and Hygon as they share the common cpu_parse_topology_amd(). Also drop the IS_ENABLED(CONFIG_CPU_SUP_AMD/HYGON) guards, because 1) they are dead code: when a vendor's CONFIG_CPU_SUP_* is disabled, its vendor detection code (in amd.c / hygon.c) is not compiled, so x86_vendor will never be set to X86_VENDOR_AMD / X86_VENDOR_HYGON, instead it will default to X86_VENDOR_UNKNOWN and those switch cases are unreachable. 2) topology_amd.o is always built (obj-y), so cpu_parse_topology_amd() is always available regardless of CPU_SUP_* configuration. 
Signed-off-by: Wei Wang Signed-off-by: Borislav Petkov (AMD) Tested-by: Yongwei Xu Link: https://patch.msgid.link/SI2PR01MB4393D6B7E17AB05612AEE925DC4BA@SI2PR01MB4393.apcprd01.prod.exchangelabs.com --- arch/x86/kernel/cpu/topology_common.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/topology_common.c b/arch/x86/kernel/cpu/topology_common.c index 71625795d711..d0d79d5b8eb9 100644 --- a/arch/x86/kernel/cpu/topology_common.c +++ b/arch/x86/kernel/cpu/topology_common.c @@ -157,8 +157,8 @@ static void parse_topology(struct topo_scan *tscan, bool early) switch (c->x86_vendor) { case X86_VENDOR_AMD: - if (IS_ENABLED(CONFIG_CPU_SUP_AMD)) - cpu_parse_topology_amd(tscan); + case X86_VENDOR_HYGON: + cpu_parse_topology_amd(tscan); break; case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: @@ -170,10 +170,6 @@ static void parse_topology(struct topo_scan *tscan, bool early) if (c->cpuid_level >= 0x1a) c->topo.cpu_type = cpuid_eax(0x1a); break; - case X86_VENDOR_HYGON: - if (IS_ENABLED(CONFIG_CPU_SUP_HYGON)) - cpu_parse_topology_amd(tscan); - break; } } From 5635c8bfd3ab0757c5461d2165f6b284862826bf Mon Sep 17 00:00:00 2001 From: "Naveen N Rao (AMD)" Date: Wed, 1 Apr 2026 10:26:32 +0530 Subject: [PATCH 08/11] x86/apic: Drop AMD Extended Interrupt LVT macros AMD defines Extended Interrupt Local Vector Table (EILVT) registers to allow for additional interrupt sources. While the APIC registers for those are unique to AMD, the format of those registers follows the standard LVT registers. Drop EILVT-specific macros in favor of the standard APIC LVT macros. Drop unused APIC_EILVT_NR_AMD_K8 and APIC_EILVT_LVTOFF while at it. No functional change. [ bp: Merge the two cleanup patches into one. 
] Signed-off-by: Naveen N Rao (AMD) Signed-off-by: Borislav Petkov (AMD) Tested-by: Manali Shukla Link: https://patch.msgid.link/b98d69037c0102d2ccd082a941888a689cd214c9.1775019269.git.naveen@kernel.org --- arch/x86/events/amd/ibs.c | 6 +++--- arch/x86/include/asm/apicdef.h | 7 ------- arch/x86/kernel/apic/apic.c | 12 ++++++------ arch/x86/kernel/cpu/mce/amd.c | 6 +++--- 4 files changed, 12 insertions(+), 19 deletions(-) diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index aca89f23d2e0..f3a16eb5a58e 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -1545,7 +1545,7 @@ EXPORT_SYMBOL(get_ibs_caps); static inline int get_eilvt(int offset) { - return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1); + return !setup_APIC_eilvt(offset, 0, APIC_DELIVERY_MODE_NMI, 1); } static inline int put_eilvt(int offset) @@ -1694,7 +1694,7 @@ static void setup_APIC_ibs(void) if (offset < 0) goto failed; - if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0)) + if (!setup_APIC_eilvt(offset, 0, APIC_DELIVERY_MODE_NMI, 0)) return; failed: pr_warn("perf: IBS APIC setup failed on cpu #%d\n", @@ -1707,7 +1707,7 @@ static void clear_APIC_ibs(void) offset = get_ibs_lvt_offset(); if (offset >= 0) - setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); + setup_APIC_eilvt(offset, 0, APIC_DELIVERY_MODE_FIXED, 1); } static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu) diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h index be39a543fbe5..bc125c4429dc 100644 --- a/arch/x86/include/asm/apicdef.h +++ b/arch/x86/include/asm/apicdef.h @@ -138,15 +138,8 @@ #define APIC_SEOI 0x420 #define APIC_IER 0x480 #define APIC_EILVTn(n) (0x500 + 0x10 * n) -#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ #define APIC_EILVT_NR_AMD_10H 4 #define APIC_EILVT_NR_MAX APIC_EILVT_NR_AMD_10H -#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) -#define APIC_EILVT_MSG_FIX 0x0 -#define APIC_EILVT_MSG_SMI 0x2 -#define APIC_EILVT_MSG_NMI 0x4 
-#define APIC_EILVT_MSG_EXT 0x7 -#define APIC_EILVT_MASKED (1 << 16) #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) #define APIC_BASE_MSR 0x800 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index d93f87f29d03..eb2d8256f7bb 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -332,7 +332,7 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) * Since the offsets must be consistent for all cores, we keep track * of the LVT offsets in software and reserve the offset for the same * vector also to be used on other cores. An offset is freed by - * setting the entry to APIC_EILVT_MASKED. + * setting the entry to APIC_LVT_MASKED. * * If the BIOS is right, there should be no conflicts. Otherwise a * "[Firmware Bug]: ..." error message is generated. However, if @@ -344,9 +344,9 @@ static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX]; static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new) { - return (old & APIC_EILVT_MASKED) - || (new == APIC_EILVT_MASKED) - || ((new & ~APIC_EILVT_MASKED) == old); + return (old & APIC_LVT_MASKED) + || (new == APIC_LVT_MASKED) + || ((new & ~APIC_LVT_MASKED) == old); } static unsigned int reserve_eilvt_offset(int offset, unsigned int new) @@ -358,13 +358,13 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new) rsvd = atomic_read(&eilvt_offsets[offset]); do { - vector = rsvd & ~APIC_EILVT_MASKED; /* 0: unassigned */ + vector = rsvd & ~APIC_LVT_MASKED; /* 0: unassigned */ if (vector && !eilvt_entry_is_changeable(vector, new)) /* may not change if vectors are different */ return rsvd; } while (!atomic_try_cmpxchg(&eilvt_offsets[offset], &rsvd, new)); - rsvd = new & ~APIC_EILVT_MASKED; + rsvd = new & ~APIC_LVT_MASKED; if (rsvd && rsvd != vector) pr_info("LVT offset %d assigned for vector 0x%02x\n", offset, rsvd); diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index da13c1e37f87..3a689d389026 100644 --- 
a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -521,7 +521,7 @@ static void mce_threshold_block_init(struct threshold_block *b, int offset) static int setup_APIC_mce_threshold(int reserved, int new) { if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR, - APIC_EILVT_MSG_FIX, 0)) + APIC_DELIVERY_MODE_FIXED, 0)) return new; return reserved; @@ -704,11 +704,11 @@ static void smca_enable_interrupt_vectors(void) return; offset = (mca_intr_cfg & SMCA_THR_LVT_OFF) >> 12; - if (!setup_APIC_eilvt(offset, THRESHOLD_APIC_VECTOR, APIC_EILVT_MSG_FIX, 0)) + if (!setup_APIC_eilvt(offset, THRESHOLD_APIC_VECTOR, APIC_DELIVERY_MODE_FIXED, 0)) data->thr_intr_en = 1; offset = (mca_intr_cfg & MASK_DEF_LVTOFF) >> 4; - if (!setup_APIC_eilvt(offset, DEFERRED_ERROR_VECTOR, APIC_EILVT_MSG_FIX, 0)) + if (!setup_APIC_eilvt(offset, DEFERRED_ERROR_VECTOR, APIC_DELIVERY_MODE_FIXED, 0)) data->dfr_intr_en = 1; } From f44cc3a48acaf9909e3953eb69299e3c773c9111 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Sat, 4 Apr 2026 13:12:09 +0200 Subject: [PATCH 09/11] x86/fpu: Correct misspelled xfeaures_to_write local var It happens. Fix it. No functional changes. 
Signed-off-by: Borislav Petkov (AMD) Link: https://patch.msgid.link/20260404120048.14765-1-bp@kernel.org --- arch/x86/kernel/fpu/xstate.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h index 52ce19289989..38a2862f09d3 100644 --- a/arch/x86/kernel/fpu/xstate.h +++ b/arch/x86/kernel/fpu/xstate.h @@ -267,16 +267,16 @@ static inline void os_xrstor_supervisor(struct fpstate *fpstate) */ static inline u64 xfeatures_need_sigframe_write(void) { - u64 xfeaures_to_write; + u64 xfeatures_to_write; /* In-use features must be written: */ - xfeaures_to_write = xfeatures_in_use(); + xfeatures_to_write = xfeatures_in_use(); /* Also write all non-optimizable sigframe features: */ - xfeaures_to_write |= XFEATURE_MASK_USER_SUPPORTED & + xfeatures_to_write |= XFEATURE_MASK_USER_SUPPORTED & ~XFEATURE_MASK_SIGFRAME_INITOPT; - return xfeaures_to_write; + return xfeatures_to_write; } /* From fbe80bd6993a059ca366a3561051b419f3cd2b29 Mon Sep 17 00:00:00 2001 From: Ronan Pigott Date: Sun, 5 Apr 2026 10:28:25 -0700 Subject: [PATCH 10/11] x86/split_lock: Don't warn about unknown split_lock_detect parameter The split_lock_detect command line parameter is handled in sld_setup() shortly after cpu_parse_early_param() but still before parse_early_param(). Add a dummy parsing function so that parse_early_param() doesn't later complain about the "unknown" parameter split_lock_detect=, and pass it along to init. [ bp: Massage commit message. 
] Signed-off-by: Ronan Pigott Signed-off-by: Borislav Petkov (AMD) Link: https://patch.msgid.link/20260405181807.3906-1-ronan@rjp.ie --- arch/x86/kernel/cpu/bus_lock.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c index 660aa9aa8bec..bba28607a59a 100644 --- a/arch/x86/kernel/cpu/bus_lock.c +++ b/arch/x86/kernel/cpu/bus_lock.c @@ -132,6 +132,12 @@ static void __init sld_state_setup(void) sld_state = state; } +static __init int setup_split_lock_detect(char *arg) +{ + return 1; +} +__setup("split_lock_detect=", setup_split_lock_detect); + static void __init __split_lock_setup(void) { if (!split_lock_verify_msr(false)) { From 9b8ad2b63067eb302aea429cb6f1f22947b353d7 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Wed, 8 Apr 2026 12:32:06 +0200 Subject: [PATCH 11/11] x86/fpu: Correct the comment explaining what xfeatures_in_use() does It returns the mask of the features which are being currently used, i.e., NOT in their initial configuration. No functional changes. Signed-off-by: Borislav Petkov (AMD) --- arch/x86/include/asm/fpu/xcr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/fpu/xcr.h b/arch/x86/include/asm/fpu/xcr.h index 9a710c060445..698457f16d5d 100644 --- a/arch/x86/include/asm/fpu/xcr.h +++ b/arch/x86/include/asm/fpu/xcr.h @@ -23,7 +23,7 @@ static inline void xsetbv(u32 index, u64 value) /* * Return a mask of xfeatures which are currently being tracked - * by the processor as being in the initial configuration. + * by the processor as being not in the initial configuration. * * Callers should check X86_FEATURE_XGETBV1. */