IOMMU Fixes for Linux 7.1-rc2:

Including:
 
 	- Core: Cache-flushing fix for non-x86 platforms.
 
 	- AMD-Vi: Security fix when SEV-SNP is enabled.
 
 	- AMD-Vi: Operator precedence fix in DTE setting.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAmn9hXwACgkQK/BELZcB
 GuPIFRAAvnJKSnMifUBsW8FaXJmoqN1gvjOXAMGqO5ZGmR+A7in0S6klnWzV7qIa
 CDOQqZ7n0hGEqnLAEwf2TSDv/t/qNRA4aIbJCrKYYYxVGaACRDgS56EAxvOP0aBS
 qAMK04zlf5vZiv3dJilqvssEw3Y5EyRoOQCIojTe6CiO+Wt8wAmzri3MFMM9yGts
 uJy4fpbCzU1M/glvR29I+I/3AQPHJBbZWswwbeEj6sJrGnDh5PeC1AVP7jbwXoNa
 60hp4sby+8wWTupGzbwLI1zxH1hxpbidiDywmWHD2vIzA1A+ESzexLv93S9Llj+C
 qDxAEowk+jDxEJRMisyIiHLiRX+gKxVVaywEOOcQ1DsP97q2EfwcHdCImCjG+RET
 E/pLWt7eiaI85bo1T8eWlkMTisapVUxVchicFsBI1oAHVCdC7cBJEIFMIBxrLF4S
 423lJBRwKQrY5urNTRWB8eJHo5vBuT3G0VsnQ6DfVunqT9u24KEEjSx77um6eGvd
 gK6Wp/ti7pimXGYKMVzPe6hnLzAXiiguLE1ejEPBqFBEu0hlbIB66SxKq+aI+e6O
 VE2NxtDuLtY8yI1SCEmv0SAN+/k0bdLIDfzM0H+Lr/f9plpPU9LDydYM4SFg6uIl
 s4zi3gCMQngyRmOdmDw9w5g3CCV7oHTRGCRH9y8qCwz8XNz/JjM=
 =nn8a
 -----END PGP SIGNATURE-----

Merge tag 'iommu-fixes-v7.1-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux

Pull iommu fixes from Joerg Roedel:
 "Core:
   - Cache-flushing fix for non-x86 platforms

  AMD-Vi:
   - Security fix when SEV-SNP is enabled
   - Operator precedence fix in DTE setting"

* tag 'iommu-fixes-v7.1-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux:
  iommu/amd: Fix precedence order in set_dte_passthrough()
  iommu/pages: Fix iommu_pages_flush_incoherent() for non-x86
  iommu/amd: Use maximum PPR log buffer size when SNP is enabled on Family 0x19
  iommu/amd: Use maximum Event log buffer size when SNP is enabled on Family 0x19
Linus Torvalds 2026-05-08 08:16:07 -07:00
commit fa7431eb99
6 changed files with 118 additions and 54 deletions


@@ -11,6 +11,9 @@
 #include "amd_iommu_types.h"
 
+extern int amd_iommu_evtlog_size;
+extern int amd_iommu_pprlog_size;
+
 irqreturn_t amd_iommu_int_thread(int irq, void *data);
 irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data);
 irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);


@@ -15,6 +15,7 @@
 #include <linux/mutex.h>
 #include <linux/msi.h>
 #include <linux/list.h>
+#include <linux/sizes.h>
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/iommufd.h>
@@ -141,7 +142,6 @@
 #define MMIO_STATUS_GALOG_INT_MASK BIT(10)
 
 /* event logging constants */
-#define EVENT_ENTRY_SIZE 0x10
 #define EVENT_TYPE_SHIFT 28
 #define EVENT_TYPE_MASK 0xf
 #define EVENT_TYPE_ILL_DEV 0x1
@@ -259,15 +259,20 @@
 #define MMIO_CMD_BUFFER_TAIL(x) FIELD_GET(MMIO_CMD_TAIL_MASK, (x))
 
 /* constants for event buffer handling */
-#define EVT_BUFFER_SIZE 8192 /* 512 entries */
-#define EVT_LEN_MASK (0x9ULL << 56)
+#define EVTLOG_ENTRY_SIZE 0x10
+#define EVTLOG_SIZE_SHIFT 56
+#define EVTLOG_SIZE_DEF SZ_8K /* 512 entries */
+#define EVTLOG_LEN_MASK_DEF (0x9ULL << EVTLOG_SIZE_SHIFT)
+#define EVTLOG_SIZE_MAX SZ_512K /* 32K entries */
+#define EVTLOG_LEN_MASK_MAX (0xFULL << EVTLOG_SIZE_SHIFT)
 
 /* Constants for PPR Log handling */
-#define PPR_LOG_ENTRIES 512
-#define PPR_LOG_SIZE_SHIFT 56
-#define PPR_LOG_SIZE_512 (0x9ULL << PPR_LOG_SIZE_SHIFT)
-#define PPR_ENTRY_SIZE 16
-#define PPR_LOG_SIZE (PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
+#define PPRLOG_ENTRY_SIZE 0x10
+#define PPRLOG_SIZE_SHIFT 56
+#define PPRLOG_SIZE_DEF SZ_8K /* 512 entries */
+#define PPRLOG_LEN_MASK_DEF (0x9ULL << PPRLOG_SIZE_SHIFT)
+#define PPRLOG_SIZE_MAX SZ_512K /* 32K entries */
+#define PPRLOG_LEN_MASK_MAX (0xFULL << PPRLOG_SIZE_SHIFT)
 
 /* PAGE_SERVICE_REQUEST PPR Log Buffer Entry flags */
 #define PPR_FLAG_EXEC 0x002 /* Execute permission requested */
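
Note (not from the patch): the DEF/MAX length masks above follow the hardware encoding in which the 4-bit field at bit 56 of the log base register (bits 59:56) gives the entry count as a power of two, with 16-byte entries. A standalone C sketch of that arithmetic, assuming this encoding:

#include <stdio.h>

/* Log-size field value n encodes 2^n entries of 16 bytes each. */
int main(void)
{
	unsigned int fields[] = { 0x9, 0xF };

	for (int i = 0; i < 2; i++) {
		unsigned long entries = 1UL << fields[i];
		unsigned long bytes = entries * 0x10;

		/* 0x9 -> 512 entries (8 KiB), 0xF -> 32768 entries (512 KiB) */
		printf("0x%X: %lu entries, %lu KiB\n",
		       fields[i], entries, bytes >> 10);
	}
	return 0;
}

The two outputs match the /* 512 entries */ and /* 32K entries */ comments on the EVTLOG_SIZE_DEF/MAX and PPRLOG_SIZE_DEF/MAX defines.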


@@ -132,6 +132,9 @@ struct ivhd_entry {
 	u8 uid;
 } __attribute__((packed));
 
+int amd_iommu_evtlog_size = EVTLOG_SIZE_DEF;
+int amd_iommu_pprlog_size = PPRLOG_SIZE_DEF;
+
 /*
  * An AMD IOMMU memory definition structure. It defines things like exclusion
  * ranges for devices and regions that should be unity mapped.
@@ -865,35 +868,47 @@ void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
 }
 
 /* allocates the memory where the IOMMU will log its events to */
-static int __init alloc_event_buffer(struct amd_iommu *iommu)
+static int __init alloc_event_buffer(void)
 {
-	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
-					      EVT_BUFFER_SIZE);
+	struct amd_iommu *iommu;
 
-	return iommu->evt_buf ? 0 : -ENOMEM;
-}
+	for_each_iommu(iommu) {
+		iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
+						      amd_iommu_evtlog_size);
+		if (!iommu->evt_buf)
+			return -ENOMEM;
+	}
 
-static void iommu_enable_event_buffer(struct amd_iommu *iommu)
+	return 0;
+}
+
+static void iommu_enable_event_buffer(void)
 {
+	struct amd_iommu *iommu;
 	u64 entry;
 
-	BUG_ON(iommu->evt_buf == NULL);
+	for_each_iommu(iommu) {
+		BUG_ON(iommu->evt_buf == NULL);
 
-	if (!is_kdump_kernel()) {
-		/*
-		 * Event buffer is re-used for kdump kernel and setting
-		 * of MMIO register is not required.
-		 */
-		entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
-		memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
-			    &entry, sizeof(entry));
-	}
+		if (!is_kdump_kernel()) {
+			/*
+			 * Event buffer is re-used for kdump kernel and setting
+			 * of MMIO register is not required.
+			 */
+			entry = iommu_virt_to_phys(iommu->evt_buf);
+			entry |= (amd_iommu_evtlog_size == EVTLOG_SIZE_DEF) ?
+				  EVTLOG_LEN_MASK_DEF : EVTLOG_LEN_MASK_MAX;
+			memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
+				    &entry, sizeof(entry));
+		}
 
-	/* set head and tail to zero manually */
-	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
-	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+		/* set head and tail to zero manually */
+		writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+		writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
 
-	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+		iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+	}
 }
@@ -984,15 +999,20 @@ static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
 	return 0;
 }
 
-static int __init remap_event_buffer(struct amd_iommu *iommu)
+static int __init remap_event_buffer(void)
 {
+	struct amd_iommu *iommu;
 	u64 paddr;
 
 	pr_info_once("Re-using event buffer from the previous kernel\n");
-	paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
-	iommu->evt_buf = iommu_memremap(paddr, EVT_BUFFER_SIZE);
+	for_each_iommu(iommu) {
+		paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
+		iommu->evt_buf = iommu_memremap(paddr, amd_iommu_evtlog_size);
+		if (!iommu->evt_buf)
+			return -ENOMEM;
+	}
 
-	return iommu->evt_buf ? 0 : -ENOMEM;
+	return 0;
 }
 
 static int __init remap_command_buffer(struct amd_iommu *iommu)
@@ -1044,10 +1064,6 @@ static int __init alloc_iommu_buffers(struct amd_iommu *iommu)
 		ret = remap_command_buffer(iommu);
 		if (ret)
 			return ret;
-
-		ret = remap_event_buffer(iommu);
-		if (ret)
-			return ret;
 	} else {
 		ret = alloc_cwwb_sem(iommu);
 		if (ret)
@@ -1056,10 +1072,6 @@ static int __init alloc_iommu_buffers(struct amd_iommu *iommu)
 		ret = alloc_command_buffer(iommu);
 		if (ret)
 			return ret;
-
-		ret = alloc_event_buffer(iommu);
-		if (ret)
-			return ret;
 	}
 
 	return 0;
@@ -2893,7 +2905,6 @@ static void early_enable_iommu(struct amd_iommu *iommu)
 	iommu_init_flags(iommu);
 	iommu_set_device_table(iommu);
 	iommu_enable_command_buffer(iommu);
-	iommu_enable_event_buffer(iommu);
 	iommu_set_exclusion_range(iommu);
 	iommu_enable_gt(iommu);
 	iommu_enable_ga(iommu);
@@ -2957,7 +2968,6 @@ static void early_enable_iommus(void)
 		iommu_disable_event_buffer(iommu);
 		iommu_disable_irtcachedis(iommu);
 		iommu_enable_command_buffer(iommu);
-		iommu_enable_event_buffer(iommu);
 		iommu_enable_ga(iommu);
 		iommu_enable_xt(iommu);
 		iommu_enable_irtcachedis(iommu);
@@ -3070,6 +3080,7 @@ static void amd_iommu_resume(void *data)
 	for_each_iommu(iommu)
 		early_enable_iommu(iommu);
 
+	iommu_enable_event_buffer();
 	amd_iommu_enable_interrupts();
 }
@@ -3399,6 +3410,33 @@ static __init void iommu_snp_enable(void)
 #endif
 }
 
+static void amd_iommu_apply_erratum_snp(void)
+{
+#ifdef CONFIG_KVM_AMD_SEV
+	if (!amd_iommu_snp_en)
+		return;
+
+	/* Errata fix for Family 0x19 */
+	if (boot_cpu_data.x86 != 0x19)
+		return;
+
+	/* Set event log buffer size to max */
+	amd_iommu_evtlog_size = EVTLOG_SIZE_MAX;
+	pr_info("Applying erratum: Increase Event log size to 0x%x\n",
+		amd_iommu_evtlog_size);
+
+	/*
+	 * Set PPR log buffer size to max.
+	 * (Family 0x19, model < 0x10 doesn't support PPR when SNP is enabled).
+	 */
+	if (boot_cpu_data.x86_model >= 0x10) {
+		amd_iommu_pprlog_size = PPRLOG_SIZE_MAX;
+		pr_info("Applying erratum: Increase PPR log size to 0x%x\n",
+			amd_iommu_pprlog_size);
+	}
+#endif
+}
+
 /****************************************************************************
  *
  * AMD IOMMU Initialization State Machine
@@ -3435,6 +3473,21 @@ static int __init state_next(void)
 	case IOMMU_ENABLED:
 		register_syscore(&amd_iommu_syscore);
 		iommu_snp_enable();
+		amd_iommu_apply_erratum_snp();
+
+		/* Allocate/enable event log buffer */
+		if (is_kdump_kernel())
+			ret = remap_event_buffer();
+		else
+			ret = alloc_event_buffer();
+
+		if (ret) {
+			init_state = IOMMU_INIT_ERROR;
+			break;
+		}
+
+		iommu_enable_event_buffer();
+
 		ret = amd_iommu_init_pci();
 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
 		break;
@@ -4037,11 +4090,11 @@ int amd_iommu_snp_disable(void)
 		return 0;
 
 	for_each_iommu(iommu) {
-		ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE);
+		ret = iommu_make_shared(iommu->evt_buf, amd_iommu_evtlog_size);
 		if (ret)
 			return ret;
 
-		ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE);
+		ret = iommu_make_shared(iommu->ppr_log, amd_iommu_pprlog_size);
 		if (ret)
 			return ret;


@@ -1010,7 +1010,7 @@ static void iommu_poll_events(struct amd_iommu *iommu)
 		iommu_print_event(iommu, iommu->evt_buf + head);
 
 		/* Update head pointer of hardware ring-buffer */
-		head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
+		head = (head + EVTLOG_ENTRY_SIZE) % amd_iommu_evtlog_size;
 		writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 	}
@@ -2149,7 +2149,8 @@ static void set_dte_passthrough(struct iommu_dev_data *dev_data,
 
 	new->data[0] |= DTE_FLAG_TV | DTE_FLAG_IR | DTE_FLAG_IW;
 	new->data[1] |= FIELD_PREP(DTE_DOMID_MASK, domain->id) |
-			(dev_data->ats_enabled) ? DTE_FLAG_IOTLB : 0;
+			(dev_data->ats_enabled ? DTE_FLAG_IOTLB : 0);
 }
 
 static void set_dte_entry(struct amd_iommu *iommu,
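
Note (not from the patch): the one-character hunk above works because '|' binds tighter than '?:' in C, so in the old code the ternary condition was the entire OR expression and the DTE's data[1] ended up holding either DTE_FLAG_IOTLB or zero, with the domain ID discarded. A minimal standalone demo with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long domid = 0x42, ats = 0, iotlb = 0x100;

	/* Old: parsed as (domid | ats) ? iotlb : 0 -- domid is discarded */
	unsigned long buggy = domid | ats ? iotlb : 0;
	/* New: domid is always kept; iotlb is OR'ed in only when ats is set */
	unsigned long fixed = domid | (ats ? iotlb : 0);

	printf("buggy=0x%lx fixed=0x%lx\n", buggy, fixed); /* 0x100 vs 0x42 */
	return 0;
}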


@@ -20,7 +20,7 @@
 int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu)
 {
 	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
-					      PPR_LOG_SIZE);
+					      amd_iommu_pprlog_size);
 
 	return iommu->ppr_log ? 0 : -ENOMEM;
 }
@@ -33,7 +33,9 @@ void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
 
 	iommu_feature_enable(iommu, CONTROL_PPR_EN);
 
-	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
+	entry = iommu_virt_to_phys(iommu->ppr_log);
+	entry |= (amd_iommu_pprlog_size == PPRLOG_SIZE_DEF) ?
+		  PPRLOG_LEN_MASK_DEF : PPRLOG_LEN_MASK_MAX;
 
 	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
 		    &entry, sizeof(entry));
@@ -201,7 +203,7 @@ void amd_iommu_poll_ppr_log(struct amd_iommu *iommu)
 		raw[0] = raw[1] = 0UL;
 
 		/* Update head pointer of hardware ring-buffer */
-		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
+		head = (head + PPRLOG_ENTRY_SIZE) % amd_iommu_pprlog_size;
 		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 
 		/* Handle PPR entry */


@@ -137,7 +137,7 @@ static inline void iommu_pages_flush_incoherent(struct device *dma_dev,
 						void *virt, size_t offset,
 						size_t len)
 {
-	dma_sync_single_for_device(dma_dev, (uintptr_t)virt + offset, len,
+	dma_sync_single_for_device(dma_dev, virt_to_phys(virt) + offset, len,
 				   DMA_TO_DEVICE);
 }
 
 void iommu_pages_stop_incoherent_list(struct iommu_pages_list *list,
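
Note (not from the patch): dma_sync_single_for_device() takes a DMA address (dma_addr_t), not a kernel virtual address. On x86 the sync is effectively a no-op, so the old (uintptr_t)virt cast went unnoticed; on non-x86 platforms the address feeds real cache-maintenance operations, hence the virt_to_phys() fix above. As a reminder of the usual contract, a sketch of the streaming-DMA pattern (hypothetical driver code; push_to_device() is made up):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/string.h>

static int push_to_device(struct device *dev, void *buf, size_t size,
			  size_t offset, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* If the CPU dirties the buffer after mapping it, the dirtied
	 * range is flushed via the DMA handle, never via 'buf' itself. */
	memset(buf + offset, 0, len);
	dma_sync_single_for_device(dev, handle + offset, len, DMA_TO_DEVICE);

	/* ... hand the buffer to the device here ... */

	dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);
	return 0;
}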