#include <linux/iommufd.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#define MMIO_STATUS_GALOG_INT_MASK	BIT(10)

/* Event logging constants */
#define EVENT_TYPE_SHIFT	28
#define EVENT_TYPE_MASK		0xf
#define EVENT_TYPE_ILL_DEV	0x1

#define MMIO_CMD_BUFFER_TAIL(x)	FIELD_GET(MMIO_CMD_TAIL_MASK, (x))

/*
 * Constants for event log buffer handling.
 *
 * The EvtLen field (bits starting at EVTLOG_SIZE_SHIFT of the event log
 * base register) encodes the log size as a power of two of entries:
 * 0x9 -> 512 entries (8K bytes), 0xF -> 32K entries (512K bytes), with
 * each entry being EVTLOG_ENTRY_SIZE (16) bytes.
 */
#define EVTLOG_ENTRY_SIZE	0x10
#define EVTLOG_SIZE_SHIFT	56
#define EVTLOG_SIZE_DEF		SZ_8K	/* 512 entries */
#define EVTLOG_LEN_MASK_DEF	(0x9ULL << EVTLOG_SIZE_SHIFT)
#define EVTLOG_SIZE_MAX		SZ_512K	/* 32K entries */
#define EVTLOG_LEN_MASK_MAX	(0xFULL << EVTLOG_SIZE_SHIFT)

/* Constants for PPR Log handling */
#define PPR_LOG_ENTRIES		512
u8 uid;
} __attribute__((packed));
+int amd_iommu_evtlog_size = EVTLOG_SIZE_DEF;
+
/*
* An AMD IOMMU memory definition structure. It defines things like exclusion
* ranges for devices and regions that should be unity mapped.
}
/* allocates the memory where the IOMMU will log its events to */
-static int __init alloc_event_buffer(struct amd_iommu *iommu)
+static int __init alloc_event_buffer(void)
{
- iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
- EVT_BUFFER_SIZE);
+ struct amd_iommu *iommu;
- return iommu->evt_buf ? 0 : -ENOMEM;
+ for_each_iommu(iommu) {
+ iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
+ amd_iommu_evtlog_size);
+ if (!iommu->evt_buf)
+ return -ENOMEM;
+ }
+
+ return 0;
}
-static void iommu_enable_event_buffer(struct amd_iommu *iommu)
+static void iommu_enable_event_buffer(void)
{
+ struct amd_iommu *iommu;
u64 entry;
- BUG_ON(iommu->evt_buf == NULL);
+ for_each_iommu(iommu) {
+ BUG_ON(iommu->evt_buf == NULL);
- if (!is_kdump_kernel()) {
- /*
- * Event buffer is re-used for kdump kernel and setting
- * of MMIO register is not required.
- */
- entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
- memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
- &entry, sizeof(entry));
- }
+ if (!is_kdump_kernel()) {
+ /*
+ * Event buffer is re-used for kdump kernel and setting
+ * of MMIO register is not required.
+ */
+ entry = iommu_virt_to_phys(iommu->evt_buf);
+ entry |= (amd_iommu_evtlog_size == EVTLOG_SIZE_DEF) ?
+ EVTLOG_LEN_MASK_DEF : EVTLOG_LEN_MASK_MAX;
+
+ memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
+ &entry, sizeof(entry));
+ }
- /* set head and tail to zero manually */
- writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
- writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+ /* set head and tail to zero manually */
+ writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
- iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+ iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+ }
}
/*
return 0;
}
-static int __init remap_event_buffer(struct amd_iommu *iommu)
+static int __init remap_event_buffer(void)
{
+ struct amd_iommu *iommu;
u64 paddr;
pr_info_once("Re-using event buffer from the previous kernel\n");
- paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
- iommu->evt_buf = iommu_memremap(paddr, EVT_BUFFER_SIZE);
+ for_each_iommu(iommu) {
+ paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
+ iommu->evt_buf = iommu_memremap(paddr, amd_iommu_evtlog_size);
+ if (!iommu->evt_buf)
+ return -ENOMEM;
+ }
- return iommu->evt_buf ? 0 : -ENOMEM;
+ return 0;
}
static int __init remap_command_buffer(struct amd_iommu *iommu)
ret = remap_command_buffer(iommu);
if (ret)
return ret;
-
- ret = remap_event_buffer(iommu);
- if (ret)
- return ret;
} else {
ret = alloc_cwwb_sem(iommu);
if (ret)
ret = alloc_command_buffer(iommu);
if (ret)
return ret;
-
- ret = alloc_event_buffer(iommu);
- if (ret)
- return ret;
}
return 0;
iommu_init_flags(iommu);
iommu_set_device_table(iommu);
iommu_enable_command_buffer(iommu);
- iommu_enable_event_buffer(iommu);
iommu_set_exclusion_range(iommu);
iommu_enable_gt(iommu);
iommu_enable_ga(iommu);
iommu_disable_event_buffer(iommu);
iommu_disable_irtcachedis(iommu);
iommu_enable_command_buffer(iommu);
- iommu_enable_event_buffer(iommu);
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
for_each_iommu(iommu)
early_enable_iommu(iommu);
+ iommu_enable_event_buffer();
amd_iommu_enable_interrupts();
}
#endif
}
/*
 * Erratum workaround: on SNP-enabled Family 0x19 systems, enlarge the
 * event log to its maximum size (EVTLOG_SIZE_MAX) before the buffers
 * are allocated. Compiled out when CONFIG_KVM_AMD_SEV is not set,
 * since amd_iommu_snp_en is only meaningful there.
 */
static void amd_iommu_apply_erratum_snp(void)
{
#ifdef CONFIG_KVM_AMD_SEV
	if (!amd_iommu_snp_en)
		return;

	/* Errata fix for Family 0x19 */
	if (boot_cpu_data.x86 != 0x19)
		return;

	/* Set event log buffer size to max */
	amd_iommu_evtlog_size = EVTLOG_SIZE_MAX;
	pr_info("Applying erratum: Increase Event log size to 0x%x\n",
		amd_iommu_evtlog_size);
#endif
}

+
/****************************************************************************
*
* AMD IOMMU Initialization State Machine
case IOMMU_ENABLED:
register_syscore(&amd_iommu_syscore);
iommu_snp_enable();
+
+ amd_iommu_apply_erratum_snp();
+
+ /* Allocate/enable event log buffer */
+ if (is_kdump_kernel())
+ ret = remap_event_buffer();
+ else
+ ret = alloc_event_buffer();
+
+ if (ret) {
+ init_state = IOMMU_INIT_ERROR;
+ break;
+ }
+ iommu_enable_event_buffer();
+
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
break;
return 0;
for_each_iommu(iommu) {
- ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE);
+ ret = iommu_make_shared(iommu->evt_buf, amd_iommu_evtlog_size);
if (ret)
return ret;