]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
iommu/amd: Clean up RMP entries for IOMMU pages during SNP shutdown
authorAshish Kalra <ashish.kalra@amd.com>
Fri, 26 Jan 2024 04:11:19 +0000 (22:11 -0600)
committerBorislav Petkov (AMD) <bp@alien8.de>
Mon, 29 Jan 2024 19:34:18 +0000 (20:34 +0100)
Add a new IOMMU API interface amd_iommu_snp_disable() to transition
IOMMU pages to Hypervisor state from Reclaim state after SNP_SHUTDOWN_EX
command. Invoke this API from the CCP driver after SNP_SHUTDOWN_EX
command.

Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20240126041126.1927228-20-michael.roth@amd.com
drivers/crypto/ccp/sev-dev.c
drivers/iommu/amd/init.c
include/linux/amd-iommu.h

index a70f8b11bccf12d3e0980d94454118c8f97143b5..605c6bf88cf7aeabb3621a39d4736f19fd6bb844 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/fs.h>
 #include <linux/fs_struct.h>
 #include <linux/psp.h>
+#include <linux/amd-iommu.h>
 
 #include <asm/smp.h>
 #include <asm/cacheflush.h>
@@ -1655,6 +1656,25 @@ static int __sev_snp_shutdown_locked(int *error)
                return ret;
        }
 
+       /*
+        * SNP_SHUTDOWN_EX with IOMMU_SNP_SHUTDOWN set to 1 disables SNP
+        * enforcement by the IOMMU and also transitions all pages
+        * associated with the IOMMU to the Reclaim state.
+        * Firmware was transitioning the IOMMU pages to Hypervisor state
+        * before version 1.53. But, accounting for the number of assigned
+        * 4kB pages in a 2M page was done incorrectly by not transitioning
+        * to the Reclaim state. This resulted in RMP #PF when later accessing
+        * the 2M page containing those pages during kexec boot. Hence, the
+        * firmware now transitions these pages to Reclaim state and hypervisor
+        * needs to transition these pages to shared state. SNP Firmware
+        * version 1.53 and above are needed for kexec boot.
+        */
+       ret = amd_iommu_snp_disable();
+       if (ret) {
+               dev_err(sev->dev, "SNP IOMMU shutdown failed\n");
+               return ret;
+       }
+
        sev->snp_initialized = false;
        dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
 
index 3a4eeb26d515ce7875adb3343ef593ddec70df1d..88bb08ae39b2bab56c5cb20d4ad51ede69e9d7a0 100644 (file)
@@ -30,6 +30,7 @@
 #include <asm/io_apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/set_memory.h>
+#include <asm/sev.h>
 
 #include <linux/crash_dump.h>
 
@@ -3797,3 +3798,81 @@ int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64
 
        return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
 }
+
+#ifdef CONFIG_KVM_AMD_SEV
+static int iommu_page_make_shared(void *page)
+{
+	unsigned long paddr, pfn;
+
+	paddr = iommu_virt_to_phys(page);
+	/* The C-bit may be set in the physical address */
+	pfn = __sme_clr(paddr) >> PAGE_SHIFT;
+
+	/*
+	 * Only the first 4K PFN of a (potentially huge) 2M-aligned range
+	 * can carry a huge RMP entry that needs smashing; all other PFNs
+	 * fall straight through to rmp_make_shared().
+	 */
+	if (!(pfn % PTRS_PER_PMD)) {
+		int ret, level;
+		bool assigned;
+
+		ret = snp_lookup_rmpentry(pfn, &assigned, &level);
+		if (ret) {
+			/*
+			 * Bail out here: on lookup failure 'assigned' and
+			 * 'level' are left uninitialized, so they must not
+			 * be consulted below.
+			 */
+			pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n",
+				pfn, ret);
+			return ret;
+		}
+
+		if (!assigned)
+			pr_warn("IOMMU PFN %lx not assigned in RMP table\n",
+				pfn);
+
+		if (level > PG_LEVEL_4K) {
+			/* Split the huge RMP entry into 4K entries */
+			ret = psmash(pfn);
+			if (ret) {
+				pr_warn("IOMMU PFN %lx had a huge RMP entry, but attempted psmash failed, ret: %d, level: %d\n",
+					pfn, ret, level);
+			}
+		}
+	}
+
+	return rmp_make_shared(pfn, PG_LEVEL_4K);
+}
+
+static int iommu_make_shared(void *va, size_t size)
+{
+	size_t off;
+	int ret;
+
+	/* Buffers that were never allocated need no transition */
+	if (!va)
+		return 0;
+
+	/* Transition the region to shared state one 4K page at a time */
+	for (off = 0; off < size; off += PAGE_SIZE) {
+		ret = iommu_page_make_shared(va + off);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int amd_iommu_snp_disable(void)
+{
+	struct amd_iommu *iommu;
+	int ret;
+
+	/* Nothing to clean up if SNP enforcement was never enabled */
+	if (!amd_iommu_snp_en)
+		return 0;
+
+	/*
+	 * Transition every firmware-visible per-IOMMU buffer (event log,
+	 * PPR log, command-completion semaphore page) out of the Reclaim
+	 * state so the pages are usable by the hypervisor again.
+	 */
+	for_each_iommu(iommu) {
+		void *bufs[] = {
+			iommu->evt_buf,
+			iommu->ppr_log,
+			(void *)iommu->cmd_sem,
+		};
+		size_t sizes[] = { EVT_BUFFER_SIZE, PPR_LOG_SIZE, PAGE_SIZE };
+		unsigned int i;
+
+		for (i = 0; i < ARRAY_SIZE(bufs); i++) {
+			ret = iommu_make_shared(bufs[i], sizes[i]);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(amd_iommu_snp_disable);
+#endif
index 7365be00a795b45d8a12c6e5c81593ba1d2bb07e..2b90c48a6a871dc62b4292aa3e062dd12c270189 100644 (file)
@@ -85,4 +85,10 @@ int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn,
                u64 *value);
 struct amd_iommu *get_amd_iommu(unsigned int idx);
 
+#ifdef CONFIG_KVM_AMD_SEV
+/* Transition IOMMU pages out of Reclaim state after SNP_SHUTDOWN_EX */
+int amd_iommu_snp_disable(void);
+#else
+static inline int amd_iommu_snp_disable(void) { return 0; }
+#endif
+
 #endif /* _ASM_X86_AMD_IOMMU_H */