iommu/arm-smmu-v3: Set MEV bit in nested STE for DoS mitigations
author     Nicolin Chen <nicolinc@nvidia.com>
           Tue, 11 Mar 2025 19:44:32 +0000 (12:44 -0700)
committer  Jason Gunthorpe <jgg@nvidia.com>
           Tue, 18 Mar 2025 17:17:48 +0000 (14:17 -0300)
There is a DoS concern with the hardware event queue that is shared among
devices passed through to VMs: too many translation failures coming from
those VMs could overflow the shared event queue if the VMs or their VMMs
do not handle or recover the faulting devices properly.

The MEV bit in the STE allows the SMMU HW to be configured to merge
similar event records, though merging is not guaranteed. Set it in the
nested STE as a DoS mitigation.

In the future, we might want to enable MEV for non-nested cases too, such
as domain->type == IOMMU_DOMAIN_UNMANAGED or even IOMMU_DOMAIN_DMA (see
the hedged sketch after the sign-off tags below).

Link: https://patch.msgid.link/r/8ed12feef67fc65273d0f5925f401a81f56acebe.1741719725.git.nicolinc@nvidia.com
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Pranjal Shrivastava <praan@google.com>
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
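
As a rough illustration of that non-nested follow-up (not part of this
commit), the sketch below shows how the same bit could be OR'd into STE
word 1 by a small helper; the helper name and its call site are
assumptions for illustration only, and a real change would more likely be
folded into the existing arm_smmu_make_*_ste() builders.

/*
 * Hypothetical sketch only, not code from this commit: if MEV were also
 * enabled for non-nested domains (e.g. IOMMU_DOMAIN_UNMANAGED), the STE
 * builders could OR the bit into word 1 the same way the nested path in
 * arm-smmu-v3-iommufd.c below does.
 */
static void arm_smmu_ste_set_mev(struct arm_smmu_ste *target)
{
	/* Ask the SMMU HW to merge similar event records for this STE */
	target->data[1] |= cpu_to_le64(STRTAB_STE_1_MEV);
}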
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
index 65adfed56969c89dc08dd136607b6d24300e9fed..e4fd8d522af8824554c2c7666a6318072d7f70e0 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
@@ -43,6 +43,8 @@ static void arm_smmu_make_nested_cd_table_ste(
        target->data[0] |= nested_domain->ste[0] &
                           ~cpu_to_le64(STRTAB_STE_0_CFG);
        target->data[1] |= nested_domain->ste[1];
+       /* Merge events for DoS mitigations on eventq */
+       target->data[1] |= cpu_to_le64(STRTAB_STE_1_MEV);
 }
 
 /*
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 5fa817a8f5f11dbdb671ab5ab244882b34f395d8..b4c21aaed1266a025167e03a7e616c6bdc7a6c66 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1052,7 +1052,7 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
                        cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
                                    STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
                                    STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
-                                   STRTAB_STE_1_EATS);
+                                   STRTAB_STE_1_EATS | STRTAB_STE_1_MEV);
                used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
 
                /*
@@ -1068,7 +1068,7 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
        if (cfg & BIT(1)) {
                used_bits[1] |=
                        cpu_to_le64(STRTAB_STE_1_S2FWB | STRTAB_STE_1_EATS |
-                                   STRTAB_STE_1_SHCFG);
+                                   STRTAB_STE_1_SHCFG | STRTAB_STE_1_MEV);
                used_bits[2] |=
                        cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
                                    STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index df06076a16982231b360a84b3e0876cebc279a34..dd1ad56ce8639208e1db6b452630c18d74ae7337 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -266,6 +266,7 @@ static inline u32 arm_smmu_strtab_l2_idx(u32 sid)
 #define STRTAB_STE_1_S1COR             GENMASK_ULL(5, 4)
 #define STRTAB_STE_1_S1CSH             GENMASK_ULL(7, 6)
 
+#define STRTAB_STE_1_MEV               (1UL << 19)
 #define STRTAB_STE_1_S2FWB             (1UL << 25)
 #define STRTAB_STE_1_S1STALLD          (1UL << 27)