iommu/vt-d: Enable QI on all IOMMUs before setting root entry
author     Joerg Roedel <jroedel@suse.de>
           Fri, 17 Jun 2016 09:29:48 +0000 (11:29 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 27 Jul 2016 15:42:15 +0000 (08:42 -0700)
commit a4c34ff1c029e90e7d5f8dd8d29b0a93b31c3cb2 upstream.

This seems to be required on some X58 chipsets on systems
with more than one IOMMU. QI does not work until it is
enabled on all IOMMUs in the system.

Reported-by: Dheeraj CVR <cvr.dheeraj@gmail.com>
Tested-by: Dheeraj CVR <cvr.dheeraj@gmail.com>
Fixes: 5f0a7f7614a9 ('iommu/vt-d: Make root entry visible for hardware right after allocation')
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/iommu/intel-iommu.c

index e1852e845d21f1f8014f7e51789d0e8682c5c8e8..ae364e07840ca1c2cea95d08ee753f4a286e772c 100644
@@ -3169,11 +3169,6 @@ static int __init init_dmars(void)
                        }
                }
 
-               iommu_flush_write_buffer(iommu);
-               iommu_set_root_entry(iommu);
-               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
-               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-
                if (!ecap_pass_through(iommu->ecap))
                        hw_pass_through = 0;
 #ifdef CONFIG_INTEL_IOMMU_SVM
@@ -3182,6 +3177,18 @@ static int __init init_dmars(void)
 #endif
        }
 
+       /*
+        * Now that qi is enabled on all iommus, set the root entry and flush
+        * caches. This is required on some Intel X58 chipsets, otherwise the
+        * flush_context function will loop forever and the boot hangs.
+        */
+       for_each_active_iommu(iommu, drhd) {
+               iommu_flush_write_buffer(iommu);
+               iommu_set_root_entry(iommu);
+               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+       }
+
        if (iommu_pass_through)
                iommu_identity_mapping |= IDENTMAP_ALL;