Source: git.ipfire.org — thirdparty/kernel/stable-queue.git (blame view)
Path: releases/4.6.5/iommu-vt-d-enable-qi-on-all-iommus-before-setting-root-entry.patch
Blame commit: 6e4f2b75 (Greg Kroah-Hartman)
From a4c34ff1c029e90e7d5f8dd8d29b0a93b31c3cb2 Mon Sep 17 00:00:00 2001
From: Joerg Roedel <jroedel@suse.de>
Date: Fri, 17 Jun 2016 11:29:48 +0200
Subject: iommu/vt-d: Enable QI on all IOMMUs before setting root entry

From: Joerg Roedel <jroedel@suse.de>

commit a4c34ff1c029e90e7d5f8dd8d29b0a93b31c3cb2 upstream.

This seems to be required on some X58 chipsets on systems
with more than one IOMMU. QI does not work until it is
enabled on all IOMMUs in the system.

Reported-by: Dheeraj CVR <cvr.dheeraj@gmail.com>
Tested-by: Dheeraj CVR <cvr.dheeraj@gmail.com>
Fixes: 5f0a7f7614a9 ('iommu/vt-d: Make root entry visible for hardware right after allocation')
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/iommu/intel-iommu.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3169,11 +3169,6 @@ static int __init init_dmars(void)
 			}
 		}
 
-		iommu_flush_write_buffer(iommu);
-		iommu_set_root_entry(iommu);
-		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
-		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-
 		if (!ecap_pass_through(iommu->ecap))
 			hw_pass_through = 0;
 #ifdef CONFIG_INTEL_IOMMU_SVM
@@ -3182,6 +3177,18 @@ static int __init init_dmars(void)
 #endif
 	}
 
+	/*
+	 * Now that qi is enabled on all iommus, set the root entry and flush
+	 * caches. This is required on some Intel X58 chipsets, otherwise the
+	 * flush_context function will loop forever and the boot hangs.
+	 */
+	for_each_active_iommu(iommu, drhd) {
+		iommu_flush_write_buffer(iommu);
+		iommu_set_root_entry(iommu);
+		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+	}
+
 	if (iommu_pass_through)
 		iommu_identity_mapping |= IDENTMAP_ALL;
 