From: Suresh Siddha <suresh.b.siddha@intel.com>
Subject: x64, x2apic/intr-remap: fix the need for sequential array allocation of iommus
References: fate #303948 and fate #303984
Patch-Mainline: queued for .28
Commit-ID: c42d9f32443397aed2d37d37df161392e6a5862f

Signed-off-by: Thomas Renninger <trenn@suse.de>

Clean up the intel-iommu code related to the deferred iommu flush logic. There
is no need to allocate all the iommus as a sequential array.

This will be used later in the interrupt-remapping patch series to
allocate iommus much earlier and individually for each device remapping
hardware unit.
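
An illustrative, self-contained userspace sketch (not part of this patch) of
the scheme described above: each remapping unit is allocated on its own and
tagged with a seq_id, and that seq_id indexes the per-iommu deferred-flush
state instead of pointer arithmetic against a global g_iommus array. The
fake_iommu/alloc_unit/add_unmap names, MAX_UNITS, and the simplified
bookkeeping are stand-ins, not kernel code; only seq_id and the
allocate-one-at-a-time idea come from the diff below.

	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_UNITS 8

	struct fake_iommu {
		int seq_id;	/* replaces "iommu - g_iommus" indexing */
	};

	struct deferred_flush_entry {
		int pending;	/* stand-in for the real flush bookkeeping */
	};

	static struct deferred_flush_entry deferred_flush[MAX_UNITS];

	/* Allocate one unit at a time, like the reworked alloc_iommu(). */
	static struct fake_iommu *alloc_unit(void)
	{
		static int units_allocated;	/* mirrors iommu_allocated */
		struct fake_iommu *iommu;

		if (units_allocated >= MAX_UNITS)
			return NULL;

		iommu = calloc(1, sizeof(*iommu));
		if (!iommu)
			return NULL;

		iommu->seq_id = units_allocated++;
		return iommu;
	}

	/* Queue deferred work for a unit: index by seq_id, not array offset. */
	static void add_unmap(struct fake_iommu *iommu)
	{
		deferred_flush[iommu->seq_id].pending++;
	}

	int main(void)
	{
		struct fake_iommu *a = alloc_unit();
		struct fake_iommu *b = alloc_unit();

		if (!a || !b)
			return 1;

		add_unmap(a);
		add_unmap(b);
		add_unmap(b);

		printf("unit %d pending=%d, unit %d pending=%d\n",
		       a->seq_id, deferred_flush[a->seq_id].pending,
		       b->seq_id, deferred_flush[b->seq_id].pending);

		free(a);
		free(b);
		return 0;
	}

The real patch keeps the same shape: alloc_iommu() does the kzalloc() and
assigns iommu->seq_id, and add_unmap() uses dom->iommu->seq_id to index
deferred_flush[].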

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: akpm@linux-foundation.org
Cc: arjan@linux.intel.com
Cc: andi@firstfloor.org
Cc: ebiederm@xmission.com
Cc: jbarnes@virtuousgeek.org
Cc: steiner@sgi.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>

---
 drivers/pci/dmar.c        |   11 +++++++++--
 drivers/pci/intel-iommu.c |   23 +++++++----------------
 drivers/pci/intel-iommu.h |    4 ++--
 3 files changed, 18 insertions(+), 20 deletions(-)

--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -375,11 +375,18 @@ int __init early_dmar_detect(void)
 	return (ACPI_SUCCESS(status) ? 1 : 0);
 }
 
-struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
-				struct dmar_drhd_unit *drhd)
+struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd)
 {
+	struct intel_iommu *iommu;
 	int map_size;
 	u32 ver;
+	static int iommu_allocated = 0;
+
+	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+	if (!iommu)
+		return NULL;
+
+	iommu->seq_id = iommu_allocated++;
 
 	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
 	if (!iommu->reg) {
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -58,8 +58,6 @@ static void flush_unmaps_timeout(unsigne
 
 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
 
-static struct intel_iommu *g_iommus;
-
 #define HIGH_WATER_MARK 250
 struct deferred_flush_tables {
 	int next;
@@ -1651,8 +1649,6 @@ int __init init_dmars(void)
 	 * endfor
 	 */
 	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
 		g_num_of_iommus++;
 		/*
 		 * lock not needed as this is only incremented in the single
@@ -1661,12 +1657,6 @@ int __init init_dmars(void)
 		 */
 	}
 
-	g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL);
-	if (!g_iommus) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
 	deferred_flush = kzalloc(g_num_of_iommus *
 		sizeof(struct deferred_flush_tables), GFP_KERNEL);
 	if (!deferred_flush) {
@@ -1674,12 +1664,10 @@ int __init init_dmars(void)
 		goto error;
 	}
 
-	i = 0;
 	for_each_drhd_unit(drhd) {
 		if (drhd->ignored)
 			continue;
-		iommu = alloc_iommu(&g_iommus[i], drhd);
-		i++;
+		iommu = alloc_iommu(drhd);
 		if (!iommu) {
 			ret = -ENOMEM;
 			goto error;
@@ -1771,7 +1759,6 @@ error:
 		iommu = drhd->iommu;
 		free_iommu(iommu);
 	}
-	kfree(g_iommus);
 	return ret;
 }
 
@@ -1928,7 +1915,10 @@ static void flush_unmaps(void)
 	/* just flush them all */
 	for (i = 0; i < g_num_of_iommus; i++) {
 		if (deferred_flush[i].next) {
-			iommu_flush_iotlb_global(&g_iommus[i], 0);
+			struct intel_iommu *iommu =
+				deferred_flush[i].domain[0]->iommu;
+
+			iommu_flush_iotlb_global(iommu, 0);
 			for (j = 0; j < deferred_flush[i].next; j++) {
 				__free_iova(&deferred_flush[i].domain[j]->iovad,
 						deferred_flush[i].iova[j]);
@@ -1958,7 +1948,8 @@ static void add_unmap(struct dmar_domain
 	if (list_size == HIGH_WATER_MARK)
 		flush_unmaps();
 
-	iommu_id = dom->iommu - g_iommus;
+	iommu_id = dom->iommu->seq_id;
+
 	next = deferred_flush[iommu_id].next;
 	deferred_flush[iommu_id].domain[next] = dom;
 	deferred_flush[iommu_id].iova[next] = iova;
--- a/drivers/pci/intel-iommu.h
+++ b/drivers/pci/intel-iommu.h
@@ -182,6 +182,7 @@ struct intel_iommu {
 	int		seg;
 	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
 	spinlock_t	register_lock; /* protect register handling */
+	int		seq_id;	/* sequence id of the iommu */
 
 #ifdef CONFIG_DMAR
 	unsigned long	*domain_ids; /* bitmap of domains */
@@ -198,8 +199,7 @@ struct intel_iommu {
 
 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 
-extern struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
-				struct dmar_drhd_unit *drhd);
+extern struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd);
 extern void free_iommu(struct intel_iommu *iommu);
 
 #endif