From: Suresh Siddha <suresh.b.siddha@intel.com>
Subject: x64, x2apic/intr-remap: routines managing Interrupt remapping table entries.
References: fate #303948 and fate #303984
Patch-Mainline: queued for .28
Commit-ID: b6fcb33ad6c05f152a672f7c96c1fab006527b80

Signed-off-by: Thomas Renninger <trenn@suse.de>

Routines handling the management of interrupt remapping table entries.

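Usage sketch (illustrative only, not part of this patch; the irte field
names vector/dest_id below are assumptions about struct irte): a caller
reserves an entry with alloc_irte(), programs and flushes it with
modify_irte(), and releases it with free_irte() on teardown.

	/* hypothetical caller, for illustration only */
	static int remap_one_irq(struct intel_iommu *iommu, int irq,
				 u8 vector, u32 dest_apicid)
	{
		struct irte irte;
		int index;

		index = alloc_irte(iommu, irq, 1);	/* reserve one IRTE */
		if (index < 0)
			return -1;

		memset(&irte, 0, sizeof(irte));
		irte.present = 1;
		irte.vector = vector;		/* assumed field name */
		irte.dest_id = dest_apicid;	/* assumed field name */

		/* program the entry; modify_irte() also flushes it via qi_flush_iec() */
		if (modify_irte(irq, &irte)) {
			free_irte(irq);
			return -1;
		}

		/* later, on teardown: free_irte(irq); */
		return index;
	}
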
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: akpm@linux-foundation.org
Cc: arjan@linux.intel.com
Cc: andi@firstfloor.org
Cc: ebiederm@xmission.com
Cc: jbarnes@virtuousgeek.org
Cc: steiner@sgi.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>

---
 drivers/pci/intel-iommu.h    |    4 
 drivers/pci/intr_remapping.c |  243 +++++++++++++++++++++++++++++++++++++++++++
 include/linux/dmar.h         |   12 ++
 3 files changed, 259 insertions(+)

Index: linux-2.6.26/drivers/pci/intel-iommu.h
===================================================================
--- linux-2.6.26.orig/drivers/pci/intel-iommu.h
+++ linux-2.6.26/drivers/pci/intel-iommu.h
@@ -123,6 +123,7 @@ static inline void dmar_writeq(void __io
 #define ecap_qis(e) ((e) & 0x2)
 #define ecap_eim_support(e) ((e >> 4) & 0x1)
 #define ecap_ir_support(e) ((e >> 3) & 0x1)
+#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
 
 
 /* IOTLB_REG */
@@ -255,6 +256,8 @@ struct q_inval {
 #define INTR_REMAP_PAGE_ORDER 8
 #define INTR_REMAP_TABLE_REG_SIZE 0xf
 
+#define INTR_REMAP_TABLE_ENTRIES 65536
+
 struct ir_table {
 	struct irte *base;
 };
@@ -300,4 +303,5 @@ extern void free_iommu(struct intel_iomm
 extern int dmar_enable_qi(struct intel_iommu *iommu);
 extern void qi_global_iec(struct intel_iommu *iommu);
 
+extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 #endif
Index: linux-2.6.26/drivers/pci/intr_remapping.c
===================================================================
--- linux-2.6.26.orig/drivers/pci/intr_remapping.c
+++ linux-2.6.26/drivers/pci/intr_remapping.c
@@ -2,6 +2,7 @@
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
 #include <linux/pci.h>
+#include <linux/irq.h>
 #include <asm/io_apic.h>
 #include "intel-iommu.h"
 #include "intr_remapping.h"
@@ -10,6 +11,248 @@ static struct ioapic_scope ir_ioapic[MAX
 static int ir_ioapic_num;
 int intr_remapping_enabled;
 
+static struct {
+	struct intel_iommu *iommu;
+	u16 irte_index;
+	u16 sub_handle;
+	u8 irte_mask;
+} irq_2_iommu[NR_IRQS];
+
+static DEFINE_SPINLOCK(irq_2_ir_lock);
+
+int irq_remapped(int irq)
+{
+	if (irq > NR_IRQS)
+		return 0;
+
+	if (!irq_2_iommu[irq].iommu)
+		return 0;
+
+	return 1;
+}
+
+int get_irte(int irq, struct irte *entry)
+{
+	int index;
+
+	if (!entry || irq > NR_IRQS)
+		return -1;
+
+	spin_lock(&irq_2_ir_lock);
+	if (!irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	*entry = *(irq_2_iommu[irq].iommu->ir_table->base + index);
+
+	spin_unlock(&irq_2_ir_lock);
+	return 0;
+}
+
+int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
+{
+	struct ir_table *table = iommu->ir_table;
+	u16 index, start_index;
+	unsigned int mask = 0;
+	int i;
+
+	if (!count)
+		return -1;
+
+	/*
+	 * start the IRTE search from index 0.
+	 */
+	index = start_index = 0;
+
+	if (count > 1) {
+		count = __roundup_pow_of_two(count);
+		mask = ilog2(count);
+	}
+
+	if (mask > ecap_max_handle_mask(iommu->ecap)) {
+		printk(KERN_ERR
+		       "Requested mask %x exceeds the max invalidation handle"
+		       " mask value %Lx\n", mask,
+		       ecap_max_handle_mask(iommu->ecap));
+		return -1;
+	}
+
+	spin_lock(&irq_2_ir_lock);
+	do {
+		for (i = index; i < index + count; i++)
+			if (table->base[i].present)
+				break;
+		/* empty index found */
+		if (i == index + count)
+			break;
+
+		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
+
+		if (index == start_index) {
+			spin_unlock(&irq_2_ir_lock);
+			printk(KERN_ERR "can't allocate an IRTE\n");
+			return -1;
+		}
+	} while (1);
+
+	for (i = index; i < index + count; i++)
+		table->base[i].present = 1;
+
+	irq_2_iommu[irq].iommu = iommu;
+	irq_2_iommu[irq].irte_index = index;
+	irq_2_iommu[irq].sub_handle = 0;
+	irq_2_iommu[irq].irte_mask = mask;
+
+	spin_unlock(&irq_2_ir_lock);
+
+	return index;
+}
+
+static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+{
+	struct qi_desc desc;
+
+	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
+		   | QI_IEC_SELECTIVE;
+	desc.high = 0;
+
+	qi_submit_sync(&desc, iommu);
+}
+
+int map_irq_to_irte_handle(int irq, u16 *sub_handle)
+{
+	int index;
+
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	*sub_handle = irq_2_iommu[irq].sub_handle;
+	index = irq_2_iommu[irq].irte_index;
+	spin_unlock(&irq_2_ir_lock);
+	return index;
+}
+
+int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
+{
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	irq_2_iommu[irq].iommu = iommu;
+	irq_2_iommu[irq].irte_index = index;
+	irq_2_iommu[irq].sub_handle = subhandle;
+	irq_2_iommu[irq].irte_mask = 0;
+
+	spin_unlock(&irq_2_ir_lock);
+
+	return 0;
+}
+
+int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
+{
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	irq_2_iommu[irq].iommu = NULL;
+	irq_2_iommu[irq].irte_index = 0;
+	irq_2_iommu[irq].sub_handle = 0;
+	irq_2_iommu[irq].irte_mask = 0;
+
+	spin_unlock(&irq_2_ir_lock);
+
+	return 0;
+}
+
+int modify_irte(int irq, struct irte *irte_modified)
+{
+	int index;
+	struct irte *irte;
+	struct intel_iommu *iommu;
+
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	iommu = irq_2_iommu[irq].iommu;
+
+	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	irte = &iommu->ir_table->base[index];
+
+	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
+	__iommu_flush_cache(iommu, irte, sizeof(*irte));
+
+	qi_flush_iec(iommu, index, 0);
+
+	spin_unlock(&irq_2_ir_lock);
+	return 0;
+}
+
+int flush_irte(int irq)
+{
+	int index;
+	struct intel_iommu *iommu;
+
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	iommu = irq_2_iommu[irq].iommu;
+
+	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+
+	qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+	spin_unlock(&irq_2_ir_lock);
+
+	return 0;
+}
+
+int free_irte(int irq)
+{
+	int index, i;
+	struct irte *irte;
+	struct intel_iommu *iommu;
+
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	iommu = irq_2_iommu[irq].iommu;
+
+	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	irte = &iommu->ir_table->base[index];
+
+	if (!irq_2_iommu[irq].sub_handle) {
+		for (i = 0; i < (1 << irq_2_iommu[irq].irte_mask); i++)
+			set_64bit((unsigned long *)irte, 0);
+		qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+	}
+
+	irq_2_iommu[irq].iommu = NULL;
+	irq_2_iommu[irq].irte_index = 0;
+	irq_2_iommu[irq].sub_handle = 0;
+	irq_2_iommu[irq].irte_mask = 0;
+
+	spin_unlock(&irq_2_ir_lock);
+
+	return 0;
+}
+
 static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 {
 	u64 addr;
Index: linux-2.6.26/include/linux/dmar.h
===================================================================
--- linux-2.6.26.orig/include/linux/dmar.h
+++ linux-2.6.26/include/linux/dmar.h
@@ -98,7 +98,19 @@ struct irte {
 		__u64 high;
 	};
 };
+extern int get_irte(int irq, struct irte *entry);
+extern int modify_irte(int irq, struct irte *irte_modified);
+extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
+extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
+			u16 sub_handle);
+extern int map_irq_to_irte_handle(int irq, u16 *sub_handle);
+extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index);
+extern int flush_irte(int irq);
+extern int free_irte(int irq);
+
+extern int irq_remapped(int irq);
 #else
+#define irq_remapped(irq) (0)
 #define enable_intr_remapping(mode) (-1)
 #define intr_remapping_enabled (0)
 #endif