1 From: Suresh Siddha <suresh.b.siddha@intel.com>
2 Subject: x64, x2apic/intr-remap: fix the need for sequential array allocation of iommus
3 References: fate #303948 and fate #303984
4 Patch-Mainline: queued for .28
5 Commit-ID: c42d9f32443397aed2d37d37df161392e6a5862f
7 Signed-off-by: Thomas Renninger <trenn@suse.de>
9 Clean up the intel-iommu code related to deferred iommu flush logic. There is
10 no need to allocate all the iommus as a sequential array.
12 This will be used later in the interrupt-remapping patch series to
13 allocate iommu much earlier and individually for each device remapping hardware unit.
16 Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
17 Cc: akpm@linux-foundation.org
18 Cc: arjan@linux.intel.com
19 Cc: andi@firstfloor.org
20 Cc: ebiederm@xmission.com
21 Cc: jbarnes@virtuousgeek.org
23 Signed-off-by: Ingo Molnar <mingo@elte.hu>
26 drivers/pci/dmar.c | 11 +++++++++--
27 drivers/pci/intel-iommu.c | 23 +++++++----------------
28 drivers/pci/intel-iommu.h | 4 ++--
29 3 files changed, 18 insertions(+), 20 deletions(-)
31 --- a/drivers/pci/dmar.c
32 +++ b/drivers/pci/dmar.c
33 @@ -375,11 +375,18 @@ int __init early_dmar_detect(void)
34 return (ACPI_SUCCESS(status) ? 1 : 0);
37 -struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
38 - struct dmar_drhd_unit *drhd)
39 +struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd)
41 + struct intel_iommu *iommu;
44 + static int iommu_allocated = 0;
46 + iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
50 + iommu->seq_id = iommu_allocated++;
52 iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
54 --- a/drivers/pci/intel-iommu.c
55 +++ b/drivers/pci/intel-iommu.c
56 @@ -58,8 +58,6 @@ static void flush_unmaps_timeout(unsigne
58 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
60 -static struct intel_iommu *g_iommus;
62 #define HIGH_WATER_MARK 250
63 struct deferred_flush_tables {
65 @@ -1651,8 +1649,6 @@ int __init init_dmars(void)
68 for_each_drhd_unit(drhd) {
73 * lock not needed as this is only incremented in the single
74 @@ -1661,12 +1657,6 @@ int __init init_dmars(void)
78 - g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL);
84 deferred_flush = kzalloc(g_num_of_iommus *
85 sizeof(struct deferred_flush_tables), GFP_KERNEL);
86 if (!deferred_flush) {
87 @@ -1674,12 +1664,10 @@ int __init init_dmars(void)
92 for_each_drhd_unit(drhd) {
95 - iommu = alloc_iommu(&g_iommus[i], drhd);
97 + iommu = alloc_iommu(drhd);
101 @@ -1771,7 +1759,6 @@ error:
109 @@ -1928,7 +1915,10 @@ static void flush_unmaps(void)
110 /* just flush them all */
111 for (i = 0; i < g_num_of_iommus; i++) {
112 if (deferred_flush[i].next) {
113 - iommu_flush_iotlb_global(&g_iommus[i], 0);
114 + struct intel_iommu *iommu =
115 + deferred_flush[i].domain[0]->iommu;
117 + iommu_flush_iotlb_global(iommu, 0);
118 for (j = 0; j < deferred_flush[i].next; j++) {
119 __free_iova(&deferred_flush[i].domain[j]->iovad,
120 deferred_flush[i].iova[j]);
121 @@ -1958,7 +1948,8 @@ static void add_unmap(struct dmar_domain
122 if (list_size == HIGH_WATER_MARK)
125 - iommu_id = dom->iommu - g_iommus;
126 + iommu_id = dom->iommu->seq_id;
128 next = deferred_flush[iommu_id].next;
129 deferred_flush[iommu_id].domain[next] = dom;
130 deferred_flush[iommu_id].iova[next] = iova;
131 --- a/drivers/pci/intel-iommu.h
132 +++ b/drivers/pci/intel-iommu.h
133 @@ -182,6 +182,7 @@ struct intel_iommu {
135 u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
136 spinlock_t register_lock; /* protect register handling */
137 + int seq_id; /* sequence id of the iommu */
140 unsigned long *domain_ids; /* bitmap of domains */
141 @@ -198,8 +199,7 @@ struct intel_iommu {
143 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
145 -extern struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
146 - struct dmar_drhd_unit *drhd);
147 +extern struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd);
148 extern void free_iommu(struct intel_iommu *iommu);