/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))
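/*
 * On 32-bit builds there is no single-copy atomic 64-bit MMIO write, so
 * 64-bit registers are updated as two relaxed 32-bit writes, upper half
 * first; readers must tolerate the transiently torn value this implies.
 */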
#ifdef CONFIG_64BIT
#define smmu_writeq		writeq_relaxed
#else
#define smmu_writeq(reg64, addr)				\
	do {							\
		u64 __val = (reg64);				\
		void __iomem *__addr = (addr);			\
		writel_relaxed(__val >> 32, __addr + 4);	\
		writel_relaxed(__val, __addr);			\
	} while (0)
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR_LO		0x50
#define ARM_SMMU_CB_PAR_HI		0x54
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param_named(force_stage, force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1 = 1,
	ARM_SMMU_V2,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}
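/*
 * Masters are tracked in an rb-tree per SMMU, keyed on the address of
 * their device_node; lookups therefore compare pointers, not names.
 */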
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}
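/*
 * Lock-free bitmap allocator: find_next_zero_bit() can race with a
 * concurrent allocation, so loop until test_and_set_bit() confirms we
 * actually won the bit.
 */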
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}
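/*
 * Whole-context invalidation: stage-1 contexts invalidate by ASID via
 * the context bank, stage-2 contexts by VMID via the global register
 * space.
 */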
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
#ifdef CONFIG_64BIT
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
#endif
		}
#ifdef CONFIG_64BIT
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			writeq_relaxed(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
#endif
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
	}
}
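/*
 * Callbacks handed to the io-pgtable code: it invokes these as it
 * modifies the page tables so that TLB maintenance tracks the tables.
 */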
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
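/*
 * Per-context-bank fault handler: report the fault upstream, then retry
 * or terminate any transaction stalled on the context bank.
 */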
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}
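/*
 * Program a context bank (CBA2R/CBAR, TTBRs, TTBCR, MAIRs, SCTLR) from
 * the page-table configuration produced by the io-pgtable allocator.
 */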
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		/*
		 * CBA2R.
		 * *Must* be initialised before CBAR thanks to a VMID16
		 * architectural oversight that affected some implementations.
		 */
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version == ARM_SMMU_V1)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else {
		reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
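/*
 * Finalise a domain on an SMMU: pick a translation stage, claim a
 * context bank, build the io-pgtable and wire up the context fault IRQ.
 */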
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S1;
		else
			fmt = ARM_32_LPAE_S1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S2;
		else
			fmt = ARM_32_LPAE_S2;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version == ARM_SMMU_V1) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update our support page sizes to reflect the page table format */
	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];

		writel_relaxed(S2CR_TYPE_BYPASS,
			       gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}
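/*
 * Attaching finalises the domain on first use; a domain cannot span
 * devices behind different SMMU instances.
 */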
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	if (dev->archdata.iommu) {
		dev_err(dev, "already attached to IOMMU domain\n");
		return -EEXIST;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (IS_ERR_VALUE(ret))
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_cfg *cfg;

	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return;

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
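/*
 * Resolve an IOVA using the hardware address translation operations
 * (ATS1PR/ATSR) rather than walking the page tables in software.
 */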
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;

	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
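/*
 * Groups come from the PCI or generic helpers; the per-bus init hooks
 * above then attach the relevant master configuration to the group.
 */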
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
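/*
 * Illustrative only (not part of this driver): consumers reach the ops
 * above through the generic IOMMU API, e.g.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (domain && !iommu_attach_device(domain, dev))
 *		iommu_map(domain, iova, paddr, SZ_4K,
 *			  IOMMU_READ | IOMMU_WRITE);
 *
 * which lands in arm_smmu_domain_alloc(), arm_smmu_attach_dev() and
 * arm_smmu_map() respectively.
 */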
1468 | static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | |
1469 | { | |
1470 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | |
659db6f6 | 1471 | void __iomem *cb_base; |
45ae7cff | 1472 | int i = 0; |
659db6f6 AH |
1473 | u32 reg; |
1474 | ||
3a5df8ff AH |
1475 | /* clear global FSR */ |
1476 | reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); | |
1477 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); | |
45ae7cff WD |
1478 | |
1479 | /* Mark all SMRn as invalid and all S2CRn as bypass */ | |
1480 | for (i = 0; i < smmu->num_mapping_groups; ++i) { | |
3c8766d0 | 1481 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i)); |
2907320d MH |
1482 | writel_relaxed(S2CR_TYPE_BYPASS, |
1483 | gr0_base + ARM_SMMU_GR0_S2CR(i)); | |
45ae7cff WD |
1484 | } |
1485 | ||
659db6f6 AH |
1486 | /* Make sure all context banks are disabled and clear CB_FSR */ |
1487 | for (i = 0; i < smmu->num_context_banks; ++i) { | |
1488 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i); | |
1489 | writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); | |
1490 | writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR); | |
1491 | } | |
1463fe44 | 1492 | |
45ae7cff | 1493 | /* Invalidate the TLB, just in case */ |
45ae7cff WD |
1494 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); |
1495 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); | |
1496 | ||
3a5df8ff | 1497 | reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
659db6f6 | 1498 | |
45ae7cff | 1499 | /* Enable fault reporting */ |
659db6f6 | 1500 | reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE); |
45ae7cff WD |
1501 | |
1502 | /* Disable TLB broadcasting. */ | |
659db6f6 | 1503 | reg |= (sCR0_VMIDPNE | sCR0_PTM); |
45ae7cff WD |
1504 | |
1505 | /* Enable client access, but bypass when no mapping is found */ | |
659db6f6 | 1506 | reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG); |
45ae7cff WD |
1507 | |
1508 | /* Disable forced broadcasting */ | |
659db6f6 | 1509 | reg &= ~sCR0_FB; |
45ae7cff WD |
1510 | |
1511 | /* Don't upgrade barriers */ | |
659db6f6 | 1512 | reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); |
45ae7cff WD |
1513 | |
1514 | /* Sync outstanding TLB invalidations, then commit the new global config */ | |
518f7136 | 1515 | __arm_smmu_tlb_sync(smmu); |
3a5df8ff | 1516 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
45ae7cff WD |
1517 | } |
1518 | ||
1519 | static int arm_smmu_id_size_to_bits(int size) | |
1520 | { | |
1521 | switch (size) { | |
1522 | case 0: | |
1523 | return 32; | |
1524 | case 1: | |
1525 | return 36; | |
1526 | case 2: | |
1527 | return 40; | |
1528 | case 3: | |
1529 | return 42; | |
1530 | case 4: | |
1531 | return 44; | |
1532 | case 5: | |
1533 | default: | |
1534 | return 48; | |
1535 | } | |
1536 | } | |
1537 | ||
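/*
 * Editorial note: the 0-5 encoding decoded above is the ID2 IAS/OAS
 * field encoding (mirroring the ARMv8 PARange values); for example, an
 * ID2.OAS field of 2 advertises a 40-bit output address size:
 *
 *	arm_smmu_id_size_to_bits(2) == 40
 */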
1538 | static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |
1539 | { | |
1540 | unsigned long size; | |
1541 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | |
1542 | u32 id; | |
bae2c2d4 | 1543 | bool cttw_dt, cttw_reg; |
45ae7cff WD |
1544 | |
1545 | dev_notice(smmu->dev, "probing hardware configuration...\n"); | |
45ae7cff WD |
1546 | dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version); |
1547 | ||
1548 | /* ID0 */ | |
1549 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); | |
4cf740b0 WD |
1550 | |
1551 | /* Restrict available stages based on module parameter */ | |
1552 | if (force_stage == 1) | |
1553 | id &= ~(ID0_S2TS | ID0_NTS); | |
1554 | else if (force_stage == 2) | |
1555 | id &= ~(ID0_S1TS | ID0_NTS); | |
1556 | ||
45ae7cff WD |
1557 | if (id & ID0_S1TS) { |
1558 | smmu->features |= ARM_SMMU_FEAT_TRANS_S1; | |
1559 | dev_notice(smmu->dev, "\tstage 1 translation\n"); | |
1560 | } | |
1561 | ||
1562 | if (id & ID0_S2TS) { | |
1563 | smmu->features |= ARM_SMMU_FEAT_TRANS_S2; | |
1564 | dev_notice(smmu->dev, "\tstage 2 translation\n"); | |
1565 | } | |
1566 | ||
1567 | if (id & ID0_NTS) { | |
1568 | smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED; | |
1569 | dev_notice(smmu->dev, "\tnested translation\n"); | |
1570 | } | |
1571 | ||
1572 | if (!(smmu->features & | |
4cf740b0 | 1573 | (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) { |
45ae7cff WD |
1574 | dev_err(smmu->dev, "\tno translation support!\n"); |
1575 | return -ENODEV; | |
1576 | } | |
1577 | ||
d38f0ff9 | 1578 | if ((id & ID0_S1TS) && ((smmu->version == ARM_SMMU_V1) || !(id & ID0_ATOSNS))) { | |
859a732e MH |
1579 | smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; |
1580 | dev_notice(smmu->dev, "\taddress translation ops\n"); | |
1581 | } | |
1582 | ||
bae2c2d4 RM |
1583 | /* |
1584 | * In order for DMA API calls to work properly, we must defer to what | |
1585 | * the DT says about coherency, regardless of what the hardware claims. | |
1586 | * Fortunately, this also opens up a workaround for systems where the | |
1587 | * ID register value has ended up configured incorrectly. | |
1588 | */ | |
1589 | cttw_dt = of_dma_is_coherent(smmu->dev->of_node); | |
1590 | cttw_reg = !!(id & ID0_CTTW); | |
1591 | if (cttw_dt) | |
45ae7cff | 1592 | smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; |
bae2c2d4 RM |
1593 | if (cttw_dt || cttw_reg) |
1594 | dev_notice(smmu->dev, "\t%scoherent table walk\n", | |
1595 | cttw_dt ? "" : "non-"); | |
1596 | if (cttw_dt != cttw_reg) | |
1597 | dev_notice(smmu->dev, | |
1598 | "\t(IDR0.CTTW overridden by dma-coherent property)\n"); | |
45ae7cff WD |
1599 | |
1600 | if (id & ID0_SMS) { | |
1601 | u32 smr, sid, mask; | |
1602 | ||
1603 | smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; | |
1604 | smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) & | |
1605 | ID0_NUMSMRG_MASK; | |
1606 | if (smmu->num_mapping_groups == 0) { | |
1607 | dev_err(smmu->dev, | |
1608 | "stream-matching supported, but no SMRs present!\n"); | |
1609 | return -ENODEV; | |
1610 | } | |
1611 | ||
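/*
 * Editorial note: the probe below writes all-ones to the ID and MASK
 * fields of SMR0 and reads the register back; bits the hardware does
 * not implement read back as zero, exposing the true field widths so
 * that over-narrow SMRs can be rejected.
 */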
1612 | smr = SMR_MASK_MASK << SMR_MASK_SHIFT; | |
1613 | smr |= (SMR_ID_MASK << SMR_ID_SHIFT); | |
1614 | writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); | |
1615 | smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); | |
1616 | ||
1617 | mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK; | |
1618 | sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK; | |
1619 | if ((mask & sid) != sid) { | |
1620 | dev_err(smmu->dev, | |
1621 | "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n", | |
1622 | mask, sid); | |
1623 | return -ENODEV; | |
1624 | } | |
1625 | ||
1626 | dev_notice(smmu->dev, | |
1627 | "\tstream matching with %u register groups, mask 0x%x", | |
1628 | smmu->num_mapping_groups, mask); | |
3c8766d0 OH |
1629 | } else { |
1630 | smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) & | |
1631 | ID0_NUMSIDB_MASK; | |
45ae7cff WD |
1632 | } |
1633 | ||
1634 | /* ID1 */ | |
1635 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1); | |
c757e852 | 1636 | smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; |
45ae7cff | 1637 | |
c55af7f7 | 1638 | /* Warn if the SMMU address space size differs from the mapped region size */ | |
518f7136 | 1639 | size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); |
c757e852 | 1640 | size *= 2 << smmu->pgshift; |
c55af7f7 | 1641 | if (smmu->size != size) |
2907320d MH |
1642 | dev_warn(smmu->dev, |
1643 | "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n", | |
1644 | size, smmu->size); | |
45ae7cff | 1645 | |
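/*
 * Editorial note, a worked example of the computation above: with 4KB
 * pages (pgshift == 12) and ID1.NUMPAGENDXB == 3, the SMMU spans
 * 2^(3+1) = 16 page-pairs of 2 * 4KB each, i.e. 16 * 8KB = 128KB,
 * which should equal the size of the "reg" region mapped at probe.
 */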
518f7136 | 1646 | smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; |
45ae7cff WD |
1647 | smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; |
1648 | if (smmu->num_s2_context_banks > smmu->num_context_banks) { | |
1649 | dev_err(smmu->dev, "impossible number of S2 context banks!\n"); | |
1650 | return -ENODEV; | |
1651 | } | |
1652 | dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", | |
1653 | smmu->num_context_banks, smmu->num_s2_context_banks); | |
1654 | ||
1655 | /* ID2 */ | |
1656 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); | |
1657 | size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); | |
518f7136 | 1658 | smmu->ipa_size = size; |
45ae7cff | 1659 | |
518f7136 | 1660 | /* The output mask is also applied for bypass */ |
45ae7cff | 1661 | size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); |
518f7136 | 1662 | smmu->pa_size = size; |
45ae7cff | 1663 | |
f1d84548 RM |
1664 | /* |
1665 | * What the page table walker can address actually depends on which | |
1666 | * descriptor format is in use, but since a) we don't know that yet, | |
1667 | * and b) it can vary per context bank, this will have to do... | |
1668 | */ | |
1669 | if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size))) | |
1670 | dev_warn(smmu->dev, | |
1671 | "failed to set DMA mask for table walker\n"); | |
1672 | ||
09360403 | 1673 | if (smmu->version == ARM_SMMU_V1) { |
518f7136 WD |
1674 | smmu->va_size = smmu->ipa_size; |
1675 | size = SZ_4K | SZ_2M | SZ_1G; | |
45ae7cff | 1676 | } else { |
45ae7cff | 1677 | size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; |
518f7136 WD |
1678 | smmu->va_size = arm_smmu_id_size_to_bits(size); |
1679 | #ifndef CONFIG_64BIT | |
1680 | smmu->va_size = min(32UL, smmu->va_size); | |
45ae7cff | 1681 | #endif |
518f7136 WD |
1682 | size = 0; |
1683 | if (id & ID2_PTFS_4K) | |
1684 | size |= SZ_4K | SZ_2M | SZ_1G; | |
1685 | if (id & ID2_PTFS_16K) | |
1686 | size |= SZ_16K | SZ_32M; | |
1687 | if (id & ID2_PTFS_64K) | |
1688 | size |= SZ_64K | SZ_512M; | |
45ae7cff WD |
1689 | } |
1690 | ||
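/*
 * Editorial note: each translation granule contributes its page size
 * plus its block sizes, e.g. ID2_PTFS_4K yields 4KB pages with 2MB and
 * 1GB blocks; a v2 SMMU supporting both 4K and 64K granules therefore
 * reports SZ_4K | SZ_2M | SZ_1G | SZ_64K | SZ_512M here.
 */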
518f7136 WD |
1691 | arm_smmu_ops.pgsize_bitmap &= size; |
1692 | dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size); | |
1693 | ||
28d6007b WD |
1694 | if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) |
1695 | dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", | |
518f7136 | 1696 | smmu->va_size, smmu->ipa_size); |
28d6007b WD |
1697 | |
1698 | if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) | |
1699 | dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", | |
518f7136 | 1700 | smmu->ipa_size, smmu->pa_size); |
28d6007b | 1701 | |
45ae7cff WD |
1702 | return 0; |
1703 | } | |
1704 | ||
09b5269a | 1705 | static const struct of_device_id arm_smmu_of_match[] = { |
09360403 RM |
1706 | { .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 }, |
1707 | { .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 }, | |
1708 | { .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 }, | |
d3aba046 | 1709 | { .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 }, |
09360403 RM |
1710 | { .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 }, |
1711 | { }, | |
1712 | }; | |
1713 | MODULE_DEVICE_TABLE(of, arm_smmu_of_match); | |
1714 | ||
45ae7cff WD |
1715 | static int arm_smmu_device_dt_probe(struct platform_device *pdev) |
1716 | { | |
09360403 | 1717 | const struct of_device_id *of_id; |
45ae7cff WD |
1718 | struct resource *res; |
1719 | struct arm_smmu_device *smmu; | |
45ae7cff WD |
1720 | struct device *dev = &pdev->dev; |
1721 | struct rb_node *node; | |
1722 | struct of_phandle_args masterspec; | |
1723 | int num_irqs, i, err; | |
1724 | ||
1725 | smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); | |
1726 | if (!smmu) { | |
1727 | dev_err(dev, "failed to allocate arm_smmu_device\n"); | |
1728 | return -ENOMEM; | |
1729 | } | |
1730 | smmu->dev = dev; | |
1731 | ||
09360403 RM |
1732 | of_id = of_match_node(arm_smmu_of_match, dev->of_node); |
1733 | smmu->version = (enum arm_smmu_arch_version)of_id->data; | |
1734 | ||
45ae7cff | 1735 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
8a7f4312 JL |
1736 | smmu->base = devm_ioremap_resource(dev, res); |
1737 | if (IS_ERR(smmu->base)) | |
1738 | return PTR_ERR(smmu->base); | |
45ae7cff | 1739 | smmu->size = resource_size(res); |
45ae7cff WD |
1740 | |
1741 | if (of_property_read_u32(dev->of_node, "#global-interrupts", | |
1742 | &smmu->num_global_irqs)) { | |
1743 | dev_err(dev, "missing #global-interrupts property\n"); | |
1744 | return -ENODEV; | |
1745 | } | |
1746 | ||
1747 | num_irqs = 0; | |
1748 | while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) { | |
1749 | num_irqs++; | |
1750 | if (num_irqs > smmu->num_global_irqs) | |
1751 | smmu->num_context_irqs++; | |
1752 | } | |
1753 | ||
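/*
 * Editorial note: per the binding consumed here, the interrupt list
 * contains the #global-interrupts global fault IRQs first; everything
 * after them is counted as a context-bank interrupt by the loop above.
 */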
44a08de2 AH |
1754 | if (!smmu->num_context_irqs) { |
1755 | dev_err(dev, "found %d interrupts but expected at least %d\n", | |
1756 | num_irqs, smmu->num_global_irqs + 1); | |
1757 | return -ENODEV; | |
45ae7cff | 1758 | } |
45ae7cff WD |
1759 | |
1760 | smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs), | |
1761 | GFP_KERNEL); | |
1762 | if (!smmu->irqs) { | |
1763 | dev_err(dev, "failed to allocate %d irqs\n", num_irqs); | |
1764 | return -ENOMEM; | |
1765 | } | |
1766 | ||
1767 | for (i = 0; i < num_irqs; ++i) { | |
1768 | int irq = platform_get_irq(pdev, i); | |
2907320d | 1769 | |
45ae7cff WD |
1770 | if (irq < 0) { |
1771 | dev_err(dev, "failed to get irq index %d\n", i); | |
1772 | return -ENODEV; | |
1773 | } | |
1774 | smmu->irqs[i] = irq; | |
1775 | } | |
1776 | ||
3c8766d0 OH |
1777 | err = arm_smmu_device_cfg_probe(smmu); |
1778 | if (err) | |
1779 | return err; | |
1780 | ||
45ae7cff WD |
1781 | i = 0; |
1782 | smmu->masters = RB_ROOT; | |
1783 | while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters", | |
1784 | "#stream-id-cells", i, | |
1785 | &masterspec)) { | |
1786 | err = register_smmu_master(smmu, dev, &masterspec); | |
1787 | if (err) { | |
1788 | dev_err(dev, "failed to add master %s\n", | |
1789 | masterspec.np->name); | |
1790 | goto out_put_masters; | |
1791 | } | |
1792 | ||
1793 | i++; | |
1794 | } | |
1795 | dev_notice(dev, "registered %d master devices\n", i); | |
1796 | ||
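/*
 * Editorial sketch of the device-tree shape consumed above, with
 * illustrative addresses and stream IDs (see the "arm,smmu" DT binding
 * for the authoritative description):
 *
 *	smmu@ba5e0000 {
 *		compatible = "arm,smmu-v1";
 *		reg = <0xba5e0000 0x10000>;
 *		#global-interrupts = <2>;
 *		interrupts = <0 32 4>, <0 33 4>,  // 2 global faults
 *			     <0 34 4>, <0 35 4>;  // context faults
 *		mmu-masters = <&dma0 0xd01d 0xd01e>;
 *	};
 *
 * Each master node referenced by "mmu-masters" must define
 * "#stream-id-cells" so the phandle arguments can be parsed.
 */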
3a5df8ff AH |
1797 | parse_driver_options(smmu); |
1798 | ||
09360403 | 1799 | if (smmu->version > ARM_SMMU_V1 && |
45ae7cff WD |
1800 | smmu->num_context_banks != smmu->num_context_irqs) { |
1801 | dev_err(dev, | |
1802 | "found only %d context interrupt(s) but %d required\n", | |
1803 | smmu->num_context_irqs, smmu->num_context_banks); | |
89a23cde | 1804 | err = -ENODEV; |
44680eed | 1805 | goto out_put_masters; |
45ae7cff WD |
1806 | } |
1807 | ||
45ae7cff WD |
1808 | for (i = 0; i < smmu->num_global_irqs; ++i) { |
1809 | err = request_irq(smmu->irqs[i], | |
1810 | arm_smmu_global_fault, | |
1811 | IRQF_SHARED, | |
1812 | "arm-smmu global fault", | |
1813 | smmu); | |
1814 | if (err) { | |
1815 | dev_err(dev, "failed to request global IRQ %d (%u)\n", | |
1816 | i, smmu->irqs[i]); | |
1817 | goto out_free_irqs; | |
1818 | } | |
1819 | } | |
1820 | ||
1821 | INIT_LIST_HEAD(&smmu->list); | |
1822 | spin_lock(&arm_smmu_devices_lock); | |
1823 | list_add(&smmu->list, &arm_smmu_devices); | |
1824 | spin_unlock(&arm_smmu_devices_lock); | |
fd90cecb WD |
1825 | |
1826 | arm_smmu_device_reset(smmu); | |
45ae7cff WD |
1827 | return 0; |
1828 | ||
1829 | out_free_irqs: | |
1830 | while (i--) | |
1831 | free_irq(smmu->irqs[i], smmu); | |
1832 | ||
45ae7cff WD |
1833 | out_put_masters: |
1834 | for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { | |
2907320d MH |
1835 | struct arm_smmu_master *master |
1836 | = container_of(node, struct arm_smmu_master, node); | |
45ae7cff WD |
1837 | of_node_put(master->of_node); |
1838 | } | |
1839 | ||
1840 | return err; | |
1841 | } | |
1842 | ||
1843 | static int arm_smmu_device_remove(struct platform_device *pdev) | |
1844 | { | |
1845 | int i; | |
1846 | struct device *dev = &pdev->dev; | |
1847 | struct arm_smmu_device *curr, *smmu = NULL; | |
1848 | struct rb_node *node; | |
1849 | ||
1850 | spin_lock(&arm_smmu_devices_lock); | |
1851 | list_for_each_entry(curr, &arm_smmu_devices, list) { | |
1852 | if (curr->dev == dev) { | |
1853 | smmu = curr; | |
1854 | list_del(&smmu->list); | |
1855 | break; | |
1856 | } | |
1857 | } | |
1858 | spin_unlock(&arm_smmu_devices_lock); | |
1859 | ||
1860 | if (!smmu) | |
1861 | return -ENODEV; | |
1862 | ||
45ae7cff | 1863 | for (node = rb_first(&smmu->masters); node; node = rb_next(node)) { |
2907320d MH |
1864 | struct arm_smmu_master *master |
1865 | = container_of(node, struct arm_smmu_master, node); | |
45ae7cff WD |
1866 | of_node_put(master->of_node); |
1867 | } | |
1868 | ||
ecfadb6e | 1869 | if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) |
45ae7cff WD |
1870 | dev_err(dev, "removing device with active domains!\n"); |
1871 | ||
1872 | for (i = 0; i < smmu->num_global_irqs; ++i) | |
1873 | free_irq(smmu->irqs[i], smmu); | |
1874 | ||
1875 | /* Disable the SMMU: with CLIENTPD set, client transactions bypass it */ | |
2907320d | 1876 | writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
45ae7cff WD |
1877 | return 0; |
1878 | } | |
1879 | ||
45ae7cff WD |
1880 | static struct platform_driver arm_smmu_driver = { |
1881 | .driver = { | |
45ae7cff WD |
1882 | .name = "arm-smmu", |
1883 | .of_match_table = of_match_ptr(arm_smmu_of_match), | |
1884 | }, | |
1885 | .probe = arm_smmu_device_dt_probe, | |
1886 | .remove = arm_smmu_device_remove, | |
1887 | }; | |
1888 | ||
1889 | static int __init arm_smmu_init(void) | |
1890 | { | |
0e7d37ad | 1891 | struct device_node *np; |
45ae7cff WD |
1892 | int ret; |
1893 | ||
0e7d37ad TR |
1894 | /* |
1895 | * Play nice with systems that don't have an ARM SMMU by checking that | |
1896 | * an ARM SMMU exists in the system before proceeding with the driver | |
1897 | * and IOMMU bus operation registration. | |
1898 | */ | |
1899 | np = of_find_matching_node(NULL, arm_smmu_of_match); | |
1900 | if (!np) | |
1901 | return 0; | |
1902 | ||
1903 | of_node_put(np); | |
1904 | ||
45ae7cff WD |
1905 | ret = platform_driver_register(&arm_smmu_driver); |
1906 | if (ret) | |
1907 | return ret; | |
1908 | ||
1909 | /* Oh, for a proper bus abstraction */ | |
6614ee77 | 1910 | if (!iommu_present(&platform_bus_type)) |
45ae7cff WD |
1911 | bus_set_iommu(&platform_bus_type, &arm_smmu_ops); |
1912 | ||
d123cf82 | 1913 | #ifdef CONFIG_ARM_AMBA |
6614ee77 | 1914 | if (!iommu_present(&amba_bustype)) |
45ae7cff | 1915 | bus_set_iommu(&amba_bustype, &arm_smmu_ops); |
d123cf82 | 1916 | #endif |
45ae7cff | 1917 | |
a9a1b0b5 WD |
1918 | #ifdef CONFIG_PCI |
1919 | if (!iommu_present(&pci_bus_type)) | |
1920 | bus_set_iommu(&pci_bus_type, &arm_smmu_ops); | |
1921 | #endif | |
1922 | ||
45ae7cff WD |
1923 | return 0; |
1924 | } | |
1925 | ||
1926 | static void __exit arm_smmu_exit(void) | |
1927 | { | |
1928 | platform_driver_unregister(&arm_smmu_driver); | |
1929 | } | |
1930 | ||
b1950b27 | 1931 | subsys_initcall(arm_smmu_init); |
45ae7cff WD |
1932 | module_exit(arm_smmu_exit); |
1933 | ||
1934 | MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); | |
1935 | MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); | |
1936 | MODULE_LICENSE("GPL v2"); |