/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS 128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS 128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu) ((smmu)->base)
#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)					\
	((smmu)->base +						\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq writeq_relaxed
#else
#define smmu_write_atomic_lq writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0 0x0
#define sCR0_CLIENTPD (1 << 0)
#define sCR0_GFRE (1 << 1)
#define sCR0_GFIE (1 << 2)
#define sCR0_GCFGFRE (1 << 4)
#define sCR0_GCFGFIE (1 << 5)
#define sCR0_USFCFG (1 << 10)
#define sCR0_VMIDPNE (1 << 11)
#define sCR0_PTM (1 << 12)
#define sCR0_FB (1 << 13)
#define sCR0_VMID16EN (1 << 31)
#define sCR0_BSU_SHIFT 14
#define sCR0_BSU_MASK 0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR 0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0 0x20
#define ARM_SMMU_GR0_ID1 0x24
#define ARM_SMMU_GR0_ID2 0x28
#define ARM_SMMU_GR0_ID3 0x2c
#define ARM_SMMU_GR0_ID4 0x30
#define ARM_SMMU_GR0_ID5 0x34
#define ARM_SMMU_GR0_ID6 0x38
#define ARM_SMMU_GR0_ID7 0x3c
#define ARM_SMMU_GR0_sGFSR 0x48
#define ARM_SMMU_GR0_sGFSYNR0 0x50
#define ARM_SMMU_GR0_sGFSYNR1 0x54
#define ARM_SMMU_GR0_sGFSYNR2 0x58

#define ID0_S1TS (1 << 30)
#define ID0_S2TS (1 << 29)
#define ID0_NTS (1 << 28)
#define ID0_SMS (1 << 27)
#define ID0_ATOSNS (1 << 26)
#define ID0_PTFS_NO_AARCH32 (1 << 25)
#define ID0_PTFS_NO_AARCH32S (1 << 24)
#define ID0_CTTW (1 << 14)
#define ID0_NUMIRPT_SHIFT 16
#define ID0_NUMIRPT_MASK 0xff
#define ID0_NUMSIDB_SHIFT 9
#define ID0_NUMSIDB_MASK 0xf
#define ID0_NUMSMRG_SHIFT 0
#define ID0_NUMSMRG_MASK 0xff

#define ID1_PAGESIZE (1 << 31)
#define ID1_NUMPAGENDXB_SHIFT 28
#define ID1_NUMPAGENDXB_MASK 7
#define ID1_NUMS2CB_SHIFT 16
#define ID1_NUMS2CB_MASK 0xff
#define ID1_NUMCB_SHIFT 0
#define ID1_NUMCB_MASK 0xff

#define ID2_OAS_SHIFT 4
#define ID2_OAS_MASK 0xf
#define ID2_IAS_SHIFT 0
#define ID2_IAS_MASK 0xf
#define ID2_UBS_SHIFT 8
#define ID2_UBS_MASK 0xf
#define ID2_PTFS_4K (1 << 12)
#define ID2_PTFS_16K (1 << 13)
#define ID2_PTFS_64K (1 << 14)
#define ID2_VMID16 (1 << 15)

#define ID7_MAJOR_SHIFT 4
#define ID7_MAJOR_MASK 0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID 0x64
#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
#define ARM_SMMU_GR0_TLBIALLH 0x6c
#define ARM_SMMU_GR0_sTLBGSYNC 0x70
#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
#define sTLBGSTATUS_GSACTIVE (1 << 0)
#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
#define SMR_VALID (1 << 31)
#define SMR_MASK_SHIFT 16
#define SMR_MASK_MASK 0x7fff
#define SMR_ID_SHIFT 0
#define SMR_ID_MASK 0x7fff

#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT 0
#define S2CR_CBNDX_MASK 0xff
#define S2CR_TYPE_SHIFT 16
#define S2CR_TYPE_MASK 0x3
#define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT 24
#define S2CR_PRIVCFG_UNPRIV (2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT 0
#define CBAR_VMID_MASK 0xff
#define CBAR_S1_BPSHCFG_SHIFT 8
#define CBAR_S1_BPSHCFG_MASK 3
#define CBAR_S1_BPSHCFG_NSH 3
#define CBAR_S1_MEMATTR_SHIFT 12
#define CBAR_S1_MEMATTR_MASK 0xf
#define CBAR_S1_MEMATTR_WB 0xf
#define CBAR_TYPE_SHIFT 16
#define CBAR_TYPE_MASK 0x3
#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT 24
#define CBAR_IRPTNDX_MASK 0xff

#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT (0 << 0)
#define CBA2R_RW64_64BIT (1 << 0)
#define CBA2R_VMID_SHIFT 16
#define CBA2R_VMID_MASK 0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR 0x0
#define ARM_SMMU_CB_ACTLR 0x4
#define ARM_SMMU_CB_RESUME 0x8
#define ARM_SMMU_CB_TTBCR2 0x10
#define ARM_SMMU_CB_TTBR0 0x20
#define ARM_SMMU_CB_TTBR1 0x28
#define ARM_SMMU_CB_TTBCR 0x30
#define ARM_SMMU_CB_S1_MAIR0 0x38
#define ARM_SMMU_CB_S1_MAIR1 0x3c
#define ARM_SMMU_CB_PAR 0x50
#define ARM_SMMU_CB_FSR 0x58
#define ARM_SMMU_CB_FAR 0x60
#define ARM_SMMU_CB_FSYNR0 0x68
#define ARM_SMMU_CB_S1_TLBIVA 0x600
#define ARM_SMMU_CB_S1_TLBIASID 0x610
#define ARM_SMMU_CB_S1_TLBIVAL 0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
#define ARM_SMMU_CB_ATS1PR 0x800
#define ARM_SMMU_CB_ATSR 0x8f0

#define SCTLR_S1_ASIDPNE (1 << 12)
#define SCTLR_CFCFG (1 << 7)
#define SCTLR_CFIE (1 << 6)
#define SCTLR_CFRE (1 << 5)
#define SCTLR_E (1 << 4)
#define SCTLR_AFE (1 << 2)
#define SCTLR_TRE (1 << 1)
#define SCTLR_M (1 << 0)
#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE (1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)

#define CB_PAR_F (1 << 0)

#define ATSR_ACTIVE (1 << 0)

#define RESUME_RETRY (0 << 0)
#define RESUME_TERMINATE (1 << 0)

#define TTBCR2_SEP_SHIFT 15
#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT 48

#define FSR_MULTI (1 << 31)
#define FSR_SS (1 << 30)
#define FSR_UUT (1 << 8)
#define FSR_ASF (1 << 7)
#define FSR_TLBLKF (1 << 6)
#define FSR_TLBMCF (1 << 5)
#define FSR_EF (1 << 4)
#define FSR_PF (1 << 3)
#define FSR_AFF (1 << 2)
#define FSR_TF (1 << 1)

#define FSR_IGN (FSR_AFF | FSR_ASF | \
		 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
		   FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR (1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_smr {
	u8 idx;
	u16 mask;
	u16 id;
};

struct arm_smmu_master_cfg {
	int num_streamids;
	u16 streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr *smrs;
};

struct arm_smmu_master {
	struct device_node *of_node;
	struct rb_node node;
	struct arm_smmu_master_cfg cfg;
};

struct arm_smmu_device {
	struct device *dev;

	void __iomem *base;
	unsigned long size;
	unsigned long pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
#define ARM_SMMU_FEAT_VMID16 (1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
	u32 features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32 options;
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;

	u32 num_context_banks;
	u32 num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t irptndx;

	u32 num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long va_size;
	unsigned long ipa_size;
	unsigned long pa_size;

	u32 num_global_irqs;
	u32 num_context_irqs;
	unsigned int *irqs;

	struct list_head list;
	struct rb_root masters;

	u32 cavium_id_base; /* Specific to Cavium */
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8 cbndx;
	u8 irptndx;
	u32 cbar;
	enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX 0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device *smmu;
	struct io_pgtable_ops *pgtbl_ops;
	spinlock_t pgtbl_lock;
	struct arm_smmu_cfg cfg;
	enum arm_smmu_domain_stage stage;
	struct mutex init_mutex; /* Protects smmu pointer */
	struct iommu_domain domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

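/* Enable any implementation-specific options described in the SMMU's DT node */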
static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

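/*
 * For PCI masters the relevant firmware node is that of the root bus's
 * host bridge; everything else uses the device's own of_node.
 */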
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

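/* Look up a master in this SMMU's rb-tree; entries are keyed by of_node pointer */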
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node = masterspec->np;
	master->cfg.num_streamids = masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

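/* Find the SMMU instance (if any) that has this device registered as a master */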
static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

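/* Atomically claim a free index in the bitmap, retrying if we lose a race */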
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

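/* TLB maintenance callbacks invoked by the io-pgtable code */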
static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

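/*
 * Invalidate a range of IOVAs: by VA+ASID for stage 1, by IPA for stage 2 on
 * SMMUv2 (or by VMID on SMMUv1), issuing one invalidation per granule.
 */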
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all = arm_smmu_tlb_inv_context,
	.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
	.tlb_sync = arm_smmu_tlb_sync,
};

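/*
 * Context bank fault handler: report the faulting IOVA via
 * report_iommu_fault() and then retry or terminate any stalled transaction.
 */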
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
			"Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

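/*
 * Program a context bank (CBA2R, CBAR, TTBRs, TTBCR, MAIRs and SCTLR) from
 * the page table configuration produced by the io-pgtable library.
 */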
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

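/*
 * Finalise a domain against a particular SMMU: pick the translation stage
 * and context format, allocate a context bank and io-pgtable instance, then
 * wire up the context fault interrupt.
 */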
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
		.ias = ias,
		.oas = oas,
		.tlb = &arm_smmu_gather_ops,
		.iommu_dev = smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update our support page sizes to reflect the page table format */
	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx = idx,
			.mask = 0, /* We don't currently share SMRs */
			.id = cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices that are both a platform_device
	 * and a PCI device (i.e. a PCI host controller).
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static void arm_smmu_detach_dev(struct device *dev,
				struct arm_smmu_master_cfg *cfg)
{
	struct iommu_domain *domain = dev->archdata.iommu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (IS_ERR_VALUE(ret))
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

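/*
 * Resolve an IOVA using the hardware ATS1PR translation operation, falling
 * back to a software page table walk if the request times out.
 */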
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable = arm_smmu_capable,
	.domain_alloc = arm_smmu_domain_alloc,
	.domain_free = arm_smmu_domain_free,
	.attach_dev = arm_smmu_attach_dev,
	.map = arm_smmu_map,
	.unmap = arm_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = arm_smmu_iova_to_phys,
	.add_device = arm_smmu_add_device,
	.remove_device = arm_smmu_remove_device,
	.device_group = arm_smmu_device_group,
	.domain_get_attr = arm_smmu_domain_get_attr,
	.domain_set_attr = arm_smmu_domain_set_attr,
	.pgsize_bitmap = -1UL, /* Restricted during device attach */
};

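/*
 * Reset the SMMU: clear any recorded faults, put all stream mappings and
 * context banks into a known state, invalidate the TLBs and configure sCR0.
 */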
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
	 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
	 * bit is only present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

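/* Convert the ID register address size field encoding into a number of bits */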
1625static int arm_smmu_id_size_to_bits(int size)
1626{
1627 switch (size) {
1628 case 0:
1629 return 32;
1630 case 1:
1631 return 36;
1632 case 2:
1633 return 40;
1634 case 3:
1635 return 42;
1636 case 4:
1637 return 44;
1638 case 5:
1639 default:
1640 return 48;
1641 }
1642}
1643
1644static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1645{
1646 unsigned long size;
1647 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1648 u32 id;
bae2c2d4 1649 bool cttw_dt, cttw_reg;
45ae7cff
WD
1650
1651 dev_notice(smmu->dev, "probing hardware configuration...\n");
b7862e35
RM
1652 dev_notice(smmu->dev, "SMMUv%d with:\n",
1653 smmu->version == ARM_SMMU_V2 ? 2 : 1);
45ae7cff
WD
1654
1655 /* ID0 */
1656 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
4cf740b0
WD
1657
1658 /* Restrict available stages based on module parameter */
1659 if (force_stage == 1)
1660 id &= ~(ID0_S2TS | ID0_NTS);
1661 else if (force_stage == 2)
1662 id &= ~(ID0_S1TS | ID0_NTS);
1663
45ae7cff
WD
1664 if (id & ID0_S1TS) {
1665 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1666 dev_notice(smmu->dev, "\tstage 1 translation\n");
1667 }
1668
1669 if (id & ID0_S2TS) {
1670 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1671 dev_notice(smmu->dev, "\tstage 2 translation\n");
1672 }
1673
1674 if (id & ID0_NTS) {
1675 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1676 dev_notice(smmu->dev, "\tnested translation\n");
1677 }
1678
1679 if (!(smmu->features &
4cf740b0 1680 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
45ae7cff
WD
1681 dev_err(smmu->dev, "\tno translation support!\n");
1682 return -ENODEV;
1683 }
1684
b7862e35
RM
1685 if ((id & ID0_S1TS) &&
1686 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
859a732e
MH
1687 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1688 dev_notice(smmu->dev, "\taddress translation ops\n");
1689 }
1690
bae2c2d4
RM
1691 /*
1692 * In order for DMA API calls to work properly, we must defer to what
1693 * the DT says about coherency, regardless of what the hardware claims.
1694 * Fortunately, this also opens up a workaround for systems where the
1695 * ID register value has ended up configured incorrectly.
1696 */
1697 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
1698 cttw_reg = !!(id & ID0_CTTW);
1699 if (cttw_dt)
45ae7cff 1700 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
bae2c2d4
RM
1701 if (cttw_dt || cttw_reg)
1702 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1703 cttw_dt ? "" : "non-");
1704 if (cttw_dt != cttw_reg)
1705 dev_notice(smmu->dev,
1706 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
45ae7cff
WD
1707
1708 if (id & ID0_SMS) {
1709 u32 smr, sid, mask;
1710
1711 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1712 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
1713 ID0_NUMSMRG_MASK;
1714 if (smmu->num_mapping_groups == 0) {
1715 dev_err(smmu->dev,
1716 "stream-matching supported, but no SMRs present!\n");
1717 return -ENODEV;
1718 }
1719
1720 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
1721 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
1722 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1723 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1724
1725 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
1726 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
1727 if ((mask & sid) != sid) {
1728 dev_err(smmu->dev,
1729 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
1730 mask, sid);
1731 return -ENODEV;
1732 }
1733
1734 dev_notice(smmu->dev,
1735 "\tstream matching with %u register groups, mask 0x%x",
1736 smmu->num_mapping_groups, mask);
3c8766d0
OH
1737 } else {
1738 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
1739 ID0_NUMSIDB_MASK;
45ae7cff
WD
1740 }
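	/*
	 * The SMR probe above works by writing all-ones to the ID and MASK
	 * fields of SMR0 and reading the value back: unimplemented bits read
	 * as zero, and the check insists that every implemented ID bit can
	 * also be masked.
	 */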
1741
7602b871
RM
1742 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1743 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1744 if (!(id & ID0_PTFS_NO_AARCH32S))
1745 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1746 }
1747
45ae7cff
WD
1748 /* ID1 */
1749 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
c757e852 1750 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
45ae7cff 1751
c55af7f7 1752 /* Check the SMMU address space size against the size of the mapped region */
518f7136 1753 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
c757e852 1754 size *= 2 << smmu->pgshift;
c55af7f7 1755 if (smmu->size != size)
2907320d
MH
1756 dev_warn(smmu->dev,
1757 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1758 size, smmu->size);
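	/*
	 * The expected size is 2 * NUMPAGE * PAGESIZE: ID1.NUMPAGENDXB gives
	 * log2(NUMPAGE) - 1, and the SMMU occupies one block of global pages
	 * followed by an equally sized block of context-bank pages.
	 */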
45ae7cff 1759
518f7136 1760 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
45ae7cff
WD
1761 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1762 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1763 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1764 return -ENODEV;
1765 }
1766 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1767 smmu->num_context_banks, smmu->num_s2_context_banks);
e086d912
RM
1768 /*
1769 * Cavium CN88xx erratum #27704.
1770 * Ensure ASID and VMID allocation is unique across all SMMUs in
1771 * the system.
1772 */
1773 if (smmu->model == CAVIUM_SMMUV2) {
1774 smmu->cavium_id_base =
1775 atomic_add_return(smmu->num_context_banks,
1776 &cavium_smmu_context_count);
1777 smmu->cavium_id_base -= smmu->num_context_banks;
1778 }
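	/*
	 * Each Cavium SMMU instance claims a contiguous range of IDs from the
	 * shared counter, so context banks on different SMMUs never end up
	 * using the same ASID or VMID values.
	 */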
45ae7cff
WD
1779
1780 /* ID2 */
1781 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1782 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
518f7136 1783 smmu->ipa_size = size;
45ae7cff 1784
518f7136 1785 /* The output mask is also applied for bypass */
45ae7cff 1786 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
518f7136 1787 smmu->pa_size = size;
45ae7cff 1788
4e3e9b69
TC
1789 if (id & ID2_VMID16)
1790 smmu->features |= ARM_SMMU_FEAT_VMID16;
1791
f1d84548
RM
1792 /*
1793 * What the page table walker can address actually depends on which
1794 * descriptor format is in use, but since a) we don't know that yet,
1795 * and b) it can vary per context bank, this will have to do...
1796 */
1797 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1798 dev_warn(smmu->dev,
1799 "failed to set DMA mask for table walker\n");
1800
b7862e35 1801 if (smmu->version < ARM_SMMU_V2) {
518f7136 1802 smmu->va_size = smmu->ipa_size;
b7862e35
RM
1803 if (smmu->version == ARM_SMMU_V1_64K)
1804 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
45ae7cff 1805 } else {
45ae7cff 1806 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
518f7136 1807 smmu->va_size = arm_smmu_id_size_to_bits(size);
518f7136 1808 if (id & ID2_PTFS_4K)
7602b871 1809 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
518f7136 1810 if (id & ID2_PTFS_16K)
7602b871 1811 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
518f7136 1812 if (id & ID2_PTFS_64K)
7602b871 1813 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
45ae7cff
WD
1814 }
1815
7602b871
RM
1816 /* Now we've corralled the various formats, what'll it do? */
1817 size = 0;
1818 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
1819 size |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
1820 if (smmu->features &
1821 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
1822 size |= SZ_4K | SZ_2M | SZ_1G;
1823 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
1824 size |= SZ_16K | SZ_32M;
1825 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
1826 size |= SZ_64K | SZ_512M;
1827
518f7136
WD
1828 arm_smmu_ops.pgsize_bitmap &= size;
1829 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
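	/*
	 * arm_smmu_ops is shared by every SMMU instance, so the bitmap ends
	 * up as the intersection of the page sizes supported by all of the
	 * SMMUs probed so far.
	 */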
1830
28d6007b
WD
1831 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1832 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
518f7136 1833 smmu->va_size, smmu->ipa_size);
28d6007b
WD
1834
1835 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1836 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
518f7136 1837 smmu->ipa_size, smmu->pa_size);
28d6007b 1838
45ae7cff
WD
1839 return 0;
1840}
1841
67b65a3f
RM
1842struct arm_smmu_match_data {
1843 enum arm_smmu_arch_version version;
1844 enum arm_smmu_implementation model;
1845};
1846
1847#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
1848static struct arm_smmu_match_data name = { .version = ver, .model = imp }
1849
1850ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1851ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
b7862e35 1852ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
f0cfffc4 1853ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
e086d912 1854ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
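/*
 * For reference, ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500)
 * expands to roughly:
 *
 *	static struct arm_smmu_match_data arm_mmu500 = {
 *		.version = ARM_SMMU_V2,
 *		.model   = ARM_MMU500,
 *	};
 */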
67b65a3f 1855
09b5269a 1856static const struct of_device_id arm_smmu_of_match[] = {
67b65a3f
RM
1857 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1858 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1859 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
b7862e35 1860 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
f0cfffc4 1861 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
e086d912 1862 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
09360403
RM
1863 { },
1864};
1865MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
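/*
 * Probing is driven by the compatible strings above; an illustrative
 * device-tree node (addresses and interrupt specifiers made up) might be:
 *
 *	smmu: iommu@2b400000 {
 *		compatible = "arm,mmu-500";
 *		reg = <0x2b400000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 72 4>, <0 73 4>, <0 74 4>;
 *	};
 */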
1866
45ae7cff
WD
1867static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1868{
09360403 1869 const struct of_device_id *of_id;
67b65a3f 1870 const struct arm_smmu_match_data *data;
45ae7cff
WD
1871 struct resource *res;
1872 struct arm_smmu_device *smmu;
45ae7cff
WD
1873 struct device *dev = &pdev->dev;
1874 struct rb_node *node;
1875 struct of_phandle_args masterspec;
1876 int num_irqs, i, err;
1877
1878 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
1879 if (!smmu) {
1880 dev_err(dev, "failed to allocate arm_smmu_device\n");
1881 return -ENOMEM;
1882 }
1883 smmu->dev = dev;
1884
09360403 1885 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
67b65a3f
RM
1886 data = of_id->data;
1887 smmu->version = data->version;
1888 smmu->model = data->model;
09360403 1889
45ae7cff 1890 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
8a7f4312
JL
1891 smmu->base = devm_ioremap_resource(dev, res);
1892 if (IS_ERR(smmu->base))
1893 return PTR_ERR(smmu->base);
45ae7cff 1894 smmu->size = resource_size(res);
45ae7cff
WD
1895
1896 if (of_property_read_u32(dev->of_node, "#global-interrupts",
1897 &smmu->num_global_irqs)) {
1898 dev_err(dev, "missing #global-interrupts property\n");
1899 return -ENODEV;
1900 }
1901
1902 num_irqs = 0;
1903 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
1904 num_irqs++;
1905 if (num_irqs > smmu->num_global_irqs)
1906 smmu->num_context_irqs++;
1907 }
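	/*
	 * Per the binding, the first #global-interrupts entries are global
	 * fault interrupts; whatever is left over is counted here as
	 * per-context-bank interrupts.
	 */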
1908
44a08de2
AH
1909 if (!smmu->num_context_irqs) {
1910 dev_err(dev, "found %d interrupts but expected at least %d\n",
1911 num_irqs, smmu->num_global_irqs + 1);
1912 return -ENODEV;
45ae7cff 1913 }
45ae7cff
WD
1914
1915 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
1916 GFP_KERNEL);
1917 if (!smmu->irqs) {
1918 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
1919 return -ENOMEM;
1920 }
1921
1922 for (i = 0; i < num_irqs; ++i) {
1923 int irq = platform_get_irq(pdev, i);
2907320d 1924
45ae7cff
WD
1925 if (irq < 0) {
1926 dev_err(dev, "failed to get irq index %d\n", i);
1927 return -ENODEV;
1928 }
1929 smmu->irqs[i] = irq;
1930 }
1931
3c8766d0
OH
1932 err = arm_smmu_device_cfg_probe(smmu);
1933 if (err)
1934 return err;
1935
45ae7cff
WD
1936 i = 0;
1937 smmu->masters = RB_ROOT;
1938 while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
1939 "#stream-id-cells", i,
1940 &masterspec)) {
1941 err = register_smmu_master(smmu, dev, &masterspec);
1942 if (err) {
1943 dev_err(dev, "failed to add master %s\n",
1944 masterspec.np->name);
1945 goto out_put_masters;
1946 }
1947
1948 i++;
1949 }
1950 dev_notice(dev, "registered %d master devices\n", i);
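	/*
	 * The masters come from the legacy "mmu-masters" binding on the SMMU
	 * node, e.g. (stream IDs made up):
	 *
	 *	mmu-masters = <&dma0 0xd01d 0xd01e>,
	 *	              <&dma1 0xd11c>;
	 *
	 * where each referenced master node sets #stream-id-cells to the
	 * number of IDs that follow its phandle.
	 */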
1951
3a5df8ff
AH
1952 parse_driver_options(smmu);
1953
b7862e35 1954 if (smmu->version == ARM_SMMU_V2 &&
45ae7cff
WD
1955 smmu->num_context_banks != smmu->num_context_irqs) {
1956 dev_err(dev,
1957 "found only %d context interrupt(s) but %d required\n",
1958 smmu->num_context_irqs, smmu->num_context_banks);
89a23cde 1959 err = -ENODEV;
44680eed 1960 goto out_put_masters;
45ae7cff
WD
1961 }
1962
45ae7cff
WD
1963 for (i = 0; i < smmu->num_global_irqs; ++i) {
1964 err = request_irq(smmu->irqs[i],
1965 arm_smmu_global_fault,
1966 IRQF_SHARED,
1967 "arm-smmu global fault",
1968 smmu);
1969 if (err) {
1970 dev_err(dev, "failed to request global IRQ %d (%u)\n",
1971 i, smmu->irqs[i]);
1972 goto out_free_irqs;
1973 }
1974 }
1975
1976 INIT_LIST_HEAD(&smmu->list);
1977 spin_lock(&arm_smmu_devices_lock);
1978 list_add(&smmu->list, &arm_smmu_devices);
1979 spin_unlock(&arm_smmu_devices_lock);
fd90cecb
WD
1980
1981 arm_smmu_device_reset(smmu);
45ae7cff
WD
1982 return 0;
1983
1984out_free_irqs:
1985 while (i--)
1986 free_irq(smmu->irqs[i], smmu);
1987
45ae7cff
WD
1988out_put_masters:
1989 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
2907320d
MH
1990 struct arm_smmu_master *master
1991 = container_of(node, struct arm_smmu_master, node);
45ae7cff
WD
1992 of_node_put(master->of_node);
1993 }
1994
1995 return err;
1996}
1997
1998static int arm_smmu_device_remove(struct platform_device *pdev)
1999{
2000 int i;
2001 struct device *dev = &pdev->dev;
2002 struct arm_smmu_device *curr, *smmu = NULL;
2003 struct rb_node *node;
2004
2005 spin_lock(&arm_smmu_devices_lock);
2006 list_for_each_entry(curr, &arm_smmu_devices, list) {
2007 if (curr->dev == dev) {
2008 smmu = curr;
2009 list_del(&smmu->list);
2010 break;
2011 }
2012 }
2013 spin_unlock(&arm_smmu_devices_lock);
2014
2015 if (!smmu)
2016 return -ENODEV;
2017
45ae7cff 2018 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
2907320d
MH
2019 struct arm_smmu_master *master
2020 = container_of(node, struct arm_smmu_master, node);
45ae7cff
WD
2021 of_node_put(master->of_node);
2022 }
2023
ecfadb6e 2024 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
45ae7cff
WD
2025 dev_err(dev, "removing device with active domains!\n");
2026
2027 for (i = 0; i < smmu->num_global_irqs; ++i)
2028 free_irq(smmu->irqs[i], smmu);
2029
2030 /* Turn the thing off */
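	/* Setting sCR0.CLIENTPD puts the SMMU back into global bypass. */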
2907320d 2031 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
45ae7cff
WD
2032 return 0;
2033}
2034
45ae7cff
WD
2035static struct platform_driver arm_smmu_driver = {
2036 .driver = {
45ae7cff
WD
2037 .name = "arm-smmu",
2038 .of_match_table = of_match_ptr(arm_smmu_of_match),
2039 },
2040 .probe = arm_smmu_device_dt_probe,
2041 .remove = arm_smmu_device_remove,
2042};
2043
2044static int __init arm_smmu_init(void)
2045{
0e7d37ad 2046 struct device_node *np;
45ae7cff
WD
2047 int ret;
2048
0e7d37ad
TR
2049 /*
2050 * Play nice with systems that don't have an ARM SMMU by checking that
2051 * an ARM SMMU exists in the system before proceeding with the driver
2052 * and IOMMU bus operation registration.
2053 */
2054 np = of_find_matching_node(NULL, arm_smmu_of_match);
2055 if (!np)
2056 return 0;
2057
2058 of_node_put(np);
2059
45ae7cff
WD
2060 ret = platform_driver_register(&arm_smmu_driver);
2061 if (ret)
2062 return ret;
2063
2064 /* Oh, for a proper bus abstraction */
6614ee77 2065 if (!iommu_present(&platform_bus_type))
45ae7cff
WD
2066 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2067
d123cf82 2068#ifdef CONFIG_ARM_AMBA
6614ee77 2069 if (!iommu_present(&amba_bustype))
45ae7cff 2070 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
d123cf82 2071#endif
45ae7cff 2072
a9a1b0b5
WD
2073#ifdef CONFIG_PCI
2074 if (!iommu_present(&pci_bus_type))
2075 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2076#endif
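	/*
	 * bus_set_iommu() attaches arm_smmu_ops to each bus type so that
	 * devices probed on those buses go through the IOMMU API; the
	 * iommu_present() checks avoid trampling an IOMMU driver that
	 * registered first.
	 */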
2077
45ae7cff
WD
2078 return 0;
2079}
2080
2081static void __exit arm_smmu_exit(void)
2082{
2083 return platform_driver_unregister(&arm_smmu_driver);
2084}
2085
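/*
 * subsys_initcall() rather than module_init() brings the SMMU driver up
 * early, ahead of the device_initcall-level drivers for masters that may
 * sit behind it.
 */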
b1950b27 2086subsys_initcall(arm_smmu_init);
45ae7cff
WD
2087module_exit(arm_smmu_exit);
2088
2089MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2090MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2091MODULE_LICENSE("GPL v2");