// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for QCOM secure IOMMUs.  Somewhat based on arm-smmu.c
 *
 * Copyright (C) 2013 ARM Limited
 * Copyright (C) 2017 Red Hat
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kconfig.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "arm-smmu.h"

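/*
 * Offset of the interrupt-selection register in the "local" register
 * space mapped at local_base.  Writing all-ones here during probe
 * presumably routes every context-bank interrupt to the non-secure
 * state; the precise register semantics are not documented in this
 * file, only its use in qcom_iommu_device_probe() below.
 */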
#define SMMU_INTR_SEL_NS		0x2000

struct qcom_iommu_ctx;

struct qcom_iommu_dev {
        /* IOMMU core code handle */
        struct iommu_device     iommu;
        struct device           *dev;
        struct clk              *iface_clk;
        struct clk              *bus_clk;
        void __iomem            *local_base;
        u32                     sec_id;
        u8                      num_ctxs;
        struct qcom_iommu_ctx   *ctxs[];   /* indexed by asid-1 */
};

struct qcom_iommu_ctx {
        struct device           *dev;
        void __iomem            *base;
        bool                    secure_init;
        u8                      asid;      /* asid and ctx bank # are 1:1 */
        struct iommu_domain     *domain;
};

struct qcom_iommu_domain {
        struct io_pgtable_ops   *pgtbl_ops;
        spinlock_t              pgtbl_lock;
        struct mutex            init_mutex; /* Protects iommu pointer */
        struct iommu_domain     domain;
        struct qcom_iommu_dev   *iommu;
};

static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct qcom_iommu_domain, domain);
}

static const struct iommu_ops qcom_iommu_ops;

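/*
 * The per-master private data (a pointer to the owning qcom_iommu_dev)
 * is stashed by qcom_iommu_of_xlate(); this returns NULL for any device
 * that is not behind one of our IOMMUs.
 */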
static struct qcom_iommu_dev *to_iommu(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

        if (!fwspec || fwspec->ops != &qcom_iommu_ops)
                return NULL;

        return dev_iommu_priv_get(dev);
}

static struct qcom_iommu_ctx *to_ctx(struct device *dev, unsigned asid)
{
        struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
        if (!qcom_iommu)
                return NULL;
        return qcom_iommu->ctxs[asid - 1];
}

static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
        writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
        writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
        return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
        return readq_relaxed(ctx->base + reg);
}

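/*
 * TLB maintenance: the io-pgtable cookie is the master's struct device
 * (see alloc_io_pgtable_ops() in qcom_iommu_init_domain()), so each
 * operation below walks the device's fwspec ids and touches every
 * context bank the master is attached to.  A sync is a write to
 * TLBSYNC followed by polling TLBSTATUS until the active bit clears.
 */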
static void qcom_iommu_tlb_sync(void *cookie)
{
        struct iommu_fwspec *fwspec;
        struct device *dev = cookie;
        unsigned i;

        fwspec = dev_iommu_fwspec_get(dev);

        for (i = 0; i < fwspec->num_ids; i++) {
                struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
                unsigned int val, ret;

                iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

                ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
                                         (val & 0x1) == 0, 0, 5000000);
                if (ret)
                        dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
        }
}

static void qcom_iommu_tlb_inv_context(void *cookie)
{
        struct device *dev = cookie;
        struct iommu_fwspec *fwspec;
        unsigned i;

        fwspec = dev_iommu_fwspec_get(dev);

        for (i = 0; i < fwspec->num_ids; i++) {
                struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
                iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
        }

        qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
                                            size_t granule, bool leaf, void *cookie)
{
        struct device *dev = cookie;
        struct iommu_fwspec *fwspec;
        unsigned i, reg;

        reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

        fwspec = dev_iommu_fwspec_get(dev);

        for (i = 0; i < fwspec->num_ids; i++) {
                struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
                size_t s = size;

                iova = (iova >> 12) << 12;
                iova |= ctx->asid;
                do {
                        iommu_writel(ctx, reg, iova);
                        iova += granule;
                } while (s -= granule);
        }
}

static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
                                      size_t granule, void *cookie)
{
        qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
        qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
                                      size_t granule, void *cookie)
{
        qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
        qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
                                    unsigned long iova, size_t granule,
                                    void *cookie)
{
        qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}

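/*
 * Flush ops handed to io-pgtable.  Note that tlb_add_page only queues
 * the per-page invalidation; the matching sync is deferred until the
 * core calls ->iotlb_sync() (see qcom_iommu_iotlb_sync() below).
 */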
static const struct iommu_flush_ops qcom_flush_ops = {
        .tlb_flush_all  = qcom_iommu_tlb_inv_context,
        .tlb_flush_walk = qcom_iommu_tlb_flush_walk,
        .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
        .tlb_add_page   = qcom_iommu_tlb_add_page,
};

static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
        struct qcom_iommu_ctx *ctx = dev;
        u32 fsr, fsynr;
        u64 iova;

        fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

        if (!(fsr & ARM_SMMU_FSR_FAULT))
                return IRQ_NONE;

        fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
        iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

        if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
                dev_err_ratelimited(ctx->dev,
                                    "Unhandled context fault: fsr=0x%x, "
                                    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
                                    fsr, iova, fsynr, ctx->asid);
        }

        iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
        iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);

        return IRQ_HANDLED;
}

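/*
 * Finalize a domain against a particular IOMMU and master: allocate the
 * ARM 32-bit LPAE stage-1 pagetable, then program each of the master's
 * context banks in turn (secure init via SCM if not yet done, then
 * TTBR0/TTBR1, TCR/TCR2, MAIR0/MAIR1, and finally SCTLR to enable
 * translation).  Called with the IOMMU powered up, from
 * qcom_iommu_attach_dev().
 */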
static int qcom_iommu_init_domain(struct iommu_domain *domain,
                                  struct qcom_iommu_dev *qcom_iommu,
                                  struct device *dev)
{
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct io_pgtable_ops *pgtbl_ops;
        struct io_pgtable_cfg pgtbl_cfg;
        int i, ret = 0;
        u32 reg;

        mutex_lock(&qcom_domain->init_mutex);
        if (qcom_domain->iommu)
                goto out_unlock;

        pgtbl_cfg = (struct io_pgtable_cfg) {
                .pgsize_bitmap  = qcom_iommu_ops.pgsize_bitmap,
                .ias            = 32,
                .oas            = 40,
                .tlb            = &qcom_flush_ops,
                .iommu_dev      = qcom_iommu->dev,
        };

        qcom_domain->iommu = qcom_iommu;
        pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
        if (!pgtbl_ops) {
                dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
                ret = -ENOMEM;
                goto out_clear_iommu;
        }

        /* Update the domain's page sizes to reflect the page table format */
        domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
        domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
        domain->geometry.force_aperture = true;

        for (i = 0; i < fwspec->num_ids; i++) {
                struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);

                if (!ctx->secure_init) {
                        ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
                        if (ret) {
                                dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
                                goto out_clear_iommu;
                        }
                        ctx->secure_init = true;
                }

                /* TTBRs */
                iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
                             pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
                             FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
                iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);

                /* TCR */
                iommu_writel(ctx, ARM_SMMU_CB_TCR2,
                             arm_smmu_lpae_tcr2(&pgtbl_cfg));
                iommu_writel(ctx, ARM_SMMU_CB_TCR,
                             arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);

                /* MAIRs (stage-1 only) */
                iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
                             pgtbl_cfg.arm_lpae_s1_cfg.mair);
                iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
                             pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);

                /* SCTLR */
                reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
                      ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
                      ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
                      ARM_SMMU_SCTLR_CFCFG;

                if (IS_ENABLED(CONFIG_BIG_ENDIAN))
                        reg |= ARM_SMMU_SCTLR_E;

                iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);

                ctx->domain = domain;
        }

        mutex_unlock(&qcom_domain->init_mutex);

        /* Publish page table ops for map/unmap */
        qcom_domain->pgtbl_ops = pgtbl_ops;

        return 0;

out_clear_iommu:
        qcom_domain->iommu = NULL;
out_unlock:
        mutex_unlock(&qcom_domain->init_mutex);
        return ret;
}

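/*
 * Domain allocation is deliberately lazy: no hardware can be programmed
 * until a master attaches, because the master's fwspec is what tells us
 * which context banks belong to the domain (hence the "finalize at
 * attach" step in qcom_iommu_attach_dev()).
 */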
static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
{
        struct qcom_iommu_domain *qcom_domain;

        if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
                return NULL;
        /*
         * Allocate the domain and initialise some of its data structures.
         * We can't really do anything meaningful until we've added a
         * master.
         */
        qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
        if (!qcom_domain)
                return NULL;

        if (type == IOMMU_DOMAIN_DMA &&
            iommu_get_dma_cookie(&qcom_domain->domain)) {
                kfree(qcom_domain);
                return NULL;
        }

        mutex_init(&qcom_domain->init_mutex);
        spin_lock_init(&qcom_domain->pgtbl_lock);

        return &qcom_domain->domain;
}

static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);

        iommu_put_dma_cookie(domain);

        if (qcom_domain->iommu) {
                /*
                 * NOTE: unmap can be called after client device is powered
                 * off, for example, with GPUs or anything involving dma-buf.
                 * So we cannot rely on the device_link.  Make sure the IOMMU
                 * is on to avoid unclocked accesses in the TLB inv path:
                 */
                pm_runtime_get_sync(qcom_domain->iommu->dev);
                free_io_pgtable_ops(qcom_domain->pgtbl_ops);
                pm_runtime_put_sync(qcom_domain->iommu->dev);
        }

        kfree(qcom_domain);
}

static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        int ret;

        if (!qcom_iommu) {
                dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
                return -ENXIO;
        }

        /* Ensure that the domain is finalized */
        pm_runtime_get_sync(qcom_iommu->dev);
        ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
        pm_runtime_put_sync(qcom_iommu->dev);
        if (ret < 0)
                return ret;

        /*
         * Sanity check the domain.  We don't support domains across
         * different IOMMUs.
         */
        if (qcom_domain->iommu != qcom_iommu) {
                dev_err(dev, "cannot attach to IOMMU %s while already "
                        "attached to domain on IOMMU %s\n",
                        dev_name(qcom_domain->iommu->dev),
                        dev_name(qcom_iommu->dev));
                return -EINVAL;
        }

        return 0;
}

static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
        unsigned i;

        if (WARN_ON(!qcom_domain->iommu))
                return;

        pm_runtime_get_sync(qcom_iommu->dev);
        for (i = 0; i < fwspec->num_ids; i++) {
                struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);

                /* Disable the context bank: */
                iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

                ctx->domain = NULL;
        }
        pm_runtime_put_sync(qcom_iommu->dev);
}

static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        int ret;
        unsigned long flags;
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

        if (!ops)
                return -ENODEV;

        spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
        ret = ops->map(ops, iova, paddr, size, prot);
        spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
        return ret;
}

static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size, struct iommu_iotlb_gather *gather)
{
        size_t ret;
        unsigned long flags;
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

        if (!ops)
                return 0;

        /* NOTE: unmap can be called after client device is powered off,
         * for example, with GPUs or anything involving dma-buf.  So we
         * cannot rely on the device_link.  Make sure the IOMMU is on to
         * avoid unclocked accesses in the TLB inv path:
         */
        pm_runtime_get_sync(qcom_domain->iommu->dev);
        spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
        ret = ops->unmap(ops, iova, size, gather);
        spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
        pm_runtime_put_sync(qcom_domain->iommu->dev);

        return ret;
}

static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
                                                  struct io_pgtable, ops);
        if (!qcom_domain->pgtbl_ops)
                return;

        pm_runtime_get_sync(qcom_domain->iommu->dev);
        qcom_iommu_tlb_sync(pgtable->cookie);
        pm_runtime_put_sync(qcom_domain->iommu->dev);
}

static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
                                  struct iommu_iotlb_gather *gather)
{
        qcom_iommu_flush_iotlb_all(domain);
}

static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        phys_addr_t ret;
        unsigned long flags;
        struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
        struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

        if (!ops)
                return 0;

        spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
        ret = ops->iova_to_phys(ops, iova);
        spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);

        return ret;
}

static bool qcom_iommu_capable(enum iommu_cap cap)
{
        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
                /*
                 * Return true here as the SMMU can always send out coherent
                 * requests.
                 */
                return true;
        case IOMMU_CAP_NOEXEC:
                return true;
        default:
                return false;
        }
}

static int qcom_iommu_add_device(struct device *dev)
{
        struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
        struct iommu_group *group;
        struct device_link *link;

        if (!qcom_iommu)
                return -ENODEV;

        /*
         * Establish the link between iommu and master, so that the
         * iommu gets runtime enabled/disabled as per the master's
         * needs.
         */
        link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
        if (!link) {
                dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
                        dev_name(qcom_iommu->dev), dev_name(dev));
                return -ENODEV;
        }

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);
        iommu_device_link(&qcom_iommu->iommu, dev);

        return 0;
}

static void qcom_iommu_remove_device(struct device *dev)
{
        struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);

        if (!qcom_iommu)
                return;

        iommu_device_unlink(&qcom_iommu->iommu, dev);
        iommu_group_remove_device(dev);
        iommu_fwspec_free(dev);
}

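/*
 * Illustrative devicetree fragment (hypothetical node names and values,
 * not taken from a real dts): each cell after the phandle is one asid,
 * so a master sitting behind context banks 1 and 2 would look like:
 *
 *	gpu@1c00000 {
 *		...
 *		iommus = <&gfx_iommu 1>, <&gfx_iommu 2>;
 *	};
 *
 * of_xlate() runs once per phandle+asid pair and accumulates the asids
 * into the device's fwspec id list.
 */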
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        struct qcom_iommu_dev *qcom_iommu;
        struct platform_device *iommu_pdev;
        unsigned asid = args->args[0];

        if (args->args_count != 1) {
                dev_err(dev, "incorrect number of iommu params found for %s "
                        "(found %d, expected 1)\n",
                        args->np->full_name, args->args_count);
                return -EINVAL;
        }

        iommu_pdev = of_find_device_by_node(args->np);
        if (WARN_ON(!iommu_pdev))
                return -EINVAL;

        qcom_iommu = platform_get_drvdata(iommu_pdev);

        /* make sure the asid specified in dt is valid, so we don't have
         * to sanity check this elsewhere, since 'asid - 1' is used to
         * index into qcom_iommu->ctxs:
         */
        if (WARN_ON(asid < 1) ||
            WARN_ON(asid > qcom_iommu->num_ctxs))
                return -EINVAL;

        if (!dev_iommu_priv_get(dev)) {
                dev_iommu_priv_set(dev, qcom_iommu);
        } else {
                /* make sure the device's iommus dt node isn't referring to
                 * multiple different iommu devices.  Multiple context
                 * banks are ok, but multiple devices are not:
                 */
                if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
                        return -EINVAL;
        }

        return iommu_fwspec_add_ids(dev, &asid, 1);
}

static const struct iommu_ops qcom_iommu_ops = {
        .capable        = qcom_iommu_capable,
        .domain_alloc   = qcom_iommu_domain_alloc,
        .domain_free    = qcom_iommu_domain_free,
        .attach_dev     = qcom_iommu_attach_dev,
        .detach_dev     = qcom_iommu_detach_dev,
        .map            = qcom_iommu_map,
        .unmap          = qcom_iommu_unmap,
        .flush_iotlb_all = qcom_iommu_flush_iotlb_all,
        .iotlb_sync     = qcom_iommu_iotlb_sync,
        .iova_to_phys   = qcom_iommu_iova_to_phys,
        .add_device     = qcom_iommu_add_device,
        .remove_device  = qcom_iommu_remove_device,
        .device_group   = generic_device_group,
        .of_xlate       = qcom_iommu_of_xlate,
        .pgsize_bitmap  = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
        int ret;

        ret = clk_prepare_enable(qcom_iommu->iface_clk);
        if (ret) {
                dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n");
                return ret;
        }

        ret = clk_prepare_enable(qcom_iommu->bus_clk);
        if (ret) {
                dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n");
                clk_disable_unprepare(qcom_iommu->iface_clk);
                return ret;
        }

        return 0;
}

static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
        clk_disable_unprepare(qcom_iommu->bus_clk);
        clk_disable_unprepare(qcom_iommu->iface_clk);
}

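/*
 * One-time, system-wide setup for secure context banks: ask the secure
 * world how much pagetable memory it needs, allocate that much (with no
 * kernel mapping) and hand it over via SCM.  The 'allocated' static
 * makes this a no-op after the first successful call, even when several
 * IOMMU instances have secure contexts.
 */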
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
        size_t psize = 0;
        unsigned int spare = 0;
        void *cpu_addr;
        dma_addr_t paddr;
        unsigned long attrs;
        static bool allocated = false;
        int ret;

        if (allocated)
                return 0;

        ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
        if (ret) {
                dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
                        ret);
                return ret;
        }

        dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

        attrs = DMA_ATTR_NO_KERNEL_MAPPING;

        cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
        if (!cpu_addr) {
                dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
                        psize);
                return -ENOMEM;
        }

        ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
        if (ret) {
                dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
                goto free_mem;
        }

        allocated = true;
        return 0;

free_mem:
        dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
        return ret;
}

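/*
 * Worked example of the reg -> asid mapping below (hypothetical values):
 * a context bank child node with reg = <0x3000 0x1000> sits three
 * 0x1000-sized banks into the region, so get_asid() returns
 * 0x3000 / 0x1000 = 3.
 */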
static int get_asid(const struct device_node *np)
{
        u32 reg;

        /* read the "reg" property directly to get the relative address
         * of the context bank, and calculate the asid from that:
         */
        if (of_property_read_u32_index(np, "reg", 0, &reg))
                return -ENODEV;

        return reg / 0x1000;      /* context banks are 0x1000 apart */
}

static int qcom_iommu_ctx_probe(struct platform_device *pdev)
{
        struct qcom_iommu_ctx *ctx;
        struct device *dev = &pdev->dev;
        struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
        struct resource *res;
        int ret, irq;

        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->dev = dev;
        platform_set_drvdata(pdev, ctx);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ctx->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(ctx->base))
                return PTR_ERR(ctx->base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return -ENODEV;

        /* clear IRQs before registering fault handler, just in case the
         * boot-loader left us a surprise:
         */
        iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));

        ret = devm_request_irq(dev, irq,
                               qcom_iommu_fault,
                               IRQF_SHARED,
                               "qcom-iommu-fault",
                               ctx);
        if (ret) {
                dev_err(dev, "failed to request IRQ %u\n", irq);
                return ret;
        }

        ret = get_asid(dev->of_node);
        if (ret < 0) {
                dev_err(dev, "missing reg property\n");
                return ret;
        }

        ctx->asid = ret;

        dev_dbg(dev, "found asid %u\n", ctx->asid);

        qcom_iommu->ctxs[ctx->asid - 1] = ctx;

        return 0;
}

static int qcom_iommu_ctx_remove(struct platform_device *pdev)
{
        struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
        struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

        platform_set_drvdata(pdev, NULL);

        qcom_iommu->ctxs[ctx->asid - 1] = NULL;

        return 0;
}

static const struct of_device_id ctx_of_match[] = {
        { .compatible = "qcom,msm-iommu-v1-ns" },
        { .compatible = "qcom,msm-iommu-v1-sec" },
        { /* sentinel */ }
};

static struct platform_driver qcom_iommu_ctx_driver = {
        .driver = {
                .name           = "qcom-iommu-ctx",
                .of_match_table = of_match_ptr(ctx_of_match),
        },
        .probe  = qcom_iommu_ctx_probe,
        .remove = qcom_iommu_ctx_remove,
};

static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
        struct device_node *child;

        for_each_child_of_node(qcom_iommu->dev->of_node, child)
                if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
                        return true;

        return false;
}

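/*
 * Probe for the top-level "qcom,msm-iommu-v1" node.  The context banks
 * are separate child platform devices (bound by qcom_iommu_ctx_driver
 * above), so ctxs[] is sized from the largest asid found among the
 * children and filled in as each child probes.
 */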
static int qcom_iommu_device_probe(struct platform_device *pdev)
{
        struct device_node *child;
        struct qcom_iommu_dev *qcom_iommu;
        struct device *dev = &pdev->dev;
        struct resource *res;
        int ret, max_asid = 0;

        /* find the max asid (which is 1:1 to ctx bank idx), so we know how
         * many child ctx devices we have:
         */
        for_each_child_of_node(dev->of_node, child)
                max_asid = max(max_asid, get_asid(child));

        qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
                                  GFP_KERNEL);
        if (!qcom_iommu)
                return -ENOMEM;
        qcom_iommu->num_ctxs = max_asid;
        qcom_iommu->dev = dev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res)
                qcom_iommu->local_base = devm_ioremap_resource(dev, res);

        qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
        if (IS_ERR(qcom_iommu->iface_clk)) {
                dev_err(dev, "failed to get iface clock\n");
                return PTR_ERR(qcom_iommu->iface_clk);
        }

        qcom_iommu->bus_clk = devm_clk_get(dev, "bus");
        if (IS_ERR(qcom_iommu->bus_clk)) {
                dev_err(dev, "failed to get bus clock\n");
                return PTR_ERR(qcom_iommu->bus_clk);
        }

        if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
                                 &qcom_iommu->sec_id)) {
                dev_err(dev, "missing qcom,iommu-secure-id property\n");
                return -ENODEV;
        }

        if (qcom_iommu_has_secure_context(qcom_iommu)) {
                ret = qcom_iommu_sec_ptbl_init(dev);
                if (ret) {
                        dev_err(dev, "cannot init secure pg table(%d)\n", ret);
                        return ret;
                }
        }

        platform_set_drvdata(pdev, qcom_iommu);

        pm_runtime_enable(dev);

        /* register context bank devices, which are child nodes: */
        ret = devm_of_platform_populate(dev);
        if (ret) {
                dev_err(dev, "Failed to populate iommu contexts\n");
                return ret;
        }

        ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
                                     dev_name(dev));
        if (ret) {
                dev_err(dev, "Failed to register iommu in sysfs\n");
                return ret;
        }

        iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops);
        iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode);

        ret = iommu_device_register(&qcom_iommu->iommu);
        if (ret) {
                dev_err(dev, "Failed to register iommu\n");
                return ret;
        }

        bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);

        if (qcom_iommu->local_base) {
                pm_runtime_get_sync(dev);
                writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
                pm_runtime_put_sync(dev);
        }

        return 0;
}

static int qcom_iommu_device_remove(struct platform_device *pdev)
{
        struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

        bus_set_iommu(&platform_bus_type, NULL);

        pm_runtime_force_suspend(&pdev->dev);
        platform_set_drvdata(pdev, NULL);
        iommu_device_sysfs_remove(&qcom_iommu->iommu);
        iommu_device_unregister(&qcom_iommu->iommu);

        return 0;
}

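/*
 * Runtime PM only gates the iface and bus clocks.  Register state is
 * presumably retained across runtime suspend since no power domains are
 * handled here; system sleep reuses the same hooks via
 * pm_runtime_force_{suspend,resume}().
 */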
static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
        struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

        return qcom_iommu_enable_clocks(qcom_iommu);
}

static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
        struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

        qcom_iommu_disable_clocks(qcom_iommu);

        return 0;
}

static const struct dev_pm_ops qcom_iommu_pm_ops = {
        SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
};

static const struct of_device_id qcom_iommu_of_match[] = {
        { .compatible = "qcom,msm-iommu-v1" },
        { /* sentinel */ }
};

static struct platform_driver qcom_iommu_driver = {
        .driver = {
                .name           = "qcom-iommu",
                .of_match_table = of_match_ptr(qcom_iommu_of_match),
                .pm             = &qcom_iommu_pm_ops,
        },
        .probe  = qcom_iommu_device_probe,
        .remove = qcom_iommu_device_remove,
};

static int __init qcom_iommu_init(void)
{
        int ret;

        ret = platform_driver_register(&qcom_iommu_ctx_driver);
        if (ret)
                return ret;

        ret = platform_driver_register(&qcom_iommu_driver);
        if (ret)
                platform_driver_unregister(&qcom_iommu_ctx_driver);

        return ret;
}
device_initcall(qcom_iommu_init);