]> git.ipfire.org Git - thirdparty/kernel/linux.git/blob - include/linux/iommu.h
Merge branches 'iommu/fixes', 'arm/tegra', 'arm/smmu', 'virtio', 'x86/vt-d', 'x86...
[thirdparty/kernel/linux.git] / include / linux / iommu.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <joerg.roedel@amd.com>
5 */
6
7 #ifndef __LINUX_IOMMU_H
8 #define __LINUX_IOMMU_H
9
10 #include <linux/scatterlist.h>
11 #include <linux/device.h>
12 #include <linux/types.h>
13 #include <linux/errno.h>
14 #include <linux/err.h>
15 #include <linux/of.h>
16 #include <uapi/linux/iommu.h>
17
18 #define IOMMU_READ (1 << 0)
19 #define IOMMU_WRITE (1 << 1)
20 #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
21 #define IOMMU_NOEXEC (1 << 3)
22 #define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
23 /*
24 * Where the bus hardware includes a privilege level as part of its access type
25 * markings, and certain devices are capable of issuing transactions marked as
26 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
27 * given permission flags only apply to accesses at the higher privilege level,
28 * and that unprivileged transactions should have as little access as possible.
29 * This would usually imply the same permissions as kernel mappings on the CPU,
30 * if the IOMMU page table format is equivalent.
31 */
32 #define IOMMU_PRIV (1 << 5)
33
34 struct iommu_ops;
35 struct iommu_group;
36 struct bus_type;
37 struct device;
38 struct iommu_domain;
39 struct iommu_domain_ops;
40 struct notifier_block;
41 struct iommu_sva;
42 struct iommu_fault_event;
43 struct iommu_dma_cookie;
44
45 /* iommu fault flags */
46 #define IOMMU_FAULT_READ 0x0
47 #define IOMMU_FAULT_WRITE 0x1
48
49 typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
50 struct device *, unsigned long, int, void *);
51 typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
52
53 struct iommu_domain_geometry {
54 dma_addr_t aperture_start; /* First address that can be mapped */
55 dma_addr_t aperture_end; /* Last address that can be mapped */
56 bool force_aperture; /* DMA only allowed in mappable range? */
57 };
58
59 /* Domain feature flags */
60 #define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
61 #define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
62 implementation */
63 #define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
64 #define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */
65
66 #define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */
67 #define __IOMMU_DOMAIN_PLATFORM (1U << 5)
68
69 #define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
70 /*
71 * This are the possible domain-types
72 *
73 * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate
74 * devices
75 * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses
76 * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
77 * for VMs
78 * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
79 * This flag allows IOMMU drivers to implement
80 * certain optimizations for these domains
81 * IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB
82 * invalidation.
83 * IOMMU_DOMAIN_SVA - DMA addresses are shared process addresses
84 * represented by mm_struct's.
85 * IOMMU_DOMAIN_PLATFORM - Legacy domain for drivers that do their own
86 * dma_api stuff. Do not use in new drivers.
87 */
88 #define IOMMU_DOMAIN_BLOCKED (0U)
89 #define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
90 #define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
91 #define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
92 __IOMMU_DOMAIN_DMA_API)
93 #define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \
94 __IOMMU_DOMAIN_DMA_API | \
95 __IOMMU_DOMAIN_DMA_FQ)
96 #define IOMMU_DOMAIN_SVA (__IOMMU_DOMAIN_SVA)
97 #define IOMMU_DOMAIN_PLATFORM (__IOMMU_DOMAIN_PLATFORM)
98
99 struct iommu_domain {
100 unsigned type;
101 const struct iommu_domain_ops *ops;
102 unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
103 struct iommu_domain_geometry geometry;
104 struct iommu_dma_cookie *iova_cookie;
105 enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault,
106 void *data);
107 void *fault_data;
108 union {
109 struct {
110 iommu_fault_handler_t handler;
111 void *handler_token;
112 };
113 struct { /* IOMMU_DOMAIN_SVA */
114 struct mm_struct *mm;
115 int users;
116 };
117 };
118 };
119
120 static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
121 {
122 return domain->type & __IOMMU_DOMAIN_DMA_API;
123 }
124
125 enum iommu_cap {
126 IOMMU_CAP_CACHE_COHERENCY, /* IOMMU_CACHE is supported */
127 IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
128 IOMMU_CAP_PRE_BOOT_PROTECTION, /* Firmware says it used the IOMMU for
129 DMA protection and we should too */
130 /*
131 * Per-device flag indicating if enforce_cache_coherency() will work on
132 * this device.
133 */
134 IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
135 /*
136 * IOMMU driver does not issue TLB maintenance during .unmap, so can
137 * usefully support the non-strict DMA flush queue.
138 */
139 IOMMU_CAP_DEFERRED_FLUSH,
140 };
141
142 /* These are the possible reserved region types */
143 enum iommu_resv_type {
144 /* Memory regions which must be mapped 1:1 at all times */
145 IOMMU_RESV_DIRECT,
146 /*
147 * Memory regions which are advertised to be 1:1 but are
148 * commonly considered relaxable in some conditions,
149 * for instance in device assignment use case (USB, Graphics)
150 */
151 IOMMU_RESV_DIRECT_RELAXABLE,
152 /* Arbitrary "never map this or give it to a device" address ranges */
153 IOMMU_RESV_RESERVED,
154 /* Hardware MSI region (untranslated) */
155 IOMMU_RESV_MSI,
156 /* Software-managed MSI translation window */
157 IOMMU_RESV_SW_MSI,
158 };
159
160 /**
161 * struct iommu_resv_region - descriptor for a reserved memory region
162 * @list: Linked list pointers
163 * @start: System physical start address of the region
164 * @length: Length of the region in bytes
165 * @prot: IOMMU Protection flags (READ/WRITE/...)
166 * @type: Type of the reserved region
167 * @free: Callback to free associated memory allocations
168 */
169 struct iommu_resv_region {
170 struct list_head list;
171 phys_addr_t start;
172 size_t length;
173 int prot;
174 enum iommu_resv_type type;
175 void (*free)(struct device *dev, struct iommu_resv_region *region);
176 };
177
178 struct iommu_iort_rmr_data {
179 struct iommu_resv_region rr;
180
181 /* Stream IDs associated with IORT RMR entry */
182 const u32 *sids;
183 u32 num_sids;
184 };
185
186 /**
187 * enum iommu_dev_features - Per device IOMMU features
188 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
189 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
190 * enabling %IOMMU_DEV_FEAT_SVA requires
191 * %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
192 * Faults themselves instead of relying on the IOMMU. When
193 * supported, this feature must be enabled before and
194 * disabled after %IOMMU_DEV_FEAT_SVA.
195 *
196 * Device drivers enable a feature using iommu_dev_enable_feature().
197 */
198 enum iommu_dev_features {
199 IOMMU_DEV_FEAT_SVA,
200 IOMMU_DEV_FEAT_IOPF,
201 };
202
203 #define IOMMU_NO_PASID (0U) /* Reserved for DMA w/o PASID */
204 #define IOMMU_FIRST_GLOBAL_PASID (1U) /*starting range for allocation */
205 #define IOMMU_PASID_INVALID (-1U)
206 typedef unsigned int ioasid_t;
207
208 #ifdef CONFIG_IOMMU_API
209
210 /**
211 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
212 *
213 * @start: IOVA representing the start of the range to be flushed
214 * @end: IOVA representing the end of the range to be flushed (inclusive)
215 * @pgsize: The interval at which to perform the flush
216 * @freelist: Removed pages to free after sync
217 * @queued: Indicates that the flush will be queued
218 *
219 * This structure is intended to be updated by multiple calls to the
220 * ->unmap() function in struct iommu_ops before eventually being passed
221 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
222 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
223 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
224 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
225 */
226 struct iommu_iotlb_gather {
227 unsigned long start;
228 unsigned long end;
229 size_t pgsize;
230 struct list_head freelist;
231 bool queued;
232 };
233
234 /**
235 * struct iommu_ops - iommu ops and capabilities
236 * @capable: check capability
237 * @hw_info: report iommu hardware information. The data buffer returned by this
238 * op is allocated in the iommu driver and freed by the caller after
239 * use. The information type is one of enum iommu_hw_info_type defined
240 * in include/uapi/linux/iommufd.h.
241 * @domain_alloc: allocate iommu domain
242 * @domain_alloc_paging: Allocate an iommu_domain that can be used for
243 * UNMANAGED, DMA, and DMA_FQ domain types.
244 * @probe_device: Add device to iommu driver handling
245 * @release_device: Remove device from iommu driver handling
246 * @probe_finalize: Do final setup work after the device is added to an IOMMU
247 * group and attached to the groups domain
248 * @device_group: find iommu group for a particular device
249 * @get_resv_regions: Request list of reserved regions for a device
250 * @of_xlate: add OF master IDs to iommu grouping
251 * @is_attach_deferred: Check if domain attach should be deferred from iommu
252 * driver init to device driver init (default no)
253 * @dev_enable/disable_feat: per device entries to enable/disable
254 * iommu specific features.
255 * @page_response: handle page request response
256 * @def_domain_type: device default domain type, return value:
257 * - IOMMU_DOMAIN_IDENTITY: must use an identity domain
258 * - IOMMU_DOMAIN_DMA: must use a dma domain
259 * - 0: use the default setting
260 * @default_domain_ops: the default ops for domains
261 * @remove_dev_pasid: Remove any translation configurations of a specific
262 * pasid, so that any DMA transactions with this pasid
263 * will be blocked by the hardware.
264 * @pgsize_bitmap: bitmap of all possible supported page sizes
265 * @owner: Driver module providing these ops
266 * @identity_domain: An always available, always attachable identity
267 * translation.
268 * @blocked_domain: An always available, always attachable blocking
269 * translation.
270 * @default_domain: If not NULL this will always be set as the default domain.
271 * This should be an IDENTITY/BLOCKED/PLATFORM domain.
272 * Do not use in new drivers.
273 */
274 struct iommu_ops {
275 bool (*capable)(struct device *dev, enum iommu_cap);
276 void *(*hw_info)(struct device *dev, u32 *length, u32 *type);
277
278 /* Domain allocation and freeing by the iommu driver */
279 struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
280 struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
281
282 struct iommu_device *(*probe_device)(struct device *dev);
283 void (*release_device)(struct device *dev);
284 void (*probe_finalize)(struct device *dev);
285 struct iommu_group *(*device_group)(struct device *dev);
286
287 /* Request/Free a list of reserved regions for a device */
288 void (*get_resv_regions)(struct device *dev, struct list_head *list);
289
290 int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
291 bool (*is_attach_deferred)(struct device *dev);
292
293 /* Per device IOMMU features */
294 int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
295 int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
296
297 int (*page_response)(struct device *dev,
298 struct iommu_fault_event *evt,
299 struct iommu_page_response *msg);
300
301 int (*def_domain_type)(struct device *dev);
302 void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid);
303
304 const struct iommu_domain_ops *default_domain_ops;
305 unsigned long pgsize_bitmap;
306 struct module *owner;
307 struct iommu_domain *identity_domain;
308 struct iommu_domain *blocked_domain;
309 struct iommu_domain *default_domain;
310 };
311
312 /**
313 * struct iommu_domain_ops - domain specific operations
314 * @attach_dev: attach an iommu domain to a device
315 * Return:
316 * * 0 - success
317 * * EINVAL - can indicate that device and domain are incompatible due to
318 * some previous configuration of the domain, in which case the
319 * driver shouldn't log an error, since it is legitimate for a
320 * caller to test reuse of existing domains. Otherwise, it may
321 * still represent some other fundamental problem
322 * * ENOMEM - out of memory
323 * * ENOSPC - non-ENOMEM type of resource allocation failures
324 * * EBUSY - device is attached to a domain and cannot be changed
325 * * ENODEV - device specific errors, not able to be attached
326 * * <others> - treated as ENODEV by the caller. Use is discouraged
327 * @set_dev_pasid: set an iommu domain to a pasid of device
328 * @map_pages: map a physically contiguous set of pages of the same size to
329 * an iommu domain.
330 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
331 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
332 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
333 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
334 * queue
335 * @iova_to_phys: translate iova to physical address
336 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
337 * including no-snoop TLPs on PCIe or other platform
338 * specific mechanisms.
339 * @enable_nesting: Enable nesting
340 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
341 * @free: Release the domain after use.
342 */
343 struct iommu_domain_ops {
344 int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
345 int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
346 ioasid_t pasid);
347
348 int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
349 phys_addr_t paddr, size_t pgsize, size_t pgcount,
350 int prot, gfp_t gfp, size_t *mapped);
351 size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
352 size_t pgsize, size_t pgcount,
353 struct iommu_iotlb_gather *iotlb_gather);
354
355 void (*flush_iotlb_all)(struct iommu_domain *domain);
356 int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
357 size_t size);
358 void (*iotlb_sync)(struct iommu_domain *domain,
359 struct iommu_iotlb_gather *iotlb_gather);
360
361 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
362 dma_addr_t iova);
363
364 bool (*enforce_cache_coherency)(struct iommu_domain *domain);
365 int (*enable_nesting)(struct iommu_domain *domain);
366 int (*set_pgtable_quirks)(struct iommu_domain *domain,
367 unsigned long quirks);
368
369 void (*free)(struct iommu_domain *domain);
370 };
371
372 /**
373 * struct iommu_device - IOMMU core representation of one IOMMU hardware
374 * instance
375 * @list: Used by the iommu-core to keep a list of registered iommus
376 * @ops: iommu-ops for talking to this iommu
377 * @dev: struct device for sysfs handling
378 * @singleton_group: Used internally for drivers that have only one group
379 * @max_pasids: number of supported PASIDs
380 */
381 struct iommu_device {
382 struct list_head list;
383 const struct iommu_ops *ops;
384 struct fwnode_handle *fwnode;
385 struct device *dev;
386 struct iommu_group *singleton_group;
387 u32 max_pasids;
388 };
389
390 /**
391 * struct iommu_fault_event - Generic fault event
392 *
393 * Can represent recoverable faults such as a page requests or
394 * unrecoverable faults such as DMA or IRQ remapping faults.
395 *
396 * @fault: fault descriptor
397 * @list: pending fault event list, used for tracking responses
398 */
399 struct iommu_fault_event {
400 struct iommu_fault fault;
401 struct list_head list;
402 };
403
404 /**
405 * struct iommu_fault_param - per-device IOMMU fault data
406 * @handler: Callback function to handle IOMMU faults at device level
407 * @data: handler private data
408 * @faults: holds the pending faults which needs response
409 * @lock: protect pending faults list
410 */
411 struct iommu_fault_param {
412 iommu_dev_fault_handler_t handler;
413 void *data;
414 struct list_head faults;
415 struct mutex lock;
416 };
417
418 /**
419 * struct dev_iommu - Collection of per-device IOMMU data
420 *
421 * @fault_param: IOMMU detected device fault reporting data
422 * @iopf_param: I/O Page Fault queue and data
423 * @fwspec: IOMMU fwspec data
424 * @iommu_dev: IOMMU device this device is linked to
425 * @priv: IOMMU Driver private data
426 * @max_pasids: number of PASIDs this device can consume
427 * @attach_deferred: the dma domain attachment is deferred
428 * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
429 * @require_direct: device requires IOMMU_RESV_DIRECT regions
430 * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
431 *
432 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
433 * struct iommu_group *iommu_group;
434 */
435 struct dev_iommu {
436 struct mutex lock;
437 struct iommu_fault_param *fault_param;
438 struct iopf_device_param *iopf_param;
439 struct iommu_fwspec *fwspec;
440 struct iommu_device *iommu_dev;
441 void *priv;
442 u32 max_pasids;
443 u32 attach_deferred:1;
444 u32 pci_32bit_workaround:1;
445 u32 require_direct:1;
446 u32 shadow_on_flush:1;
447 };
448
449 int iommu_device_register(struct iommu_device *iommu,
450 const struct iommu_ops *ops,
451 struct device *hwdev);
452 void iommu_device_unregister(struct iommu_device *iommu);
453 int iommu_device_sysfs_add(struct iommu_device *iommu,
454 struct device *parent,
455 const struct attribute_group **groups,
456 const char *fmt, ...) __printf(4, 5);
457 void iommu_device_sysfs_remove(struct iommu_device *iommu);
458 int iommu_device_link(struct iommu_device *iommu, struct device *link);
459 void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
460 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);
461
462 static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
463 {
464 return (struct iommu_device *)dev_get_drvdata(dev);
465 }
466
467 static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
468 {
469 *gather = (struct iommu_iotlb_gather) {
470 .start = ULONG_MAX,
471 .freelist = LIST_HEAD_INIT(gather->freelist),
472 };
473 }
474
475 extern int bus_iommu_probe(const struct bus_type *bus);
476 extern bool iommu_present(const struct bus_type *bus);
477 extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
478 extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
479 extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
480 extern void iommu_domain_free(struct iommu_domain *domain);
481 extern int iommu_attach_device(struct iommu_domain *domain,
482 struct device *dev);
483 extern void iommu_detach_device(struct iommu_domain *domain,
484 struct device *dev);
485 extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
486 struct device *dev, ioasid_t pasid);
487 extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
488 extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
489 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
490 phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
491 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
492 size_t size);
493 extern size_t iommu_unmap_fast(struct iommu_domain *domain,
494 unsigned long iova, size_t size,
495 struct iommu_iotlb_gather *iotlb_gather);
496 extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
497 struct scatterlist *sg, unsigned int nents,
498 int prot, gfp_t gfp);
499 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
500 extern void iommu_set_fault_handler(struct iommu_domain *domain,
501 iommu_fault_handler_t handler, void *token);
502
503 extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
504 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
505 extern void iommu_set_default_passthrough(bool cmd_line);
506 extern void iommu_set_default_translated(bool cmd_line);
507 extern bool iommu_default_passthrough(void);
508 extern struct iommu_resv_region *
509 iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
510 enum iommu_resv_type type, gfp_t gfp);
511 extern int iommu_get_group_resv_regions(struct iommu_group *group,
512 struct list_head *head);
513
514 extern int iommu_attach_group(struct iommu_domain *domain,
515 struct iommu_group *group);
516 extern void iommu_detach_group(struct iommu_domain *domain,
517 struct iommu_group *group);
518 extern struct iommu_group *iommu_group_alloc(void);
519 extern void *iommu_group_get_iommudata(struct iommu_group *group);
520 extern void iommu_group_set_iommudata(struct iommu_group *group,
521 void *iommu_data,
522 void (*release)(void *iommu_data));
523 extern int iommu_group_set_name(struct iommu_group *group, const char *name);
524 extern int iommu_group_add_device(struct iommu_group *group,
525 struct device *dev);
526 extern void iommu_group_remove_device(struct device *dev);
527 extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
528 int (*fn)(struct device *, void *));
529 extern struct iommu_group *iommu_group_get(struct device *dev);
530 extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
531 extern void iommu_group_put(struct iommu_group *group);
532 extern int iommu_register_device_fault_handler(struct device *dev,
533 iommu_dev_fault_handler_t handler,
534 void *data);
535
536 extern int iommu_unregister_device_fault_handler(struct device *dev);
537
538 extern int iommu_report_device_fault(struct device *dev,
539 struct iommu_fault_event *evt);
540 extern int iommu_page_response(struct device *dev,
541 struct iommu_page_response *msg);
542
543 extern int iommu_group_id(struct iommu_group *group);
544 extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
545
546 int iommu_enable_nesting(struct iommu_domain *domain);
547 int iommu_set_pgtable_quirks(struct iommu_domain *domain,
548 unsigned long quirks);
549
550 void iommu_set_dma_strict(void);
551
552 extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
553 unsigned long iova, int flags);
554
555 static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
556 {
557 if (domain->ops->flush_iotlb_all)
558 domain->ops->flush_iotlb_all(domain);
559 }
560
561 static inline void iommu_iotlb_sync(struct iommu_domain *domain,
562 struct iommu_iotlb_gather *iotlb_gather)
563 {
564 if (domain->ops->iotlb_sync)
565 domain->ops->iotlb_sync(domain, iotlb_gather);
566
567 iommu_iotlb_gather_init(iotlb_gather);
568 }
569
570 /**
571 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
572 *
573 * @gather: TLB gather data
574 * @iova: start of page to invalidate
575 * @size: size of page to invalidate
576 *
577 * Helper for IOMMU drivers to check whether a new range and the gathered range
578 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
579 * than merging the two, which might lead to unnecessary invalidations.
580 */
581 static inline
582 bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
583 unsigned long iova, size_t size)
584 {
585 unsigned long start = iova, end = start + size - 1;
586
587 return gather->end != 0 &&
588 (end + 1 < gather->start || start > gather->end + 1);
589 }
590
591
592 /**
593 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
594 * @gather: TLB gather data
595 * @iova: start of page to invalidate
596 * @size: size of page to invalidate
597 *
598 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
599 * where only the address range matters, and simply minimising intermediate
600 * syncs is preferred.
601 */
602 static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
603 unsigned long iova, size_t size)
604 {
605 unsigned long end = iova + size - 1;
606
607 if (gather->start > iova)
608 gather->start = iova;
609 if (gather->end < end)
610 gather->end = end;
611 }
612
613 /**
614 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
615 * @domain: IOMMU domain to be invalidated
616 * @gather: TLB gather data
617 * @iova: start of page to invalidate
618 * @size: size of page to invalidate
619 *
620 * Helper for IOMMU drivers to build invalidation commands based on individual
621 * pages, or with page size/table level hints which cannot be gathered if they
622 * differ.
623 */
624 static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
625 struct iommu_iotlb_gather *gather,
626 unsigned long iova, size_t size)
627 {
628 /*
629 * If the new page is disjoint from the current range or is mapped at
630 * a different granularity, then sync the TLB so that the gather
631 * structure can be rewritten.
632 */
633 if ((gather->pgsize && gather->pgsize != size) ||
634 iommu_iotlb_gather_is_disjoint(gather, iova, size))
635 iommu_iotlb_sync(domain, gather);
636
637 gather->pgsize = size;
638 iommu_iotlb_gather_add_range(gather, iova, size);
639 }
640
641 static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
642 {
643 return gather && gather->queued;
644 }
645
646 /* PCI device grouping function */
647 extern struct iommu_group *pci_device_group(struct device *dev);
648 /* Generic device grouping function */
649 extern struct iommu_group *generic_device_group(struct device *dev);
650 /* FSL-MC device grouping function */
651 struct iommu_group *fsl_mc_device_group(struct device *dev);
652 extern struct iommu_group *generic_single_device_group(struct device *dev);
653
654 /**
655 * struct iommu_fwspec - per-device IOMMU instance data
656 * @ops: ops for this device's IOMMU
657 * @iommu_fwnode: firmware handle for this device's IOMMU
658 * @flags: IOMMU_FWSPEC_* flags
659 * @num_ids: number of associated device IDs
660 * @ids: IDs which this device may present to the IOMMU
661 *
662 * Note that the IDs (and any other information, really) stored in this structure should be
663 * considered private to the IOMMU device driver and are not to be used directly by IOMMU
664 * consumers.
665 */
666 struct iommu_fwspec {
667 const struct iommu_ops *ops;
668 struct fwnode_handle *iommu_fwnode;
669 u32 flags;
670 unsigned int num_ids;
671 u32 ids[];
672 };
673
674 /* ATS is supported */
675 #define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0)
676
677 /**
678 * struct iommu_sva - handle to a device-mm bond
679 */
680 struct iommu_sva {
681 struct device *dev;
682 struct iommu_domain *domain;
683 };
684
685 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
686 const struct iommu_ops *ops);
687 void iommu_fwspec_free(struct device *dev);
688 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
689 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
690
691 static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
692 {
693 if (dev->iommu)
694 return dev->iommu->fwspec;
695 else
696 return NULL;
697 }
698
699 static inline void dev_iommu_fwspec_set(struct device *dev,
700 struct iommu_fwspec *fwspec)
701 {
702 dev->iommu->fwspec = fwspec;
703 }
704
705 static inline void *dev_iommu_priv_get(struct device *dev)
706 {
707 if (dev->iommu)
708 return dev->iommu->priv;
709 else
710 return NULL;
711 }
712
713 static inline void dev_iommu_priv_set(struct device *dev, void *priv)
714 {
715 dev->iommu->priv = priv;
716 }
717
718 int iommu_probe_device(struct device *dev);
719
720 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
721 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
722
723 int iommu_device_use_default_domain(struct device *dev);
724 void iommu_device_unuse_default_domain(struct device *dev);
725
726 int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
727 void iommu_group_release_dma_owner(struct iommu_group *group);
728 bool iommu_group_dma_owner_claimed(struct iommu_group *group);
729
730 int iommu_device_claim_dma_owner(struct device *dev, void *owner);
731 void iommu_device_release_dma_owner(struct device *dev);
732
733 struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
734 struct mm_struct *mm);
735 int iommu_attach_device_pasid(struct iommu_domain *domain,
736 struct device *dev, ioasid_t pasid);
737 void iommu_detach_device_pasid(struct iommu_domain *domain,
738 struct device *dev, ioasid_t pasid);
739 struct iommu_domain *
740 iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
741 unsigned int type);
742 ioasid_t iommu_alloc_global_pasid(struct device *dev);
743 void iommu_free_global_pasid(ioasid_t pasid);
744 #else /* CONFIG_IOMMU_API */
745
746 struct iommu_ops {};
747 struct iommu_group {};
748 struct iommu_fwspec {};
749 struct iommu_device {};
750 struct iommu_fault_param {};
751 struct iommu_iotlb_gather {};
752
753 static inline bool iommu_present(const struct bus_type *bus)
754 {
755 return false;
756 }
757
758 static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
759 {
760 return false;
761 }
762
763 static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
764 {
765 return NULL;
766 }
767
768 static inline void iommu_domain_free(struct iommu_domain *domain)
769 {
770 }
771
772 static inline int iommu_attach_device(struct iommu_domain *domain,
773 struct device *dev)
774 {
775 return -ENODEV;
776 }
777
778 static inline void iommu_detach_device(struct iommu_domain *domain,
779 struct device *dev)
780 {
781 }
782
783 static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
784 {
785 return NULL;
786 }
787
788 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
789 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
790 {
791 return -ENODEV;
792 }
793
794 static inline size_t iommu_unmap(struct iommu_domain *domain,
795 unsigned long iova, size_t size)
796 {
797 return 0;
798 }
799
800 static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
801 unsigned long iova, int gfp_order,
802 struct iommu_iotlb_gather *iotlb_gather)
803 {
804 return 0;
805 }
806
807 static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
808 unsigned long iova, struct scatterlist *sg,
809 unsigned int nents, int prot, gfp_t gfp)
810 {
811 return -ENODEV;
812 }
813
814 static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
815 {
816 }
817
818 static inline void iommu_iotlb_sync(struct iommu_domain *domain,
819 struct iommu_iotlb_gather *iotlb_gather)
820 {
821 }
822
823 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
824 {
825 return 0;
826 }
827
828 static inline void iommu_set_fault_handler(struct iommu_domain *domain,
829 iommu_fault_handler_t handler, void *token)
830 {
831 }
832
833 static inline void iommu_get_resv_regions(struct device *dev,
834 struct list_head *list)
835 {
836 }
837
838 static inline void iommu_put_resv_regions(struct device *dev,
839 struct list_head *list)
840 {
841 }
842
843 static inline int iommu_get_group_resv_regions(struct iommu_group *group,
844 struct list_head *head)
845 {
846 return -ENODEV;
847 }
848
849 static inline void iommu_set_default_passthrough(bool cmd_line)
850 {
851 }
852
853 static inline void iommu_set_default_translated(bool cmd_line)
854 {
855 }
856
857 static inline bool iommu_default_passthrough(void)
858 {
859 return true;
860 }
861
862 static inline int iommu_attach_group(struct iommu_domain *domain,
863 struct iommu_group *group)
864 {
865 return -ENODEV;
866 }
867
868 static inline void iommu_detach_group(struct iommu_domain *domain,
869 struct iommu_group *group)
870 {
871 }
872
873 static inline struct iommu_group *iommu_group_alloc(void)
874 {
875 return ERR_PTR(-ENODEV);
876 }
877
878 static inline void *iommu_group_get_iommudata(struct iommu_group *group)
879 {
880 return NULL;
881 }
882
883 static inline void iommu_group_set_iommudata(struct iommu_group *group,
884 void *iommu_data,
885 void (*release)(void *iommu_data))
886 {
887 }
888
889 static inline int iommu_group_set_name(struct iommu_group *group,
890 const char *name)
891 {
892 return -ENODEV;
893 }
894
895 static inline int iommu_group_add_device(struct iommu_group *group,
896 struct device *dev)
897 {
898 return -ENODEV;
899 }
900
901 static inline void iommu_group_remove_device(struct device *dev)
902 {
903 }
904
905 static inline int iommu_group_for_each_dev(struct iommu_group *group,
906 void *data,
907 int (*fn)(struct device *, void *))
908 {
909 return -ENODEV;
910 }
911
912 static inline struct iommu_group *iommu_group_get(struct device *dev)
913 {
914 return NULL;
915 }
916
917 static inline void iommu_group_put(struct iommu_group *group)
918 {
919 }
920
921 static inline
922 int iommu_register_device_fault_handler(struct device *dev,
923 iommu_dev_fault_handler_t handler,
924 void *data)
925 {
926 return -ENODEV;
927 }
928
929 static inline int iommu_unregister_device_fault_handler(struct device *dev)
930 {
931 return 0;
932 }
933
934 static inline
935 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
936 {
937 return -ENODEV;
938 }
939
940 static inline int iommu_page_response(struct device *dev,
941 struct iommu_page_response *msg)
942 {
943 return -ENODEV;
944 }
945
946 static inline int iommu_group_id(struct iommu_group *group)
947 {
948 return -ENODEV;
949 }
950
951 static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
952 unsigned long quirks)
953 {
954 return 0;
955 }
956
957 static inline int iommu_device_register(struct iommu_device *iommu,
958 const struct iommu_ops *ops,
959 struct device *hwdev)
960 {
961 return -ENODEV;
962 }
963
964 static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
965 {
966 return NULL;
967 }
968
969 static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
970 {
971 }
972
973 static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
974 struct iommu_iotlb_gather *gather,
975 unsigned long iova, size_t size)
976 {
977 }
978
979 static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
980 {
981 return false;
982 }
983
984 static inline void iommu_device_unregister(struct iommu_device *iommu)
985 {
986 }
987
988 static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
989 struct device *parent,
990 const struct attribute_group **groups,
991 const char *fmt, ...)
992 {
993 return -ENODEV;
994 }
995
996 static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
997 {
998 }
999
1000 static inline int iommu_device_link(struct device *dev, struct device *link)
1001 {
1002 return -EINVAL;
1003 }
1004
1005 static inline void iommu_device_unlink(struct device *dev, struct device *link)
1006 {
1007 }
1008
1009 static inline int iommu_fwspec_init(struct device *dev,
1010 struct fwnode_handle *iommu_fwnode,
1011 const struct iommu_ops *ops)
1012 {
1013 return -ENODEV;
1014 }
1015
1016 static inline void iommu_fwspec_free(struct device *dev)
1017 {
1018 }
1019
1020 static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
1021 int num_ids)
1022 {
1023 return -ENODEV;
1024 }
1025
1026 static inline
1027 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
1028 {
1029 return NULL;
1030 }
1031
1032 static inline int
1033 iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
1034 {
1035 return -ENODEV;
1036 }
1037
1038 static inline int
1039 iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
1040 {
1041 return -ENODEV;
1042 }
1043
1044 static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
1045 {
1046 return NULL;
1047 }
1048
1049 static inline int iommu_device_use_default_domain(struct device *dev)
1050 {
1051 return 0;
1052 }
1053
1054 static inline void iommu_device_unuse_default_domain(struct device *dev)
1055 {
1056 }
1057
1058 static inline int
1059 iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
1060 {
1061 return -ENODEV;
1062 }
1063
1064 static inline void iommu_group_release_dma_owner(struct iommu_group *group)
1065 {
1066 }
1067
1068 static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
1069 {
1070 return false;
1071 }
1072
1073 static inline void iommu_device_release_dma_owner(struct device *dev)
1074 {
1075 }
1076
1077 static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
1078 {
1079 return -ENODEV;
1080 }
1081
1082 static inline struct iommu_domain *
1083 iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
1084 {
1085 return NULL;
1086 }
1087
1088 static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
1089 struct device *dev, ioasid_t pasid)
1090 {
1091 return -ENODEV;
1092 }
1093
1094 static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
1095 struct device *dev, ioasid_t pasid)
1096 {
1097 }
1098
1099 static inline struct iommu_domain *
1100 iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
1101 unsigned int type)
1102 {
1103 return NULL;
1104 }
1105
1106 static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
1107 {
1108 return IOMMU_PASID_INVALID;
1109 }
1110
1111 static inline void iommu_free_global_pasid(ioasid_t pasid) {}
1112 #endif /* CONFIG_IOMMU_API */
1113
1114 /**
1115 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
1116 * @domain: The IOMMU domain to perform the mapping
1117 * @iova: The start address to map the buffer
1118 * @sgt: The sg_table object describing the buffer
1119 * @prot: IOMMU protection bits
1120 *
1121 * Creates a mapping at @iova for the buffer described by a scatterlist
1122 * stored in the given sg_table object in the provided IOMMU domain.
1123 */
1124 static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
1125 unsigned long iova, struct sg_table *sgt, int prot)
1126 {
1127 return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
1128 GFP_KERNEL);
1129 }
1130
1131 #ifdef CONFIG_IOMMU_DEBUGFS
1132 extern struct dentry *iommu_debugfs_dir;
1133 void iommu_debugfs_setup(void);
1134 #else
1135 static inline void iommu_debugfs_setup(void) {}
1136 #endif
1137
1138 #ifdef CONFIG_IOMMU_DMA
1139 #include <linux/msi.h>
1140
1141 /* Setup call for arch DMA mapping code */
1142 void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
1143
1144 int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
1145
1146 int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
1147 void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);
1148
1149 #else /* CONFIG_IOMMU_DMA */
1150
1151 struct msi_desc;
1152 struct msi_msg;
1153
1154 static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
1155 {
1156 }
1157
1158 static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
1159 {
1160 return -ENODEV;
1161 }
1162
1163 static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
1164 {
1165 return 0;
1166 }
1167
1168 static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
1169 {
1170 }
1171
1172 #endif /* CONFIG_IOMMU_DMA */
1173
1174 /*
1175 * Newer generations of Tegra SoCs require devices' stream IDs to be directly programmed into
1176 * some registers. These are always paired with a Tegra SMMU or ARM SMMU, for which the contents
1177 * of the struct iommu_fwspec are known. Use this helper to formalize access to these internals.
1178 */
1179 #define TEGRA_STREAM_ID_BYPASS 0x7f
1180
1181 static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
1182 {
1183 #ifdef CONFIG_IOMMU_API
1184 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
1185
1186 if (fwspec && fwspec->num_ids == 1) {
1187 *stream_id = fwspec->ids[0] & 0xffff;
1188 return true;
1189 }
1190 #endif
1191
1192 return false;
1193 }
1194
1195 #ifdef CONFIG_IOMMU_SVA
1196 static inline void mm_pasid_init(struct mm_struct *mm)
1197 {
1198 mm->pasid = IOMMU_PASID_INVALID;
1199 }
1200 static inline bool mm_valid_pasid(struct mm_struct *mm)
1201 {
1202 return mm->pasid != IOMMU_PASID_INVALID;
1203 }
1204 void mm_pasid_drop(struct mm_struct *mm);
1205 struct iommu_sva *iommu_sva_bind_device(struct device *dev,
1206 struct mm_struct *mm);
1207 void iommu_sva_unbind_device(struct iommu_sva *handle);
1208 u32 iommu_sva_get_pasid(struct iommu_sva *handle);
1209 #else
1210 static inline struct iommu_sva *
1211 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
1212 {
1213 return NULL;
1214 }
1215
1216 static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
1217 {
1218 }
1219
1220 static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
1221 {
1222 return IOMMU_PASID_INVALID;
1223 }
1224 static inline void mm_pasid_init(struct mm_struct *mm) {}
1225 static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
1226 static inline void mm_pasid_drop(struct mm_struct *mm) {}
1227 #endif /* CONFIG_IOMMU_SVA */
1228
1229 #endif /* __LINUX_IOMMU_H */