// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

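/*
 * Example (illustrative sketch, not part of this file): a user that owns an
 * unmanaged domain might wire up MSI remapping as below, where
 * MY_MSI_IOVA_BASE is an invented name for a region it has already reserved
 * out of its own IOVA allocator:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (domain && iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE))
 *		; // proceed without automatic MSI remapping
 */
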
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

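/*
 * Example (illustrative sketch, not part of this file): in a hypothetical
 * IOMMU driver ("my_*" names are invented), the cookie helpers pair up
 * across the domain lifecycle roughly as follows:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct my_domain *md = kzalloc(sizeof(*md), GFP_KERNEL);
 *
 *		if (!md)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&md->domain)) {
 *			kfree(md);
 *			return NULL;
 *		}
 *		return &md->domain;
 *	}
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(container_of(domain, struct my_domain, domain));
 *	}
 */
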
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

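/*
 * Example (illustrative sketch): a driver would normally chain this helper
 * from its own .get_resv_regions callback, appending any hardware-specific
 * regions itself:
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *list)
 *	{
 *		iommu_dma_get_resv_regions(dev, list);
 *		// ...then add driver-specific entries, e.g. from
 *		// iommu_alloc_resv_region(), to the same list.
 *	}
 */
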
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(dma_addr_t)0) {
			end = ~(dma_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);

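/*
 * Example (illustrative sketch): arch code typically calls this once the
 * device and its DMA window are known, e.g. when setting up dma_map_ops for
 * the device (error handling elided, surrounding hook names vary by arch):
 *
 *	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *	if (domain && domain->type == IOMMU_DOMAIN_DMA &&
 *	    iommu_dma_init_domain(domain, dma_base, size, dev))
 *		dev_warn(dev, "Failed to initialise IOMMU DMA domain\n");
 */
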
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

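/*
 * Example (illustrative): mapping a buffer for device-to-memory DMA on a
 * non-coherent master,
 *
 *	int prot = dma_info_to_prot(DMA_FROM_DEVICE, false, 0);
 *
 * yields plain IOMMU_WRITE: the device only ever writes the memory, and no
 * IOMMU_CACHE attribute is set since the master is not coherent.
 */
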
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them.
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_MAPPING_ERROR;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents,
			       SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

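/*
 * Example (illustrative sketch): arch DMA code drives the allocator roughly
 * as below, where "my_flush_page" is an invented name for the arch's cache
 * maintenance hook:
 *
 *	static void my_flush_page(struct device *dev, const void *virt,
 *				  phys_addr_t phys)
 *	{
 *		// arch-specific: make this page visible to the device
 *	}
 *	...
 *	pages = iommu_dma_alloc(dev, size, gfp, attrs,
 *				dma_info_to_prot(DMA_BIDIRECTIONAL, coherent,
 *						 attrs),
 *				&handle, my_flush_page);
 */
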
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

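/*
 * Example (illustrative sketch): a ->mmap implementation is expected to have
 * validated the VMA itself before handing off, e.g.:
 *
 *	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(size))
 *		return -ENXIO;
 *	return iommu_dma_mmap(pages, size, vma);
 */
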
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
			iommu_get_dma_domain(dev));
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

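/*
 * Worked example for the in-place trickery above (assuming a 4K IOVA
 * granule): a segment with offset 0x1804 and length 0x400 is temporarily
 * rewritten to offset 0x1000 and length 0x1000, i.e. one whole aligned
 * granule, while the original intra-granule offset (0x804) and length
 * (0x400) are stashed in its as-yet-unused DMA address/length fields.
 * Once the whole list is mapped into a single IOVA range, __finalise_sg()
 * restores the original values and fills in the real DMA addresses.
 */
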
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			iommu_get_dma_domain(dev));
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
	if (iova == DMA_MAPPING_ERROR)
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

/**
 * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU device
 * @desc: MSI descriptor, will store the MSI page
 * @msi_addr: MSI target address to be mapped
 *
 * Return: 0 on success or negative error code if the mapping failed.
 */
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	unsigned long flags;

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

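/*
 * Example (illustrative sketch): an MSI irqchip pairs
 * iommu_dma_prepare_msi() above with iommu_dma_compose_msi_msg() below,
 * preparing once the doorbell address is known and composing when writing
 * the message:
 *
 *	err = iommu_dma_prepare_msi(desc, doorbell_phys);
 *	if (!err)
 *		iommu_dma_compose_msi_msg(desc, &msg);
 */
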
/**
 * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
 * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
 * @msg: MSI message containing target physical address
 */
void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}