/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */
#define pr_fmt(fmt)     "DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)

/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
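/*
 * Worked example (an illustrative note, not from the original source): with
 * VTD_PAGE_SHIFT == 12 and the default 48-bit guest address width,
 * __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1 and __DOMAIN_MAX_ADDR(48) ==
 * (1ULL << 48) - 1. DOMAIN_MAX_PFN() then clamps the PFN to ULONG_MAX so a
 * 32-bit kernel can still carry it around in an unsigned long.
 */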
/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
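/*
 * Illustrative note (not from the original source): ~0xFFFUL has every bit
 * from bit 12 upward set, so the IOMMU core sees 4KiB, 8KiB, 16KiB, ... as
 * supported sizes and keeps handing this driver naturally aligned,
 * power-of-two regions just as it always has.
 */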
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static inline int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

static inline unsigned long level_mask(int level)
{
	return -1UL << level_to_offset_bits(level);
}

static inline unsigned long level_size(int level)
{
	return 1UL << level_to_offset_bits(level);
}

static inline unsigned long align_to_level(unsigned long pfn, int level)
{
	return (pfn + level_size(level) - 1) & level_mask(level);
}

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static inline unsigned long page_to_dma_pfn(struct page *pg)
{
	return mm_to_dma_pfn(page_to_pfn(pg));
}

static inline unsigned long virt_to_dma_pfn(void *p)
{
	return page_to_dma_pfn(virt_to_page(p));
}
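/*
 * Illustrative sketch (not part of the original driver): a hypothetical
 * debug helper showing how the level helpers above slice a DMA pfn into
 * 9-bit page-table indices, one per level of the walk.
 */
static inline void __maybe_unused dma_pfn_debug_decompose(unsigned long pfn,
							  int total_level)
{
	int level;

	for (level = total_level; level >= 1; level--)
		pr_debug("level %d: offset %d, level covers %lu pages\n",
			 level, pfn_level_offset(pfn, level),
			 level_size(level));
}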
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;
/*
 * 12-63: Context Ptr (12 - (haw-1))
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
	return re->lo & VTD_PAGE_MASK;
}

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
	return re->hi & VTD_PAGE_MASK;
}
/*
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

static inline void context_clear_pasid_enable(struct context_entry *context)
{
	context->lo &= ~(1ULL << 11);
}

static inline bool context_pasid_enabled(struct context_entry *context)
{
	return !!(context->lo & (1ULL << 11));
}

static inline void context_set_copied(struct context_entry *context)
{
	context->hi |= (1ull << 3);
}

static inline bool context_copied(struct context_entry *context)
{
	return !!(context->hi & (1ULL << 3));
}

static inline bool __context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

static inline bool context_present(struct context_entry *context)
{
	return context_pasid_enabled(context) ?
	     __context_present(context) :
	     __context_present(context) && !context_copied(context);
}

static inline void context_set_present(struct context_entry *context)
{
	context->lo |= 1;
}

static inline void context_set_fault_enable(struct context_entry *context)
{
	context->lo &= (((u64)-1) << 2) | 1;
}

static inline void context_set_translation_type(struct context_entry *context,
						unsigned long value)
{
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;
}

static inline void context_set_address_root(struct context_entry *context,
					    unsigned long value)
{
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;
}

static inline void context_set_address_width(struct context_entry *context,
					     unsigned long value)
{
	context->hi |= value & 7;
}

static inline void context_set_domain_id(struct context_entry *context,
					 unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static inline int context_domain_id(struct context_entry *c)
{
	return((c->hi >> 8) & 0xffff);
}

static inline void context_clear_entry(struct context_entry *context)
{
	context->lo = 0;
	context->hi = 0;
}
/*
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/*
 * Domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

#define for_each_domain_iommu(idx, domain)			\
	for (idx = 0; idx < g_num_of_iommus; idx++)		\
		if (domain->iommu_refcnt[idx])
struct dmar_domain {
	int	nid;			/* node id */

	unsigned	iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */

	u16		iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */

	bool has_iotlb_device;
	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u16 pfsid;		/* SRIOV physical function source ID */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 ats_qdep;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};
struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)
static void flush_unmaps_timeout(unsigned long data);

struct deferred_flush_entry {
	unsigned long iova_pfn;
	unsigned long nrpages;
	struct dmar_domain *domain;
	struct page *freelist;
};

#define HIGH_WATER_MARK 250
struct deferred_flush_table {
	struct deferred_flush_entry entries[HIGH_WATER_MARK];
};

struct deferred_flush_data {
	struct timer_list timer;
	struct deferred_flush_table *tables;
};

DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct dmar_domain *domain,
				     struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
				 struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
static int intel_iommu_pasid28;
static int iommu_identity_mapping;

#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4
/* Broadwell and Skylake have broken ECS support — normal so-called "second
 * level" translation of DMA requests-without-PASID doesn't actually happen
 * unless you also set the NESTE bit in an extended context-entry. Which of
 * course means that SVM doesn't work because it's trying to do nested
 * translation of the physical addresses it finds in the process page tables,
 * through the IOVA->phys mapping found in the "second level" page tables.
 *
 * The VT-d specification was retroactively changed to change the definition
 * of the capability bits and pretend that Broadwell/Skylake never happened...
 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
 * for some reason it was the PASID capability bit which was redefined (from
 * bit 28 on BDW/SKL to bit 40 in future).
 *
 * So our test for ECS needs to eschew those implementations which set the old
 * PASID capability bit 28, since those are the ones on which ECS is broken.
 * Unless we are working around the 'pasid28' limitations, that is, by putting
 * the device into passthrough mode for normal DMA and thus masking the bug.
 */
#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
			    (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
/* PASID support is thus enabled if ECS is enabled and *either* of the old
 * or new capability bits are set. */
#define pasid_enabled(iommu) (ecs_enabled(iommu) &&			\
			      (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
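/*
 * Illustrative summary (not from the original source) of how the two macros
 * above interact: on parts that set the broken bit-28 PASID capability
 * (Broadwell/Skylake), ecs_enabled() is false unless the user passed
 * "intel_iommu=pasid28"; on fixed parts ECS only depends on ecap_ecs, and
 * pasid_enabled() additionally needs either flavour of the PASID capability.
 */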
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

static const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}

static void init_translation_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}
/* Convert generic 'struct iommu_domain' to private 'struct dmar_domain' */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}
static int __init intel_iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;
	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_info("Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "ecs_off", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable extended context table support\n");
			intel_iommu_ecs = 0;
		} else if (!strncmp(str, "pasid28", 7)) {
			printk(KERN_INFO
				"Intel-IOMMU: enable pre-production PASID support\n");
			intel_iommu_pasid28 = 1;
			iommu_identity_mapping |= IDENTMAP_GFX;
		}

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}
	return 0;
}
__setup("intel_iommu=", intel_iommu_setup);
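/*
 * Usage sketch (illustrative, not from the original source): the options
 * above are comma-separated on the kernel command line, e.g.
 *
 *	intel_iommu=on,strict,sp_off
 *
 * enables translation, disables batched IOTLB flushing and disables
 * superpage support in a single parameter.
 */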
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;

static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	domains = iommu->domains[idx];
	if (!domains)
		return NULL;

	return domains[did & 0xff];
}

static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
			     struct dmar_domain *domain)
{
	struct dmar_domain **domains;
	int idx = did >> 8;

	if (!iommu->domains[idx]) {
		size_t size = 256 * sizeof(struct dmar_domain *);
		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
	}

	domains = iommu->domains[idx];
	if (WARN_ON(!domains))
		return;
	else
		domains[did & 0xff] = domain;
}

static inline void *alloc_pgtable_page(int node)
{
	struct page *page;
	void *vaddr = NULL;

	page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (page)
		vaddr = page_address(page);
	return vaddr;
}

static inline void free_pgtable_page(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

static inline void *alloc_domain_mem(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void free_domain_mem(void *vaddr)
{
	kmem_cache_free(iommu_domain_cache, vaddr);
}

static inline void *alloc_devinfo_mem(void)
{
	return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}

static inline void free_devinfo_mem(void *vaddr)
{
	kmem_cache_free(iommu_devinfo_cache, vaddr);
}
static inline int domain_type_is_vm(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
}

static inline int domain_type_is_si(struct dmar_domain *domain)
{
	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}

static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
{
	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
				DOMAIN_FLAG_STATIC_IDENTITY);
}

static inline int domain_pfn_supported(struct dmar_domain *domain,
					unsigned long pfn)
{
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
	unsigned long sagaw;
	int agaw = -1;

	sagaw = cap_sagaw(iommu->cap);
	for (agaw = width_to_agaw(max_gaw);
	     agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;
}

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}

/*
 * Calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
/* This function only returns one iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	/* si_domain and vm domain should not get here. */
	BUG_ON(domain_type_is_vm_or_si(domain));
	for_each_domain_iommu(iommu_id, domain)
		break;

	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;

	return g_iommus[iommu_id];
}
762 static void domain_update_iommu_coherency(struct dmar_domain
*domain
)
764 struct dmar_drhd_unit
*drhd
;
765 struct intel_iommu
*iommu
;
769 domain
->iommu_coherency
= 1;
771 for_each_domain_iommu(i
, domain
) {
773 if (!ecap_coherent(g_iommus
[i
]->ecap
)) {
774 domain
->iommu_coherency
= 0;
781 /* No hardware attached; use lowest common denominator */
783 for_each_active_iommu(iommu
, drhd
) {
784 if (!ecap_coherent(iommu
->ecap
)) {
785 domain
->iommu_coherency
= 0;
792 static int domain_update_iommu_snooping(struct intel_iommu
*skip
)
794 struct dmar_drhd_unit
*drhd
;
795 struct intel_iommu
*iommu
;
799 for_each_active_iommu(iommu
, drhd
) {
801 if (!ecap_sc_support(iommu
->ecap
)) {
812 static int domain_update_iommu_superpage(struct intel_iommu
*skip
)
814 struct dmar_drhd_unit
*drhd
;
815 struct intel_iommu
*iommu
;
818 if (!intel_iommu_superpage
) {
822 /* set iommu_superpage to the smallest common denominator */
824 for_each_active_iommu(iommu
, drhd
) {
826 mask
&= cap_super_page_val(iommu
->cap
);
836 /* Some capabilities may be different across iommus */
837 static void domain_update_iommu_cap(struct dmar_domain
*domain
)
839 domain_update_iommu_coherency(domain
);
840 domain
->iommu_snooping
= domain_update_iommu_snooping(NULL
);
841 domain
->iommu_superpage
= domain_update_iommu_superpage(NULL
);
844 static inline struct context_entry
*iommu_context_addr(struct intel_iommu
*iommu
,
845 u8 bus
, u8 devfn
, int alloc
)
847 struct root_entry
*root
= &iommu
->root_entry
[bus
];
848 struct context_entry
*context
;
852 if (ecs_enabled(iommu
)) {
860 context
= phys_to_virt(*entry
& VTD_PAGE_MASK
);
862 unsigned long phy_addr
;
866 context
= alloc_pgtable_page(iommu
->node
);
870 __iommu_flush_cache(iommu
, (void *)context
, CONTEXT_SIZE
);
871 phy_addr
= virt_to_phys((void *)context
);
872 *entry
= phy_addr
| 1;
873 __iommu_flush_cache(iommu
, entry
, sizeof(*entry
));
875 return &context
[devfn
];
878 static int iommu_dummy(struct device
*dev
)
880 return dev
->archdata
.iommu
== DUMMY_DEVICE_DOMAIN_INFO
;
883 static struct intel_iommu
*device_to_iommu(struct device
*dev
, u8
*bus
, u8
*devfn
)
885 struct dmar_drhd_unit
*drhd
= NULL
;
886 struct intel_iommu
*iommu
;
888 struct pci_dev
*ptmp
, *pdev
= NULL
;
892 if (iommu_dummy(dev
))
895 if (dev_is_pci(dev
)) {
896 struct pci_dev
*pf_pdev
;
898 pdev
= to_pci_dev(dev
);
899 /* VFs aren't listed in scope tables; we need to look up
900 * the PF instead to find the IOMMU. */
901 pf_pdev
= pci_physfn(pdev
);
903 segment
= pci_domain_nr(pdev
->bus
);
904 } else if (has_acpi_companion(dev
))
905 dev
= &ACPI_COMPANION(dev
)->dev
;
908 for_each_active_iommu(iommu
, drhd
) {
909 if (pdev
&& segment
!= drhd
->segment
)
912 for_each_active_dev_scope(drhd
->devices
,
913 drhd
->devices_cnt
, i
, tmp
) {
915 /* For a VF use its original BDF# not that of the PF
916 * which we used for the IOMMU lookup. Strictly speaking
917 * we could do this for all PCI devices; we only need to
918 * get the BDF# from the scope table for ACPI matches. */
919 if (pdev
&& pdev
->is_virtfn
)
922 *bus
= drhd
->devices
[i
].bus
;
923 *devfn
= drhd
->devices
[i
].devfn
;
927 if (!pdev
|| !dev_is_pci(tmp
))
930 ptmp
= to_pci_dev(tmp
);
931 if (ptmp
->subordinate
&&
932 ptmp
->subordinate
->number
<= pdev
->bus
->number
&&
933 ptmp
->subordinate
->busn_res
.end
>= pdev
->bus
->number
)
937 if (pdev
&& drhd
->include_all
) {
939 *bus
= pdev
->bus
->number
;
940 *devfn
= pdev
->devfn
;
951 static void domain_flush_cache(struct dmar_domain
*domain
,
952 void *addr
, int size
)
954 if (!domain
->iommu_coherency
)
955 clflush_cache_range(addr
, size
);
958 static int device_context_mapped(struct intel_iommu
*iommu
, u8 bus
, u8 devfn
)
960 struct context_entry
*context
;
964 spin_lock_irqsave(&iommu
->lock
, flags
);
965 context
= iommu_context_addr(iommu
, bus
, devfn
, 0);
967 ret
= context_present(context
);
968 spin_unlock_irqrestore(&iommu
->lock
, flags
);
972 static void clear_context_table(struct intel_iommu
*iommu
, u8 bus
, u8 devfn
)
974 struct context_entry
*context
;
977 spin_lock_irqsave(&iommu
->lock
, flags
);
978 context
= iommu_context_addr(iommu
, bus
, devfn
, 0);
980 context_clear_entry(context
);
981 __iommu_flush_cache(iommu
, context
, sizeof(*context
));
983 spin_unlock_irqrestore(&iommu
->lock
, flags
);
986 static void free_context_table(struct intel_iommu
*iommu
)
990 struct context_entry
*context
;
992 spin_lock_irqsave(&iommu
->lock
, flags
);
993 if (!iommu
->root_entry
) {
996 for (i
= 0; i
< ROOT_ENTRY_NR
; i
++) {
997 context
= iommu_context_addr(iommu
, i
, 0, 0);
999 free_pgtable_page(context
);
1001 if (!ecs_enabled(iommu
))
1004 context
= iommu_context_addr(iommu
, i
, 0x80, 0);
1006 free_pgtable_page(context
);
1009 free_pgtable_page(iommu
->root_entry
);
1010 iommu
->root_entry
= NULL
;
1012 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1015 static struct dma_pte
*pfn_to_dma_pte(struct dmar_domain
*domain
,
1016 unsigned long pfn
, int *target_level
)
1018 struct dma_pte
*parent
, *pte
= NULL
;
1019 int level
= agaw_to_level(domain
->agaw
);
1022 BUG_ON(!domain
->pgd
);
1024 if (!domain_pfn_supported(domain
, pfn
))
1025 /* Address beyond IOMMU's addressing capabilities. */
1028 parent
= domain
->pgd
;
1033 offset
= pfn_level_offset(pfn
, level
);
1034 pte
= &parent
[offset
];
1035 if (!*target_level
&& (dma_pte_superpage(pte
) || !dma_pte_present(pte
)))
1037 if (level
== *target_level
)
1040 if (!dma_pte_present(pte
)) {
1043 tmp_page
= alloc_pgtable_page(domain
->nid
);
1048 domain_flush_cache(domain
, tmp_page
, VTD_PAGE_SIZE
);
1049 pteval
= ((uint64_t)virt_to_dma_pfn(tmp_page
) << VTD_PAGE_SHIFT
) | DMA_PTE_READ
| DMA_PTE_WRITE
;
1050 if (cmpxchg64(&pte
->val
, 0ULL, pteval
))
1051 /* Someone else set it while we were thinking; use theirs. */
1052 free_pgtable_page(tmp_page
);
1054 domain_flush_cache(domain
, pte
, sizeof(*pte
));
1059 parent
= phys_to_virt(dma_pte_addr(pte
));
1064 *target_level
= level
;
1070 /* return address's pte at specific level */
1071 static struct dma_pte
*dma_pfn_level_pte(struct dmar_domain
*domain
,
1073 int level
, int *large_page
)
1075 struct dma_pte
*parent
, *pte
= NULL
;
1076 int total
= agaw_to_level(domain
->agaw
);
1079 parent
= domain
->pgd
;
1080 while (level
<= total
) {
1081 offset
= pfn_level_offset(pfn
, total
);
1082 pte
= &parent
[offset
];
1086 if (!dma_pte_present(pte
)) {
1087 *large_page
= total
;
1091 if (dma_pte_superpage(pte
)) {
1092 *large_page
= total
;
1096 parent
= phys_to_virt(dma_pte_addr(pte
));
1102 /* clear last level pte, a tlb flush should be followed */
1103 static void dma_pte_clear_range(struct dmar_domain
*domain
,
1104 unsigned long start_pfn
,
1105 unsigned long last_pfn
)
1107 unsigned int large_page
= 1;
1108 struct dma_pte
*first_pte
, *pte
;
1110 BUG_ON(!domain_pfn_supported(domain
, start_pfn
));
1111 BUG_ON(!domain_pfn_supported(domain
, last_pfn
));
1112 BUG_ON(start_pfn
> last_pfn
);
1114 /* we don't need lock here; nobody else touches the iova range */
1117 first_pte
= pte
= dma_pfn_level_pte(domain
, start_pfn
, 1, &large_page
);
1119 start_pfn
= align_to_level(start_pfn
+ 1, large_page
+ 1);
1124 start_pfn
+= lvl_to_nr_pages(large_page
);
1126 } while (start_pfn
<= last_pfn
&& !first_pte_in_page(pte
));
1128 domain_flush_cache(domain
, first_pte
,
1129 (void *)pte
- (void *)first_pte
);
1131 } while (start_pfn
&& start_pfn
<= last_pfn
);
1134 static void dma_pte_free_level(struct dmar_domain
*domain
, int level
,
1135 struct dma_pte
*pte
, unsigned long pfn
,
1136 unsigned long start_pfn
, unsigned long last_pfn
)
1138 pfn
= max(start_pfn
, pfn
);
1139 pte
= &pte
[pfn_level_offset(pfn
, level
)];
1142 unsigned long level_pfn
;
1143 struct dma_pte
*level_pte
;
1145 if (!dma_pte_present(pte
) || dma_pte_superpage(pte
))
1148 level_pfn
= pfn
& level_mask(level
);
1149 level_pte
= phys_to_virt(dma_pte_addr(pte
));
1152 dma_pte_free_level(domain
, level
- 1, level_pte
,
1153 level_pfn
, start_pfn
, last_pfn
);
1155 /* If range covers entire pagetable, free it */
1156 if (!(start_pfn
> level_pfn
||
1157 last_pfn
< level_pfn
+ level_size(level
) - 1)) {
1159 domain_flush_cache(domain
, pte
, sizeof(*pte
));
1160 free_pgtable_page(level_pte
);
1163 pfn
+= level_size(level
);
1164 } while (!first_pte_in_page(++pte
) && pfn
<= last_pfn
);
1167 /* clear last level (leaf) ptes and free page table pages. */
1168 static void dma_pte_free_pagetable(struct dmar_domain
*domain
,
1169 unsigned long start_pfn
,
1170 unsigned long last_pfn
)
1172 BUG_ON(!domain_pfn_supported(domain
, start_pfn
));
1173 BUG_ON(!domain_pfn_supported(domain
, last_pfn
));
1174 BUG_ON(start_pfn
> last_pfn
);
1176 dma_pte_clear_range(domain
, start_pfn
, last_pfn
);
1178 /* We don't need lock here; nobody else touches the iova range */
1179 dma_pte_free_level(domain
, agaw_to_level(domain
->agaw
),
1180 domain
->pgd
, 0, start_pfn
, last_pfn
);
1183 if (start_pfn
== 0 && last_pfn
== DOMAIN_MAX_PFN(domain
->gaw
)) {
1184 free_pgtable_page(domain
->pgd
);
/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
1195 static struct page
*dma_pte_list_pagetables(struct dmar_domain
*domain
,
1196 int level
, struct dma_pte
*pte
,
1197 struct page
*freelist
)
1201 pg
= pfn_to_page(dma_pte_addr(pte
) >> PAGE_SHIFT
);
1202 pg
->freelist
= freelist
;
1208 pte
= page_address(pg
);
1210 if (dma_pte_present(pte
) && !dma_pte_superpage(pte
))
1211 freelist
= dma_pte_list_pagetables(domain
, level
- 1,
1214 } while (!first_pte_in_page(pte
));
1219 static struct page
*dma_pte_clear_level(struct dmar_domain
*domain
, int level
,
1220 struct dma_pte
*pte
, unsigned long pfn
,
1221 unsigned long start_pfn
,
1222 unsigned long last_pfn
,
1223 struct page
*freelist
)
1225 struct dma_pte
*first_pte
= NULL
, *last_pte
= NULL
;
1227 pfn
= max(start_pfn
, pfn
);
1228 pte
= &pte
[pfn_level_offset(pfn
, level
)];
1231 unsigned long level_pfn
;
1233 if (!dma_pte_present(pte
))
1236 level_pfn
= pfn
& level_mask(level
);
1238 /* If range covers entire pagetable, free it */
1239 if (start_pfn
<= level_pfn
&&
1240 last_pfn
>= level_pfn
+ level_size(level
) - 1) {
/* These subordinate page tables are going away entirely. Don't
   bother to clear them; we're just going to *free* them. */
1243 if (level
> 1 && !dma_pte_superpage(pte
))
1244 freelist
= dma_pte_list_pagetables(domain
, level
- 1, pte
, freelist
);
1250 } else if (level
> 1) {
1251 /* Recurse down into a level that isn't *entirely* obsolete */
1252 freelist
= dma_pte_clear_level(domain
, level
- 1,
1253 phys_to_virt(dma_pte_addr(pte
)),
1254 level_pfn
, start_pfn
, last_pfn
,
1258 pfn
+= level_size(level
);
1259 } while (!first_pte_in_page(++pte
) && pfn
<= last_pfn
);
1262 domain_flush_cache(domain
, first_pte
,
1263 (void *)++last_pte
- (void *)first_pte
);
1268 /* We can't just free the pages because the IOMMU may still be walking
1269 the page tables, and may have cached the intermediate levels. The
1270 pages can only be freed after the IOTLB flush has been done. */
1271 static struct page
*domain_unmap(struct dmar_domain
*domain
,
1272 unsigned long start_pfn
,
1273 unsigned long last_pfn
)
1275 struct page
*freelist
= NULL
;
1277 BUG_ON(!domain_pfn_supported(domain
, start_pfn
));
1278 BUG_ON(!domain_pfn_supported(domain
, last_pfn
));
1279 BUG_ON(start_pfn
> last_pfn
);
1281 /* we don't need lock here; nobody else touches the iova range */
1282 freelist
= dma_pte_clear_level(domain
, agaw_to_level(domain
->agaw
),
1283 domain
->pgd
, 0, start_pfn
, last_pfn
, NULL
);
1286 if (start_pfn
== 0 && last_pfn
== DOMAIN_MAX_PFN(domain
->gaw
)) {
1287 struct page
*pgd_page
= virt_to_page(domain
->pgd
);
1288 pgd_page
->freelist
= freelist
;
1289 freelist
= pgd_page
;
1297 static void dma_free_pagelist(struct page
*freelist
)
1301 while ((pg
= freelist
)) {
1302 freelist
= pg
->freelist
;
1303 free_pgtable_page(page_address(pg
));
1307 /* iommu handling */
1308 static int iommu_alloc_root_entry(struct intel_iommu
*iommu
)
1310 struct root_entry
*root
;
1311 unsigned long flags
;
1313 root
= (struct root_entry
*)alloc_pgtable_page(iommu
->node
);
1315 pr_err("Allocating root entry for %s failed\n",
1320 __iommu_flush_cache(iommu
, root
, ROOT_SIZE
);
1322 spin_lock_irqsave(&iommu
->lock
, flags
);
1323 iommu
->root_entry
= root
;
1324 spin_unlock_irqrestore(&iommu
->lock
, flags
);
1329 static void iommu_set_root_entry(struct intel_iommu
*iommu
)
1335 addr
= virt_to_phys(iommu
->root_entry
);
1336 if (ecs_enabled(iommu
))
1337 addr
|= DMA_RTADDR_RTT
;
1339 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
1340 dmar_writeq(iommu
->reg
+ DMAR_RTADDR_REG
, addr
);
1342 writel(iommu
->gcmd
| DMA_GCMD_SRTP
, iommu
->reg
+ DMAR_GCMD_REG
);
1344 /* Make sure hardware complete it */
1345 IOMMU_WAIT_OP(iommu
, DMAR_GSTS_REG
,
1346 readl
, (sts
& DMA_GSTS_RTPS
), sts
);
1348 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
1351 static void iommu_flush_write_buffer(struct intel_iommu
*iommu
)
1356 if (!rwbf_quirk
&& !cap_rwbf(iommu
->cap
))
1359 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
1360 writel(iommu
->gcmd
| DMA_GCMD_WBF
, iommu
->reg
+ DMAR_GCMD_REG
);
1362 /* Make sure hardware complete it */
1363 IOMMU_WAIT_OP(iommu
, DMAR_GSTS_REG
,
1364 readl
, (!(val
& DMA_GSTS_WBFS
)), val
);
1366 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
/* return value determines if we need a write buffer flush */
1370 static void __iommu_flush_context(struct intel_iommu
*iommu
,
1371 u16 did
, u16 source_id
, u8 function_mask
,
1378 case DMA_CCMD_GLOBAL_INVL
:
1379 val
= DMA_CCMD_GLOBAL_INVL
;
1381 case DMA_CCMD_DOMAIN_INVL
:
1382 val
= DMA_CCMD_DOMAIN_INVL
|DMA_CCMD_DID(did
);
1384 case DMA_CCMD_DEVICE_INVL
:
1385 val
= DMA_CCMD_DEVICE_INVL
|DMA_CCMD_DID(did
)
1386 | DMA_CCMD_SID(source_id
) | DMA_CCMD_FM(function_mask
);
1391 val
|= DMA_CCMD_ICC
;
1393 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
1394 dmar_writeq(iommu
->reg
+ DMAR_CCMD_REG
, val
);
1396 /* Make sure hardware complete it */
1397 IOMMU_WAIT_OP(iommu
, DMAR_CCMD_REG
,
1398 dmar_readq
, (!(val
& DMA_CCMD_ICC
)), val
);
1400 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
/* return value determines if we need a write buffer flush */
1404 static void __iommu_flush_iotlb(struct intel_iommu
*iommu
, u16 did
,
1405 u64 addr
, unsigned int size_order
, u64 type
)
1407 int tlb_offset
= ecap_iotlb_offset(iommu
->ecap
);
1408 u64 val
= 0, val_iva
= 0;
1412 case DMA_TLB_GLOBAL_FLUSH
:
1413 /* global flush doesn't need set IVA_REG */
1414 val
= DMA_TLB_GLOBAL_FLUSH
|DMA_TLB_IVT
;
1416 case DMA_TLB_DSI_FLUSH
:
1417 val
= DMA_TLB_DSI_FLUSH
|DMA_TLB_IVT
|DMA_TLB_DID(did
);
1419 case DMA_TLB_PSI_FLUSH
:
1420 val
= DMA_TLB_PSI_FLUSH
|DMA_TLB_IVT
|DMA_TLB_DID(did
);
1421 /* IH bit is passed in as part of address */
1422 val_iva
= size_order
| addr
;
1427 /* Note: set drain read/write */
 * This is probably done just to be extra secure. Looks like we can
 * ignore it without any impact.
1433 if (cap_read_drain(iommu
->cap
))
1434 val
|= DMA_TLB_READ_DRAIN
;
1436 if (cap_write_drain(iommu
->cap
))
1437 val
|= DMA_TLB_WRITE_DRAIN
;
1439 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
1440 /* Note: Only uses first TLB reg currently */
1442 dmar_writeq(iommu
->reg
+ tlb_offset
, val_iva
);
1443 dmar_writeq(iommu
->reg
+ tlb_offset
+ 8, val
);
1445 /* Make sure hardware complete it */
1446 IOMMU_WAIT_OP(iommu
, tlb_offset
+ 8,
1447 dmar_readq
, (!(val
& DMA_TLB_IVT
)), val
);
1449 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
1451 /* check IOTLB invalidation granularity */
1452 if (DMA_TLB_IAIG(val
) == 0)
1453 pr_err("Flush IOTLB failed\n");
1454 if (DMA_TLB_IAIG(val
) != DMA_TLB_IIRG(type
))
1455 pr_debug("TLB flush request %Lx, actual %Lx\n",
1456 (unsigned long long)DMA_TLB_IIRG(type
),
1457 (unsigned long long)DMA_TLB_IAIG(val
));
1460 static struct device_domain_info
*
1461 iommu_support_dev_iotlb (struct dmar_domain
*domain
, struct intel_iommu
*iommu
,
1464 struct device_domain_info
*info
;
1466 assert_spin_locked(&device_domain_lock
);
1471 list_for_each_entry(info
, &domain
->devices
, link
)
1472 if (info
->iommu
== iommu
&& info
->bus
== bus
&&
1473 info
->devfn
== devfn
) {
1474 if (info
->ats_supported
&& info
->dev
)
1482 static void domain_update_iotlb(struct dmar_domain
*domain
)
1484 struct device_domain_info
*info
;
1485 bool has_iotlb_device
= false;
1487 assert_spin_locked(&device_domain_lock
);
1489 list_for_each_entry(info
, &domain
->devices
, link
) {
1490 struct pci_dev
*pdev
;
1492 if (!info
->dev
|| !dev_is_pci(info
->dev
))
1495 pdev
= to_pci_dev(info
->dev
);
1496 if (pdev
->ats_enabled
) {
1497 has_iotlb_device
= true;
1502 domain
->has_iotlb_device
= has_iotlb_device
;
1505 static void iommu_enable_dev_iotlb(struct device_domain_info
*info
)
1507 struct pci_dev
*pdev
;
1509 assert_spin_locked(&device_domain_lock
);
1511 if (!info
|| !dev_is_pci(info
->dev
))
1514 pdev
= to_pci_dev(info
->dev
);
	/* For IOMMUs that support device IOTLB throttling (DIT), we assign
	 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
	 * queue depth at PF level. If DIT is not set, PFSID will be treated as
	 * reserved, which should be set to 0.
	 */
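	/*
	 * Illustrative note (not from the original source): PCI_DEVID() packs
	 * the routing ID as (bus << 8) | devfn, so a physical function at
	 * 0000:3a:00.0 yields a pfsid of 0x3a00 in the lookup below.
	 */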
1520 if (!ecap_dit(info
->iommu
->ecap
))
1523 struct pci_dev
*pf_pdev
;
1525 /* pdev will be returned if device is not a vf */
1526 pf_pdev
= pci_physfn(pdev
);
1527 info
->pfsid
= PCI_DEVID(pf_pdev
->bus
->number
, pf_pdev
->devfn
);
1530 #ifdef CONFIG_INTEL_IOMMU_SVM
	/* The PCIe spec, in its wisdom, declares that the behaviour of
	   the device if you enable PASID support after ATS support is
	   undefined. So always enable PASID support on devices which
	   have it, even if we can't yet know if we're ever going to
	   use it. */
1536 if (info
->pasid_supported
&& !pci_enable_pasid(pdev
, info
->pasid_supported
& ~1))
1537 info
->pasid_enabled
= 1;
1539 if (info
->pri_supported
&& !pci_reset_pri(pdev
) && !pci_enable_pri(pdev
, 32))
1540 info
->pri_enabled
= 1;
1542 if (info
->ats_supported
&& !pci_enable_ats(pdev
, VTD_PAGE_SHIFT
)) {
1543 info
->ats_enabled
= 1;
1544 domain_update_iotlb(info
->domain
);
1545 info
->ats_qdep
= pci_ats_queue_depth(pdev
);
1549 static void iommu_disable_dev_iotlb(struct device_domain_info
*info
)
1551 struct pci_dev
*pdev
;
1553 assert_spin_locked(&device_domain_lock
);
1555 if (!dev_is_pci(info
->dev
))
1558 pdev
= to_pci_dev(info
->dev
);
1560 if (info
->ats_enabled
) {
1561 pci_disable_ats(pdev
);
1562 info
->ats_enabled
= 0;
1563 domain_update_iotlb(info
->domain
);
1565 #ifdef CONFIG_INTEL_IOMMU_SVM
1566 if (info
->pri_enabled
) {
1567 pci_disable_pri(pdev
);
1568 info
->pri_enabled
= 0;
1570 if (info
->pasid_enabled
) {
1571 pci_disable_pasid(pdev
);
1572 info
->pasid_enabled
= 0;
1577 static void iommu_flush_dev_iotlb(struct dmar_domain
*domain
,
1578 u64 addr
, unsigned mask
)
1581 unsigned long flags
;
1582 struct device_domain_info
*info
;
1584 if (!domain
->has_iotlb_device
)
1587 spin_lock_irqsave(&device_domain_lock
, flags
);
1588 list_for_each_entry(info
, &domain
->devices
, link
) {
1589 if (!info
->ats_enabled
)
1592 sid
= info
->bus
<< 8 | info
->devfn
;
1593 qdep
= info
->ats_qdep
;
1594 qi_flush_dev_iotlb(info
->iommu
, sid
, info
->pfsid
,
1597 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1600 static void iommu_flush_iotlb_psi(struct intel_iommu
*iommu
,
1601 struct dmar_domain
*domain
,
1602 unsigned long pfn
, unsigned int pages
,
1605 unsigned int mask
= ilog2(__roundup_pow_of_two(pages
));
1606 uint64_t addr
= (uint64_t)pfn
<< VTD_PAGE_SHIFT
;
1607 u16 did
= domain
->iommu_did
[iommu
->seq_id
];
 * Fallback to domain selective flush if no PSI support or the size is
 * too big.
 * PSI requires page size to be 2 ^ x, and the base address is naturally
 * aligned to the size.
1619 if (!cap_pgsel_inv(iommu
->cap
) || mask
> cap_max_amask_val(iommu
->cap
))
1620 iommu
->flush
.flush_iotlb(iommu
, did
, 0, 0,
1623 iommu
->flush
.flush_iotlb(iommu
, did
, addr
| ih
, mask
,
1627 * In caching mode, changes of pages from non-present to present require
1628 * flush. However, device IOTLB doesn't need to be flushed in this case.
1630 if (!cap_caching_mode(iommu
->cap
) || !map
)
1631 iommu_flush_dev_iotlb(domain
, addr
, mask
);
1634 static void iommu_disable_protect_mem_regions(struct intel_iommu
*iommu
)
1637 unsigned long flags
;
1639 raw_spin_lock_irqsave(&iommu
->register_lock
, flags
);
1640 pmen
= readl(iommu
->reg
+ DMAR_PMEN_REG
);
1641 pmen
&= ~DMA_PMEN_EPM
;
1642 writel(pmen
, iommu
->reg
+ DMAR_PMEN_REG
);
1644 /* wait for the protected region status bit to clear */
1645 IOMMU_WAIT_OP(iommu
, DMAR_PMEN_REG
,
1646 readl
, !(pmen
& DMA_PMEN_PRS
), pmen
);
1648 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flags
);
1651 static void iommu_enable_translation(struct intel_iommu
*iommu
)
1654 unsigned long flags
;
1656 raw_spin_lock_irqsave(&iommu
->register_lock
, flags
);
1657 iommu
->gcmd
|= DMA_GCMD_TE
;
1658 writel(iommu
->gcmd
, iommu
->reg
+ DMAR_GCMD_REG
);
1660 /* Make sure hardware complete it */
1661 IOMMU_WAIT_OP(iommu
, DMAR_GSTS_REG
,
1662 readl
, (sts
& DMA_GSTS_TES
), sts
);
1664 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flags
);
1667 static void iommu_disable_translation(struct intel_iommu
*iommu
)
1672 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
1673 iommu
->gcmd
&= ~DMA_GCMD_TE
;
1674 writel(iommu
->gcmd
, iommu
->reg
+ DMAR_GCMD_REG
);
1676 /* Make sure hardware complete it */
1677 IOMMU_WAIT_OP(iommu
, DMAR_GSTS_REG
,
1678 readl
, (!(sts
& DMA_GSTS_TES
)), sts
);
1680 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
1684 static int iommu_init_domains(struct intel_iommu
*iommu
)
1686 u32 ndomains
, nlongs
;
1689 ndomains
= cap_ndoms(iommu
->cap
);
1690 pr_debug("%s: Number of Domains supported <%d>\n",
1691 iommu
->name
, ndomains
);
1692 nlongs
= BITS_TO_LONGS(ndomains
);
1694 spin_lock_init(&iommu
->lock
);
1696 iommu
->domain_ids
= kcalloc(nlongs
, sizeof(unsigned long), GFP_KERNEL
);
1697 if (!iommu
->domain_ids
) {
1698 pr_err("%s: Allocating domain id array failed\n",
1703 size
= (ALIGN(ndomains
, 256) >> 8) * sizeof(struct dmar_domain
**);
1704 iommu
->domains
= kzalloc(size
, GFP_KERNEL
);
1706 if (iommu
->domains
) {
1707 size
= 256 * sizeof(struct dmar_domain
*);
1708 iommu
->domains
[0] = kzalloc(size
, GFP_KERNEL
);
1711 if (!iommu
->domains
|| !iommu
->domains
[0]) {
1712 pr_err("%s: Allocating domain array failed\n",
1714 kfree(iommu
->domain_ids
);
1715 kfree(iommu
->domains
);
1716 iommu
->domain_ids
= NULL
;
1717 iommu
->domains
= NULL
;
1724 * If Caching mode is set, then invalid translations are tagged
1725 * with domain-id 0, hence we need to pre-allocate it. We also
1726 * use domain-id 0 as a marker for non-allocated domain-id, so
1727 * make sure it is not used for a real domain.
1729 set_bit(0, iommu
->domain_ids
);
1734 static void disable_dmar_iommu(struct intel_iommu
*iommu
)
1736 struct device_domain_info
*info
, *tmp
;
1737 unsigned long flags
;
1739 if (!iommu
->domains
|| !iommu
->domain_ids
)
1743 spin_lock_irqsave(&device_domain_lock
, flags
);
1744 list_for_each_entry_safe(info
, tmp
, &device_domain_list
, global
) {
1745 struct dmar_domain
*domain
;
1747 if (info
->iommu
!= iommu
)
1750 if (!info
->dev
|| !info
->domain
)
1753 domain
= info
->domain
;
1755 __dmar_remove_one_dev_info(info
);
1757 if (!domain_type_is_vm_or_si(domain
)) {
1759 * The domain_exit() function can't be called under
1760 * device_domain_lock, as it takes this lock itself.
1761 * So release the lock here and re-run the loop
1764 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1765 domain_exit(domain
);
1769 spin_unlock_irqrestore(&device_domain_lock
, flags
);
1771 if (iommu
->gcmd
& DMA_GCMD_TE
)
1772 iommu_disable_translation(iommu
);
1775 static void free_dmar_iommu(struct intel_iommu
*iommu
)
1777 if ((iommu
->domains
) && (iommu
->domain_ids
)) {
1778 int elems
= ALIGN(cap_ndoms(iommu
->cap
), 256) >> 8;
1781 for (i
= 0; i
< elems
; i
++)
1782 kfree(iommu
->domains
[i
]);
1783 kfree(iommu
->domains
);
1784 kfree(iommu
->domain_ids
);
1785 iommu
->domains
= NULL
;
1786 iommu
->domain_ids
= NULL
;
1789 g_iommus
[iommu
->seq_id
] = NULL
;
1791 /* free context mapping */
1792 free_context_table(iommu
);
1794 #ifdef CONFIG_INTEL_IOMMU_SVM
1795 if (pasid_enabled(iommu
)) {
1796 if (ecap_prs(iommu
->ecap
))
1797 intel_svm_finish_prq(iommu
);
1798 intel_svm_free_pasid_tables(iommu
);
1803 static struct dmar_domain
*alloc_domain(int flags
)
1805 struct dmar_domain
*domain
;
1807 domain
= alloc_domain_mem();
1811 memset(domain
, 0, sizeof(*domain
));
1813 domain
->flags
= flags
;
1814 domain
->has_iotlb_device
= false;
1815 INIT_LIST_HEAD(&domain
->devices
);
1820 /* Must be called with iommu->lock */
1821 static int domain_attach_iommu(struct dmar_domain
*domain
,
1822 struct intel_iommu
*iommu
)
1824 unsigned long ndomains
;
1827 assert_spin_locked(&device_domain_lock
);
1828 assert_spin_locked(&iommu
->lock
);
1830 domain
->iommu_refcnt
[iommu
->seq_id
] += 1;
1831 domain
->iommu_count
+= 1;
1832 if (domain
->iommu_refcnt
[iommu
->seq_id
] == 1) {
1833 ndomains
= cap_ndoms(iommu
->cap
);
1834 num
= find_first_zero_bit(iommu
->domain_ids
, ndomains
);
1836 if (num
>= ndomains
) {
1837 pr_err("%s: No free domain ids\n", iommu
->name
);
1838 domain
->iommu_refcnt
[iommu
->seq_id
] -= 1;
1839 domain
->iommu_count
-= 1;
1843 set_bit(num
, iommu
->domain_ids
);
1844 set_iommu_domain(iommu
, num
, domain
);
1846 domain
->iommu_did
[iommu
->seq_id
] = num
;
1847 domain
->nid
= iommu
->node
;
1849 domain_update_iommu_cap(domain
);
1855 static int domain_detach_iommu(struct dmar_domain
*domain
,
1856 struct intel_iommu
*iommu
)
1858 int num
, count
= INT_MAX
;
1860 assert_spin_locked(&device_domain_lock
);
1861 assert_spin_locked(&iommu
->lock
);
1863 domain
->iommu_refcnt
[iommu
->seq_id
] -= 1;
1864 count
= --domain
->iommu_count
;
1865 if (domain
->iommu_refcnt
[iommu
->seq_id
] == 0) {
1866 num
= domain
->iommu_did
[iommu
->seq_id
];
1867 clear_bit(num
, iommu
->domain_ids
);
1868 set_iommu_domain(iommu
, num
, NULL
);
1870 domain_update_iommu_cap(domain
);
1871 domain
->iommu_did
[iommu
->seq_id
] = 0;
1877 static struct iova_domain reserved_iova_list
;
1878 static struct lock_class_key reserved_rbtree_key
;
1880 static int dmar_init_reserved_ranges(void)
1882 struct pci_dev
*pdev
= NULL
;
1886 init_iova_domain(&reserved_iova_list
, VTD_PAGE_SIZE
, IOVA_START_PFN
,
1889 lockdep_set_class(&reserved_iova_list
.iova_rbtree_lock
,
1890 &reserved_rbtree_key
);
1892 /* IOAPIC ranges shouldn't be accessed by DMA */
1893 iova
= reserve_iova(&reserved_iova_list
, IOVA_PFN(IOAPIC_RANGE_START
),
1894 IOVA_PFN(IOAPIC_RANGE_END
));
1896 pr_err("Reserve IOAPIC range failed\n");
1900 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1901 for_each_pci_dev(pdev
) {
1904 for (i
= 0; i
< PCI_NUM_RESOURCES
; i
++) {
1905 r
= &pdev
->resource
[i
];
1906 if (!r
->flags
|| !(r
->flags
& IORESOURCE_MEM
))
1908 iova
= reserve_iova(&reserved_iova_list
,
1912 pr_err("Reserve iova failed\n");
1920 static void domain_reserve_special_ranges(struct dmar_domain
*domain
)
1922 copy_reserved_iova(&reserved_iova_list
, &domain
->iovad
);
1925 static inline int guestwidth_to_adjustwidth(int gaw
)
1928 int r
= (gaw
- 12) % 9;
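/*
 * Worked example (illustrative, not from the original source): a 39-bit
 * guest width gives (39 - 12) % 9 == 0, so it already spans a whole number
 * of 9-bit levels and is kept as-is; a 40-bit request leaves a remainder of
 * 1 and is rounded up to 48 bits so the page-table depth stays integral.
 */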
1939 static int domain_init(struct dmar_domain
*domain
, struct intel_iommu
*iommu
,
1942 int adjust_width
, agaw
;
1943 unsigned long sagaw
;
1945 init_iova_domain(&domain
->iovad
, VTD_PAGE_SIZE
, IOVA_START_PFN
,
1947 domain_reserve_special_ranges(domain
);
1949 /* calculate AGAW */
1950 if (guest_width
> cap_mgaw(iommu
->cap
))
1951 guest_width
= cap_mgaw(iommu
->cap
);
1952 domain
->gaw
= guest_width
;
1953 adjust_width
= guestwidth_to_adjustwidth(guest_width
);
1954 agaw
= width_to_agaw(adjust_width
);
1955 sagaw
= cap_sagaw(iommu
->cap
);
1956 if (!test_bit(agaw
, &sagaw
)) {
1957 /* hardware doesn't support it, choose a bigger one */
1958 pr_debug("Hardware doesn't support agaw %d\n", agaw
);
1959 agaw
= find_next_bit(&sagaw
, 5, agaw
);
1963 domain
->agaw
= agaw
;
1965 if (ecap_coherent(iommu
->ecap
))
1966 domain
->iommu_coherency
= 1;
1968 domain
->iommu_coherency
= 0;
1970 if (ecap_sc_support(iommu
->ecap
))
1971 domain
->iommu_snooping
= 1;
1973 domain
->iommu_snooping
= 0;
1975 if (intel_iommu_superpage
)
1976 domain
->iommu_superpage
= fls(cap_super_page_val(iommu
->cap
));
1978 domain
->iommu_superpage
= 0;
1980 domain
->nid
= iommu
->node
;
1982 /* always allocate the top pgd */
1983 domain
->pgd
= (struct dma_pte
*)alloc_pgtable_page(domain
->nid
);
1986 __iommu_flush_cache(iommu
, domain
->pgd
, PAGE_SIZE
);
1990 static void domain_exit(struct dmar_domain
*domain
)
1992 struct page
*freelist
= NULL
;
/* Domain 0 is reserved, so don't process it */
1998 /* Flush any lazy unmaps that may reference this domain */
1999 if (!intel_iommu_strict
) {
2002 for_each_possible_cpu(cpu
)
2003 flush_unmaps_timeout(cpu
);
2006 /* Remove associated devices and clear attached or cached domains */
2008 domain_remove_dev_info(domain
);
2012 put_iova_domain(&domain
->iovad
);
2014 freelist
= domain_unmap(domain
, 0, DOMAIN_MAX_PFN(domain
->gaw
));
2016 dma_free_pagelist(freelist
);
2018 free_domain_mem(domain
);
2021 static int domain_context_mapping_one(struct dmar_domain
*domain
,
2022 struct intel_iommu
*iommu
,
2025 u16 did
= domain
->iommu_did
[iommu
->seq_id
];
2026 int translation
= CONTEXT_TT_MULTI_LEVEL
;
2027 struct device_domain_info
*info
= NULL
;
2028 struct context_entry
*context
;
2029 unsigned long flags
;
2030 struct dma_pte
*pgd
;
2035 if (hw_pass_through
&& domain_type_is_si(domain
))
2036 translation
= CONTEXT_TT_PASS_THROUGH
;
2038 pr_debug("Set context mapping for %02x:%02x.%d\n",
2039 bus
, PCI_SLOT(devfn
), PCI_FUNC(devfn
));
2041 BUG_ON(!domain
->pgd
);
2043 spin_lock_irqsave(&device_domain_lock
, flags
);
2044 spin_lock(&iommu
->lock
);
2047 context
= iommu_context_addr(iommu
, bus
, devfn
, 1);
2052 if (context_present(context
))
	 * For kdump cases, old valid entries may be cached due to the
	 * in-flight DMA and copied pgtable, but there is no unmapping
	 * behaviour for them, thus we need an explicit cache flush for
	 * the newly-mapped device. For kdump, at this point, the device
	 * is supposed to finish reset at its driver probe stage, so no
	 * in-flight DMA will exist, and we don't need to worry anymore
	 * hereafter.
	 */
2064 if (context_copied(context
)) {
2065 u16 did_old
= context_domain_id(context
);
2067 if (did_old
>= 0 && did_old
< cap_ndoms(iommu
->cap
)) {
2068 iommu
->flush
.flush_context(iommu
, did_old
,
2069 (((u16
)bus
) << 8) | devfn
,
2070 DMA_CCMD_MASK_NOBIT
,
2071 DMA_CCMD_DEVICE_INVL
);
2072 iommu
->flush
.flush_iotlb(iommu
, did_old
, 0, 0,
2079 context_clear_entry(context
);
2080 context_set_domain_id(context
, did
);
	 * Skip top levels of page tables for iommus which have less agaw
	 * than the default. Unnecessary for PT mode.
2086 if (translation
!= CONTEXT_TT_PASS_THROUGH
) {
2087 for (agaw
= domain
->agaw
; agaw
> iommu
->agaw
; agaw
--) {
2089 pgd
= phys_to_virt(dma_pte_addr(pgd
));
2090 if (!dma_pte_present(pgd
))
2094 info
= iommu_support_dev_iotlb(domain
, iommu
, bus
, devfn
);
2095 if (info
&& info
->ats_supported
)
2096 translation
= CONTEXT_TT_DEV_IOTLB
;
2098 translation
= CONTEXT_TT_MULTI_LEVEL
;
2100 context_set_address_root(context
, virt_to_phys(pgd
));
2101 context_set_address_width(context
, agaw
);
2104 * In pass through mode, AW must be programmed to
2105 * indicate the largest AGAW value supported by
2106 * hardware. And ASR is ignored by hardware.
2108 context_set_address_width(context
, iommu
->msagaw
);
2111 context_set_translation_type(context
, translation
);
2112 context_set_fault_enable(context
);
2113 context_set_present(context
);
2114 domain_flush_cache(domain
, context
, sizeof(*context
));
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries, we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
2122 if (cap_caching_mode(iommu
->cap
)) {
2123 iommu
->flush
.flush_context(iommu
, 0,
2124 (((u16
)bus
) << 8) | devfn
,
2125 DMA_CCMD_MASK_NOBIT
,
2126 DMA_CCMD_DEVICE_INVL
);
2127 iommu
->flush
.flush_iotlb(iommu
, did
, 0, 0, DMA_TLB_DSI_FLUSH
);
2129 iommu_flush_write_buffer(iommu
);
2131 iommu_enable_dev_iotlb(info
);
2136 spin_unlock(&iommu
->lock
);
2137 spin_unlock_irqrestore(&device_domain_lock
, flags
);
2142 struct domain_context_mapping_data
{
2143 struct dmar_domain
*domain
;
2144 struct intel_iommu
*iommu
;
2147 static int domain_context_mapping_cb(struct pci_dev
*pdev
,
2148 u16 alias
, void *opaque
)
2150 struct domain_context_mapping_data
*data
= opaque
;
2152 return domain_context_mapping_one(data
->domain
, data
->iommu
,
2153 PCI_BUS_NUM(alias
), alias
& 0xff);
2157 domain_context_mapping(struct dmar_domain
*domain
, struct device
*dev
)
2159 struct intel_iommu
*iommu
;
2161 struct domain_context_mapping_data data
;
2163 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
2167 if (!dev_is_pci(dev
))
2168 return domain_context_mapping_one(domain
, iommu
, bus
, devfn
);
2170 data
.domain
= domain
;
2173 return pci_for_each_dma_alias(to_pci_dev(dev
),
2174 &domain_context_mapping_cb
, &data
);
2177 static int domain_context_mapped_cb(struct pci_dev
*pdev
,
2178 u16 alias
, void *opaque
)
2180 struct intel_iommu
*iommu
= opaque
;
2182 return !device_context_mapped(iommu
, PCI_BUS_NUM(alias
), alias
& 0xff);
2185 static int domain_context_mapped(struct device
*dev
)
2187 struct intel_iommu
*iommu
;
2190 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
2194 if (!dev_is_pci(dev
))
2195 return device_context_mapped(iommu
, bus
, devfn
);
2197 return !pci_for_each_dma_alias(to_pci_dev(dev
),
2198 domain_context_mapped_cb
, iommu
);
2201 /* Returns a number of VTD pages, but aligned to MM page size */
2202 static inline unsigned long aligned_nrpages(unsigned long host_addr
,
2205 host_addr
&= ~PAGE_MASK
;
2206 return PAGE_ALIGN(host_addr
+ size
) >> VTD_PAGE_SHIFT
;
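/*
 * Worked example (illustrative, not from the original source): on a kernel
 * with 4KiB pages, host_addr 0x1234 and size 0x2000 reduce to an offset of
 * 0x234, and PAGE_ALIGN(0x234 + 0x2000) >> VTD_PAGE_SHIFT == 3, exactly the
 * three host pages the buffer touches.
 */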
2209 /* Return largest possible superpage level for a given mapping */
2210 static inline int hardware_largepage_caps(struct dmar_domain
*domain
,
2211 unsigned long iov_pfn
,
2212 unsigned long phy_pfn
,
2213 unsigned long pages
)
2215 int support
, level
= 1;
2216 unsigned long pfnmerge
;
2218 support
= domain
->iommu_superpage
;
2220 /* To use a large page, the virtual *and* physical addresses
2221 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2222 of them will mean we have to use smaller pages. So just
2223 merge them and check both at once. */
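	/*
	 * Illustrative example (not from the original source): iov_pfn 0x200
	 * ORed with phy_pfn 0x400 gives 0x600, whose low nine bits are clear,
	 * so the first loop iteration below may promote the mapping to a 2MiB
	 * superpage if enough pages remain; a stray low bit in either pfn
	 * would force plain 4KiB mappings.
	 */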
2224 pfnmerge
= iov_pfn
| phy_pfn
;
2226 while (support
&& !(pfnmerge
& ~VTD_STRIDE_MASK
)) {
2227 pages
>>= VTD_STRIDE_SHIFT
;
2230 pfnmerge
>>= VTD_STRIDE_SHIFT
;
2237 static int __domain_mapping(struct dmar_domain
*domain
, unsigned long iov_pfn
,
2238 struct scatterlist
*sg
, unsigned long phys_pfn
,
2239 unsigned long nr_pages
, int prot
)
2241 struct dma_pte
*first_pte
= NULL
, *pte
= NULL
;
2242 phys_addr_t
uninitialized_var(pteval
);
2243 unsigned long sg_res
= 0;
2244 unsigned int largepage_lvl
= 0;
2245 unsigned long lvl_pages
= 0;
2247 BUG_ON(!domain_pfn_supported(domain
, iov_pfn
+ nr_pages
- 1));
2249 if ((prot
& (DMA_PTE_READ
|DMA_PTE_WRITE
)) == 0)
2252 prot
&= DMA_PTE_READ
| DMA_PTE_WRITE
| DMA_PTE_SNP
;
2256 pteval
= ((phys_addr_t
)phys_pfn
<< VTD_PAGE_SHIFT
) | prot
;
2259 while (nr_pages
> 0) {
2263 unsigned int pgoff
= sg
->offset
& ~PAGE_MASK
;
2265 sg_res
= aligned_nrpages(sg
->offset
, sg
->length
);
2266 sg
->dma_address
= ((dma_addr_t
)iov_pfn
<< VTD_PAGE_SHIFT
) + pgoff
;
2267 sg
->dma_length
= sg
->length
;
2268 pteval
= (sg_phys(sg
) - pgoff
) | prot
;
2269 phys_pfn
= pteval
>> VTD_PAGE_SHIFT
;
2273 largepage_lvl
= hardware_largepage_caps(domain
, iov_pfn
, phys_pfn
, sg_res
);
2275 first_pte
= pte
= pfn_to_dma_pte(domain
, iov_pfn
, &largepage_lvl
);
2278 /* It is large page*/
2279 if (largepage_lvl
> 1) {
2280 unsigned long nr_superpages
, end_pfn
;
2282 pteval
|= DMA_PTE_LARGE_PAGE
;
2283 lvl_pages
= lvl_to_nr_pages(largepage_lvl
);
2285 nr_superpages
= sg_res
/ lvl_pages
;
2286 end_pfn
= iov_pfn
+ nr_superpages
* lvl_pages
- 1;
2289 * Ensure that old small page tables are
2290 * removed to make room for superpage(s).
2292 dma_pte_free_pagetable(domain
, iov_pfn
, end_pfn
);
2294 pteval
&= ~(uint64_t)DMA_PTE_LARGE_PAGE
;
2298 /* We don't need lock here, nobody else
2299 * touches the iova range
2301 tmp
= cmpxchg64_local(&pte
->val
, 0ULL, pteval
);
2303 static int dumps
= 5;
2304 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2305 iov_pfn
, tmp
, (unsigned long long)pteval
);
2308 debug_dma_dump_mappings(NULL
);
2313 lvl_pages
= lvl_to_nr_pages(largepage_lvl
);
2315 BUG_ON(nr_pages
< lvl_pages
);
2316 BUG_ON(sg_res
< lvl_pages
);
2318 nr_pages
-= lvl_pages
;
2319 iov_pfn
+= lvl_pages
;
2320 phys_pfn
+= lvl_pages
;
2321 pteval
+= lvl_pages
* VTD_PAGE_SIZE
;
2322 sg_res
-= lvl_pages
;
2324 /* If the next PTE would be the first in a new page, then we
2325 need to flush the cache on the entries we've just written.
2326 And then we'll need to recalculate 'pte', so clear it and
2327 let it get set again in the if (!pte) block above.
2329 If we're done (!nr_pages) we need to flush the cache too.
2331 Also if we've been setting superpages, we may need to
2332 recalculate 'pte' and switch back to smaller pages for the
2333 end of the mapping, if the trailing size is not enough to
2334 use another superpage (i.e. sg_res < lvl_pages). */
2336 if (!nr_pages
|| first_pte_in_page(pte
) ||
2337 (largepage_lvl
> 1 && sg_res
< lvl_pages
)) {
2338 domain_flush_cache(domain
, first_pte
,
2339 (void *)pte
- (void *)first_pte
);
2343 if (!sg_res
&& nr_pages
)
2349 static inline int domain_sg_mapping(struct dmar_domain
*domain
, unsigned long iov_pfn
,
2350 struct scatterlist
*sg
, unsigned long nr_pages
,
2353 return __domain_mapping(domain
, iov_pfn
, sg
, 0, nr_pages
, prot
);
2356 static inline int domain_pfn_mapping(struct dmar_domain
*domain
, unsigned long iov_pfn
,
2357 unsigned long phys_pfn
, unsigned long nr_pages
,
2360 return __domain_mapping(domain
, iov_pfn
, NULL
, phys_pfn
, nr_pages
, prot
);
static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
				   DMA_CCMD_GLOBAL_INVL);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
static inline void unlink_domain_info(struct device_domain_info *info)
{
	assert_spin_locked(&device_domain_lock);
	list_del(&info->link);
	list_del(&info->global);
	if (info->dev)
		info->dev->archdata.iommu = NULL;
}
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_for_each_entry_safe(info, tmp, &domain->devices, link)
		__dmar_remove_one_dev_info(info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * find_domain
 * Note: the per-device DMAR info is cached in dev->archdata.iommu
 */
static struct dmar_domain *find_domain(struct device *dev)
{
	struct device_domain_info *info;

	/* No lock here, assumes no domain exit in normal case */
	info = dev->archdata.iommu;
	if (info)
		return info->domain;

	return NULL;
}
static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
	struct device_domain_info *info;

	list_for_each_entry(info, &device_domain_list, global)
		if (info->iommu->segment == segment && info->bus == bus &&
		    info->devfn == devfn)
			return info;

	return NULL;
}
2422 static struct dmar_domain
*dmar_insert_one_dev_info(struct intel_iommu
*iommu
,
2425 struct dmar_domain
*domain
)
2427 struct dmar_domain
*found
= NULL
;
2428 struct device_domain_info
*info
;
2429 unsigned long flags
;
2432 info
= alloc_devinfo_mem();
2437 info
->devfn
= devfn
;
2438 info
->ats_supported
= info
->pasid_supported
= info
->pri_supported
= 0;
2439 info
->ats_enabled
= info
->pasid_enabled
= info
->pri_enabled
= 0;
2442 info
->domain
= domain
;
2443 info
->iommu
= iommu
;
2445 if (dev
&& dev_is_pci(dev
)) {
2446 struct pci_dev
*pdev
= to_pci_dev(info
->dev
);
2448 if (ecap_dev_iotlb_support(iommu
->ecap
) &&
2449 pci_find_ext_capability(pdev
, PCI_EXT_CAP_ID_ATS
) &&
2450 dmar_find_matched_atsr_unit(pdev
))
2451 info
->ats_supported
= 1;
2453 if (ecs_enabled(iommu
)) {
2454 if (pasid_enabled(iommu
)) {
2455 int features
= pci_pasid_features(pdev
);
2457 info
->pasid_supported
= features
| 1;
2460 if (info
->ats_supported
&& ecap_prs(iommu
->ecap
) &&
2461 pci_find_ext_capability(pdev
, PCI_EXT_CAP_ID_PRI
))
2462 info
->pri_supported
= 1;
2466 spin_lock_irqsave(&device_domain_lock
, flags
);
2468 found
= find_domain(dev
);
2471 struct device_domain_info
*info2
;
2472 info2
= dmar_search_domain_by_dev_info(iommu
->segment
, bus
, devfn
);
2474 found
= info2
->domain
;
2480 spin_unlock_irqrestore(&device_domain_lock
, flags
);
2481 free_devinfo_mem(info
);
2482 /* Caller must free the original domain */
2486 spin_lock(&iommu
->lock
);
2487 ret
= domain_attach_iommu(domain
, iommu
);
2488 spin_unlock(&iommu
->lock
);
2491 spin_unlock_irqrestore(&device_domain_lock
, flags
);
2492 free_devinfo_mem(info
);
2496 list_add(&info
->link
, &domain
->devices
);
2497 list_add(&info
->global
, &device_domain_list
);
2499 dev
->archdata
.iommu
= info
;
2500 spin_unlock_irqrestore(&device_domain_lock
, flags
);
2502 if (dev
&& domain_context_mapping(domain
, dev
)) {
2503 pr_err("Domain context map for %s failed\n", dev_name(dev
));
2504 dmar_remove_one_dev_info(domain
, dev
);
static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
	*(u16 *)opaque = alias;
	return 0;
}
2517 static struct dmar_domain
*find_or_alloc_domain(struct device
*dev
, int gaw
)
2519 struct device_domain_info
*info
= NULL
;
2520 struct dmar_domain
*domain
= NULL
;
2521 struct intel_iommu
*iommu
;
2522 u16 req_id
, dma_alias
;
2523 unsigned long flags
;
2526 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
2530 req_id
= ((u16
)bus
<< 8) | devfn
;
2532 if (dev_is_pci(dev
)) {
2533 struct pci_dev
*pdev
= to_pci_dev(dev
);
2535 pci_for_each_dma_alias(pdev
, get_last_alias
, &dma_alias
);
2537 spin_lock_irqsave(&device_domain_lock
, flags
);
2538 info
= dmar_search_domain_by_dev_info(pci_domain_nr(pdev
->bus
),
2539 PCI_BUS_NUM(dma_alias
),
2542 iommu
= info
->iommu
;
2543 domain
= info
->domain
;
2545 spin_unlock_irqrestore(&device_domain_lock
, flags
);
2547 /* DMA alias already has a domain, use it */
2552 /* Allocate and initialize new domain for the device */
2553 domain
= alloc_domain(0);
2556 if (domain_init(domain
, iommu
, gaw
)) {
2557 domain_exit(domain
);
2566 static struct dmar_domain
*set_domain_for_dev(struct device
*dev
,
2567 struct dmar_domain
*domain
)
2569 struct intel_iommu
*iommu
;
2570 struct dmar_domain
*tmp
;
2571 u16 req_id
, dma_alias
;
2574 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
2578 req_id
= ((u16
)bus
<< 8) | devfn
;
2580 if (dev_is_pci(dev
)) {
2581 struct pci_dev
*pdev
= to_pci_dev(dev
);
2583 pci_for_each_dma_alias(pdev
, get_last_alias
, &dma_alias
);
2585 /* register PCI DMA alias device */
2586 if (req_id
!= dma_alias
) {
2587 tmp
= dmar_insert_one_dev_info(iommu
, PCI_BUS_NUM(dma_alias
),
2588 dma_alias
& 0xff, NULL
, domain
);
2590 if (!tmp
|| tmp
!= domain
)
2595 tmp
= dmar_insert_one_dev_info(iommu
, bus
, devfn
, dev
, domain
);
2596 if (!tmp
|| tmp
!= domain
)
2602 static struct dmar_domain
*get_domain_for_dev(struct device
*dev
, int gaw
)
2604 struct dmar_domain
*domain
, *tmp
;
2606 domain
= find_domain(dev
);
2610 domain
= find_or_alloc_domain(dev
, gaw
);
2614 tmp
= set_domain_for_dev(dev
, domain
);
2615 if (!tmp
|| domain
!= tmp
) {
2616 domain_exit(domain
);
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long long start,
				     unsigned long long end)
{
	unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
	unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;

	if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
			  dma_to_mm_pfn(last_vpfn))) {
		pr_err("Reserving iova failed\n");
		return -ENOMEM;
	}

	pr_debug("Mapping reserved region %llx-%llx\n", start, end);
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
				  last_vpfn - first_vpfn + 1,
				  DMA_PTE_READ|DMA_PTE_WRITE);
}
2650 static int domain_prepare_identity_map(struct device
*dev
,
2651 struct dmar_domain
*domain
,
2652 unsigned long long start
,
2653 unsigned long long end
)
2655 /* For _hardware_ passthrough, don't bother. But for software
2656 passthrough, we do it anyway -- it may indicate a memory
2657 range which is reserved in E820, so which didn't get set
2658 up to start with in si_domain */
2659 if (domain
== si_domain
&& hw_pass_through
) {
2660 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2661 dev_name(dev
), start
, end
);
2665 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2666 dev_name(dev
), start
, end
);
2669 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2670 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2671 dmi_get_system_info(DMI_BIOS_VENDOR
),
2672 dmi_get_system_info(DMI_BIOS_VERSION
),
2673 dmi_get_system_info(DMI_PRODUCT_VERSION
));
2677 if (end
>> agaw_to_width(domain
->agaw
)) {
2678 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2679 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2680 agaw_to_width(domain
->agaw
),
2681 dmi_get_system_info(DMI_BIOS_VENDOR
),
2682 dmi_get_system_info(DMI_BIOS_VERSION
),
2683 dmi_get_system_info(DMI_PRODUCT_VERSION
));
2687 return iommu_domain_identity_map(domain
, start
, end
);
2690 static int iommu_prepare_identity_map(struct device
*dev
,
2691 unsigned long long start
,
2692 unsigned long long end
)
2694 struct dmar_domain
*domain
;
2697 domain
= get_domain_for_dev(dev
, DEFAULT_DOMAIN_ADDRESS_WIDTH
);
2701 ret
= domain_prepare_identity_map(dev
, domain
, start
, end
);
2703 domain_exit(domain
);
2708 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit
*rmrr
,
2711 if (dev
->archdata
.iommu
== DUMMY_DEVICE_DOMAIN_INFO
)
2713 return iommu_prepare_identity_map(dev
, rmrr
->base_address
,
#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (!pdev)
		return;

	pr_info("Prepare 0-16MiB unity mapping for LPC\n");
	ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);

	if (ret)
		pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");

	pci_dev_put(pdev);
}
#else
static inline void iommu_prepare_isa(void)
{
	return;
}
#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2742 static int md_domain_init(struct dmar_domain
*domain
, int guest_width
);
2744 static int __init
si_domain_init(int hw
)
2748 si_domain
= alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY
);
2752 if (md_domain_init(si_domain
, DEFAULT_DOMAIN_ADDRESS_WIDTH
)) {
2753 domain_exit(si_domain
);
2757 pr_debug("Identity mapping domain allocated\n");
2762 for_each_online_node(nid
) {
2763 unsigned long start_pfn
, end_pfn
;
2766 for_each_mem_pfn_range(i
, nid
, &start_pfn
, &end_pfn
, NULL
) {
2767 ret
= iommu_domain_identity_map(si_domain
,
2768 PFN_PHYS(start_pfn
), PFN_PHYS(end_pfn
));
static int identity_mapping(struct device *dev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	info = dev->archdata.iommu;
	if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
		return (info->domain == si_domain);

	return 0;
}
2791 static int domain_add_dev_info(struct dmar_domain
*domain
, struct device
*dev
)
2793 struct dmar_domain
*ndomain
;
2794 struct intel_iommu
*iommu
;
2797 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
2801 ndomain
= dmar_insert_one_dev_info(iommu
, bus
, devfn
, dev
, domain
);
2802 if (ndomain
!= domain
)
static bool device_has_rmrr(struct device *dev)
{
	struct dmar_rmrr_unit *rmrr;
	struct device *tmp;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		/*
		 * Return TRUE if this RMRR contains the device that
		 * is passed in.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
			if (tmp == dev) {
				rcu_read_unlock();
				return true;
			}
	}
	rcu_read_unlock();
	return false;
}
2832 * There are a couple cases where we need to restrict the functionality of
2833 * devices associated with RMRRs. The first is when evaluating a device for
2834 * identity mapping because problems exist when devices are moved in and out
2835 * of domains and their respective RMRR information is lost. This means that
2836 * a device with associated RMRRs will never be in a "passthrough" domain.
2837 * The second is use of the device through the IOMMU API. This interface
2838 * expects to have full control of the IOVA space for the device. We cannot
2839 * satisfy both the requirement that RMRR access is maintained and have an
2840 * unencumbered IOVA space. We also have no ability to quiesce the device's
2841 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2842 * We therefore prevent devices associated with an RMRR from participating in
2843 * the IOMMU API, which eliminates them from device assignment.
2845 * In both cases we assume that PCI USB devices with RMRRs have them largely
2846 * for historical reasons and that the RMRR space is not actively used post
2847 * boot. This exclusion may change if vendors begin to abuse it.
2849 * The same exception is made for graphics devices, with the requirement that
2850 * any use of the RMRR regions will be torn down before assigning the device
static bool device_is_rmrr_locked(struct device *dev)
{
	if (!device_has_rmrr(dev))
		return false;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
			return false;
	}

	return true;
}
2868 static int iommu_should_identity_map(struct device
*dev
, int startup
)
2871 if (dev_is_pci(dev
)) {
2872 struct pci_dev
*pdev
= to_pci_dev(dev
);
2874 if (device_is_rmrr_locked(dev
))
2877 if ((iommu_identity_mapping
& IDENTMAP_AZALIA
) && IS_AZALIA(pdev
))
2880 if ((iommu_identity_mapping
& IDENTMAP_GFX
) && IS_GFX_DEVICE(pdev
))
2883 if (!(iommu_identity_mapping
& IDENTMAP_ALL
))
2887 * We want to start off with all devices in the 1:1 domain, and
2888 * take them out later if we find they can't access all of memory.
2890 * However, we can't do this for PCI devices behind bridges,
2891 * because all PCI devices behind the same bridge will end up
2892 * with the same source-id on their transactions.
2894 * Practically speaking, we can't change things around for these
2895 * devices at run-time, because we can't be sure there'll be no
2896 * DMA transactions in flight for any of their siblings.
2898 * So PCI devices (unless they're on the root bus) as well as
2899 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2900 * the 1:1 domain, just in _case_ one of their siblings turns out
2901 * not to be able to map all of memory.
		if (!pci_is_pcie(pdev)) {
			if (!pci_is_root_bus(pdev->bus))
				return 0;
			if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
				return 0;
		} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;
	} else {
		if (device_has_rmrr(dev))
			return 0;
	}

	/*
	 * At boot time, we don't yet know if devices will be 64-bit capable.
	 * Assume that they will — if they turn out not to be, then we can
	 * take them out of the 1:1 domain later.
	 */
	if (!startup) {
		/*
		 * If the device's dma_mask is less than the system's memory
		 * size then this is not a candidate for identity mapping.
		 */
		u64 dma_mask = *dev->dma_mask;

		if (dev->coherent_dma_mask &&
		    dev->coherent_dma_mask < dma_mask)
			dma_mask = dev->coherent_dma_mask;

		return dma_mask >= dma_get_required_mask(dev);
	}

	return 1;
}
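/*
 * A sketch of the runtime check above (hypothetical device): on a system
 * with 8GiB of RAM, dma_get_required_mask() is roughly DMA_BIT_MASK(33),
 * so a device advertising only DMA_BIT_MASK(32) fails the comparison and
 * is taken out of the 1:1 domain, while a 64-bit capable device keeps its
 * identity mapping.
 */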
2937 static int __init
dev_prepare_static_identity_mapping(struct device
*dev
, int hw
)
2941 if (!iommu_should_identity_map(dev
, 1))
2944 ret
= domain_add_dev_info(si_domain
, dev
);
2946 pr_info("%s identity mapping for device %s\n",
2947 hw
? "Hardware" : "Software", dev_name(dev
));
2948 else if (ret
== -ENODEV
)
2949 /* device not associated with an iommu */
2956 static int __init
iommu_prepare_static_identity_mapping(int hw
)
2958 struct pci_dev
*pdev
= NULL
;
2959 struct dmar_drhd_unit
*drhd
;
2960 struct intel_iommu
*iommu
;
2965 for_each_pci_dev(pdev
) {
2966 ret
= dev_prepare_static_identity_mapping(&pdev
->dev
, hw
);
2971 for_each_active_iommu(iommu
, drhd
)
2972 for_each_active_dev_scope(drhd
->devices
, drhd
->devices_cnt
, i
, dev
) {
2973 struct acpi_device_physical_node
*pn
;
2974 struct acpi_device
*adev
;
2976 if (dev
->bus
!= &acpi_bus_type
)
2979 adev
= to_acpi_device(dev
);
2980 mutex_lock(&adev
->physical_node_lock
);
2981 list_for_each_entry(pn
, &adev
->physical_node_list
, node
) {
2982 ret
= dev_prepare_static_identity_mapping(pn
->dev
, hw
);
2986 mutex_unlock(&adev
->physical_node_lock
);
2994 static void intel_iommu_init_qi(struct intel_iommu
*iommu
)
2997 * Start from the sane iommu hardware state.
2998 * If the queued invalidation is already initialized by us
2999 * (for example, while enabling interrupt-remapping) then
3000 * we got the things already rolling from a sane state.
3004 * Clear any previous faults.
3006 dmar_fault(-1, iommu
);
3008 * Disable queued invalidation if supported and already enabled
3009 * before OS handover.
3011 dmar_disable_qi(iommu
);
3014 if (dmar_enable_qi(iommu
)) {
3016 * Queued Invalidate not enabled, use Register Based Invalidate
3018 iommu
->flush
.flush_context
= __iommu_flush_context
;
3019 iommu
->flush
.flush_iotlb
= __iommu_flush_iotlb
;
3020 pr_info("%s: Using Register based invalidation\n",
3023 iommu
->flush
.flush_context
= qi_flush_context
;
3024 iommu
->flush
.flush_iotlb
= qi_flush_iotlb
;
3025 pr_info("%s: Using Queued invalidation\n", iommu
->name
);
3029 static int copy_context_table(struct intel_iommu
*iommu
,
3030 struct root_entry
*old_re
,
3031 struct context_entry
**tbl
,
3034 int tbl_idx
, pos
= 0, idx
, devfn
, ret
= 0, did
;
3035 struct context_entry
*new_ce
= NULL
, ce
;
3036 struct context_entry
*old_ce
= NULL
;
3037 struct root_entry re
;
3038 phys_addr_t old_ce_phys
;
3040 tbl_idx
= ext
? bus
* 2 : bus
;
3041 memcpy(&re
, old_re
, sizeof(re
));
3043 for (devfn
= 0; devfn
< 256; devfn
++) {
3044 /* First calculate the correct index */
3045 idx
= (ext
? devfn
* 2 : devfn
) % 256;
3048 /* First save what we may have and clean up */
3050 tbl
[tbl_idx
] = new_ce
;
3051 __iommu_flush_cache(iommu
, new_ce
,
3061 old_ce_phys
= root_entry_lctp(&re
);
3063 old_ce_phys
= root_entry_uctp(&re
);
3066 if (ext
&& devfn
== 0) {
3067 /* No LCTP, try UCTP */
3076 old_ce
= memremap(old_ce_phys
, PAGE_SIZE
,
3081 new_ce
= alloc_pgtable_page(iommu
->node
);
3088 /* Now copy the context entry */
3089 memcpy(&ce
, old_ce
+ idx
, sizeof(ce
));
3091 if (!__context_present(&ce
))
3094 did
= context_domain_id(&ce
);
3095 if (did
>= 0 && did
< cap_ndoms(iommu
->cap
))
3096 set_bit(did
, iommu
->domain_ids
);
		/*
		 * We need a marker for copied context entries. This
		 * marker needs to work for the old format as well as
		 * for extended context entries.
		 *
		 * Bit 67 of the context entry is used. In the old
		 * format this bit is available to software, in the
		 * extended format it is the PGE bit, but PGE is ignored
		 * by HW if PASIDs are disabled (and thus still
		 * available).
		 *
		 * So disable PASIDs first and then mark the entry
		 * copied. This means that we don't copy PASID
		 * translations from the old kernel, but this is fine as
		 * faults there are not fatal.
		 */
		context_clear_pasid_enable(&ce);
		context_set_copied(&ce);
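		/*
		 * A minimal sketch of the marker helper, assuming bit 67
		 * corresponds to bit 3 of the upper 64-bit word of the
		 * context entry (not a definitive definition):
		 *
		 *	static inline void context_set_copied(struct context_entry *c)
		 *	{
		 *		c->hi |= (1ULL << 3);
		 *	}
		 */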
3120 tbl
[tbl_idx
+ pos
] = new_ce
;
3122 __iommu_flush_cache(iommu
, new_ce
, VTD_PAGE_SIZE
);
3131 static int copy_translation_tables(struct intel_iommu
*iommu
)
3133 struct context_entry
**ctxt_tbls
;
3134 struct root_entry
*old_rt
;
3135 phys_addr_t old_rt_phys
;
3136 int ctxt_table_entries
;
3137 unsigned long flags
;
3142 rtaddr_reg
= dmar_readq(iommu
->reg
+ DMAR_RTADDR_REG
);
3143 ext
= !!(rtaddr_reg
& DMA_RTADDR_RTT
);
3144 new_ext
= !!ecap_ecs(iommu
->ecap
);
3147 * The RTT bit can only be changed when translation is disabled,
3148 * but disabling translation means to open a window for data
3149 * corruption. So bail out and don't copy anything if we would
3150 * have to change the bit.
3155 old_rt_phys
= rtaddr_reg
& VTD_PAGE_MASK
;
3159 old_rt
= memremap(old_rt_phys
, PAGE_SIZE
, MEMREMAP_WB
);
3163 /* This is too big for the stack - allocate it from slab */
3164 ctxt_table_entries
= ext
? 512 : 256;
3166 ctxt_tbls
= kzalloc(ctxt_table_entries
* sizeof(void *), GFP_KERNEL
);
3170 for (bus
= 0; bus
< 256; bus
++) {
3171 ret
= copy_context_table(iommu
, &old_rt
[bus
],
3172 ctxt_tbls
, bus
, ext
);
3174 pr_err("%s: Failed to copy context table for bus %d\n",
3180 spin_lock_irqsave(&iommu
->lock
, flags
);
3182 /* Context tables are copied, now write them to the root_entry table */
3183 for (bus
= 0; bus
< 256; bus
++) {
3184 int idx
= ext
? bus
* 2 : bus
;
3187 if (ctxt_tbls
[idx
]) {
3188 val
= virt_to_phys(ctxt_tbls
[idx
]) | 1;
3189 iommu
->root_entry
[bus
].lo
= val
;
3192 if (!ext
|| !ctxt_tbls
[idx
+ 1])
3195 val
= virt_to_phys(ctxt_tbls
[idx
+ 1]) | 1;
3196 iommu
->root_entry
[bus
].hi
= val
;
3199 spin_unlock_irqrestore(&iommu
->lock
, flags
);
3203 __iommu_flush_cache(iommu
, iommu
->root_entry
, PAGE_SIZE
);
3213 static int __init
init_dmars(void)
3215 struct dmar_drhd_unit
*drhd
;
3216 struct dmar_rmrr_unit
*rmrr
;
3217 bool copied_tables
= false;
3219 struct intel_iommu
*iommu
;
3225 * initialize and program root entry to not present
3228 for_each_drhd_unit(drhd
) {
3230 * lock not needed as this is only incremented in the single
3231 * threaded kernel __init code path all other access are read
3234 if (g_num_of_iommus
< DMAR_UNITS_SUPPORTED
) {
3238 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED
);
3241 /* Preallocate enough resources for IOMMU hot-addition */
3242 if (g_num_of_iommus
< DMAR_UNITS_SUPPORTED
)
3243 g_num_of_iommus
= DMAR_UNITS_SUPPORTED
;
3245 g_iommus
= kcalloc(g_num_of_iommus
, sizeof(struct intel_iommu
*),
3248 pr_err("Allocating global iommu array failed\n");
3253 for_each_possible_cpu(cpu
) {
3254 struct deferred_flush_data
*dfd
= per_cpu_ptr(&deferred_flush
,
3257 dfd
->tables
= kzalloc(g_num_of_iommus
*
3258 sizeof(struct deferred_flush_table
),
3265 spin_lock_init(&dfd
->lock
);
3266 setup_timer(&dfd
->timer
, flush_unmaps_timeout
, cpu
);
3269 for_each_active_iommu(iommu
, drhd
) {
3270 g_iommus
[iommu
->seq_id
] = iommu
;
3272 intel_iommu_init_qi(iommu
);
3274 ret
= iommu_init_domains(iommu
);
3278 init_translation_status(iommu
);
3280 if (translation_pre_enabled(iommu
) && !is_kdump_kernel()) {
3281 iommu_disable_translation(iommu
);
3282 clear_translation_pre_enabled(iommu
);
3283 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3289 * we could share the same root & context tables
3290 * among all IOMMU's. Need to Split it later.
3292 ret
= iommu_alloc_root_entry(iommu
);
3296 if (translation_pre_enabled(iommu
)) {
3297 pr_info("Translation already enabled - trying to copy translation structures\n");
3299 ret
= copy_translation_tables(iommu
);
3302 * We found the IOMMU with translation
3303 * enabled - but failed to copy over the
3304 * old root-entry table. Try to proceed
3305 * by disabling translation now and
3306 * allocating a clean root-entry table.
3307 * This might cause DMAR faults, but
3308 * probably the dump will still succeed.
3310 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3312 iommu_disable_translation(iommu
);
3313 clear_translation_pre_enabled(iommu
);
3315 pr_info("Copied translation tables from previous kernel for %s\n",
3317 copied_tables
= true;
3321 if (!ecap_pass_through(iommu
->ecap
))
3322 hw_pass_through
= 0;
3323 #ifdef CONFIG_INTEL_IOMMU_SVM
3324 if (pasid_enabled(iommu
))
3325 intel_svm_alloc_pasid_tables(iommu
);
3330 * Now that qi is enabled on all iommus, set the root entry and flush
3331 * caches. This is required on some Intel X58 chipsets, otherwise the
3332 * flush_context function will loop forever and the boot hangs.
3334 for_each_active_iommu(iommu
, drhd
) {
3335 iommu_flush_write_buffer(iommu
);
3336 iommu_set_root_entry(iommu
);
3337 iommu
->flush
.flush_context(iommu
, 0, 0, 0, DMA_CCMD_GLOBAL_INVL
);
3338 iommu
->flush
.flush_iotlb(iommu
, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH
);
3341 if (iommu_pass_through
)
3342 iommu_identity_mapping
|= IDENTMAP_ALL
;
3344 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3345 iommu_identity_mapping
|= IDENTMAP_GFX
;
3348 check_tylersburg_isoch();
3350 if (iommu_identity_mapping
) {
3351 ret
= si_domain_init(hw_pass_through
);
3358 * If we copied translations from a previous kernel in the kdump
3359 * case, we can not assign the devices to domains now, as that
3360 * would eliminate the old mappings. So skip this part and defer
3361 * the assignment to device driver initialization time.
3367 * If pass through is not set or not enabled, setup context entries for
3368 * identity mappings for rmrr, gfx, and isa and may fall back to static
3369 * identity mapping if iommu_identity_mapping is set.
3371 if (iommu_identity_mapping
) {
3372 ret
= iommu_prepare_static_identity_mapping(hw_pass_through
);
3374 pr_crit("Failed to setup IOMMU pass-through\n");
3380 * for each dev attached to rmrr
3382 * locate drhd for dev, alloc domain for dev
3383 * allocate free domain
3384 * allocate page table entries for rmrr
3385 * if context not allocated for bus
3386 * allocate and init context
3387 * set present in root table for this bus
3388 * init context with domain, translation etc
3392 pr_info("Setting RMRR:\n");
3393 for_each_rmrr_units(rmrr
) {
3394 /* some BIOS lists non-exist devices in DMAR table. */
3395 for_each_active_dev_scope(rmrr
->devices
, rmrr
->devices_cnt
,
3397 ret
= iommu_prepare_rmrr_dev(rmrr
, dev
);
3399 pr_err("Mapping reserved region failed\n");
3403 iommu_prepare_isa();
3410 * global invalidate context cache
3411 * global invalidate iotlb
3412 * enable translation
3414 for_each_iommu(iommu
, drhd
) {
3415 if (drhd
->ignored
) {
3417 * we always have to disable PMRs or DMA may fail on
3421 iommu_disable_protect_mem_regions(iommu
);
3425 iommu_flush_write_buffer(iommu
);
3427 #ifdef CONFIG_INTEL_IOMMU_SVM
3428 if (pasid_enabled(iommu
) && ecap_prs(iommu
->ecap
)) {
3429 ret
= intel_svm_enable_prq(iommu
);
3434 ret
= dmar_set_interrupt(iommu
);
3438 if (!translation_pre_enabled(iommu
))
3439 iommu_enable_translation(iommu
);
3441 iommu_disable_protect_mem_regions(iommu
);
3447 for_each_active_iommu(iommu
, drhd
) {
3448 disable_dmar_iommu(iommu
);
3449 free_dmar_iommu(iommu
);
3452 for_each_possible_cpu(cpu
)
3453 kfree(per_cpu_ptr(&deferred_flush
, cpu
)->tables
);
/* This takes a number of _MM_ pages, not VTD pages */
static unsigned long intel_alloc_iova(struct device *dev,
				      struct dmar_domain *domain,
				      unsigned long nrpages, uint64_t dma_mask)
{
	unsigned long iova_pfn = 0;

	/* Restrict dma_mask to the width that the iommu can handle */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
	/* Ensure we reserve the whole size-aligned region */
	nrpages = __roundup_pow_of_two(nrpages);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
		/*
		 * First try to allocate an io virtual address in
		 * DMA_BIT_MASK(32) and if that fails then try allocating
		 * from higher range
		 */
		iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
					   IOVA_PFN(DMA_BIT_MASK(32)));
		if (iova_pfn)
			return iova_pfn;
	}
	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
	if (unlikely(!iova_pfn)) {
		pr_err("Allocating %ld-page iova for %s failed",
		       nrpages, dev_name(dev));
		return 0;
	}

	return iova_pfn;
}
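/*
 * Usage sketch (hypothetical values): mapping a 3-page buffer for a
 * 64-bit capable device with dmar_forcedac off
 *
 *	nrpages  = __roundup_pow_of_two(3);		-> 4
 *	iova_pfn = alloc_iova_fast(&domain->iovad, 4,
 *				   IOVA_PFN(DMA_BIT_MASK(32)));
 *
 * i.e. the request is rounded up to a size-aligned block and, when
 * possible, placed below 4GiB; only if that range is exhausted does the
 * second call allocate from the device's full mask.
 */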
3492 static struct dmar_domain
*__get_valid_domain_for_dev(struct device
*dev
)
3494 struct dmar_domain
*domain
, *tmp
;
3495 struct dmar_rmrr_unit
*rmrr
;
3496 struct device
*i_dev
;
3499 domain
= find_domain(dev
);
3503 domain
= find_or_alloc_domain(dev
, DEFAULT_DOMAIN_ADDRESS_WIDTH
);
3507 /* We have a new domain - setup possible RMRRs for the device */
3509 for_each_rmrr_units(rmrr
) {
3510 for_each_active_dev_scope(rmrr
->devices
, rmrr
->devices_cnt
,
3515 ret
= domain_prepare_identity_map(dev
, domain
,
3519 dev_err(dev
, "Mapping reserved region failed\n");
3524 tmp
= set_domain_for_dev(dev
, domain
);
3525 if (!tmp
|| domain
!= tmp
) {
3526 domain_exit(domain
);
3533 pr_err("Allocating domain for %s failed\n", dev_name(dev
));
3539 static inline struct dmar_domain
*get_valid_domain_for_dev(struct device
*dev
)
3541 struct device_domain_info
*info
;
3543 /* No lock here, assumes no domain exit in normal case */
3544 info
= dev
->archdata
.iommu
;
3546 return info
->domain
;
3548 return __get_valid_domain_for_dev(dev
);
3551 /* Check if the dev needs to go through non-identity map and unmap process.*/
3552 static int iommu_no_mapping(struct device
*dev
)
3556 if (iommu_dummy(dev
))
3559 if (!iommu_identity_mapping
)
3562 found
= identity_mapping(dev
);
3564 if (iommu_should_identity_map(dev
, 0))
3568 * 32 bit DMA is removed from si_domain and fall back
3569 * to non-identity mapping.
3571 dmar_remove_one_dev_info(si_domain
, dev
);
3572 pr_info("32bit %s uses non-identity mapping\n",
3578 * In case of a detached 64 bit DMA device from vm, the device
3579 * is put into si_domain for identity mapping.
3581 if (iommu_should_identity_map(dev
, 0)) {
3583 ret
= domain_add_dev_info(si_domain
, dev
);
3585 pr_info("64bit %s uses identity mapping\n",
3595 static dma_addr_t
__intel_map_single(struct device
*dev
, phys_addr_t paddr
,
3596 size_t size
, int dir
, u64 dma_mask
)
3598 struct dmar_domain
*domain
;
3599 phys_addr_t start_paddr
;
3600 unsigned long iova_pfn
;
3603 struct intel_iommu
*iommu
;
3604 unsigned long paddr_pfn
= paddr
>> PAGE_SHIFT
;
3606 BUG_ON(dir
== DMA_NONE
);
3608 if (iommu_no_mapping(dev
))
3611 domain
= get_valid_domain_for_dev(dev
);
3615 iommu
= domain_get_iommu(domain
);
3616 size
= aligned_nrpages(paddr
, size
);
3618 iova_pfn
= intel_alloc_iova(dev
, domain
, dma_to_mm_pfn(size
), dma_mask
);
3623 * Check if DMAR supports zero-length reads on write only
3626 if (dir
== DMA_TO_DEVICE
|| dir
== DMA_BIDIRECTIONAL
|| \
3627 !cap_zlr(iommu
->cap
))
3628 prot
|= DMA_PTE_READ
;
3629 if (dir
== DMA_FROM_DEVICE
|| dir
== DMA_BIDIRECTIONAL
)
3630 prot
|= DMA_PTE_WRITE
;
3632 * paddr - (paddr + size) might be partial page, we should map the whole
3633 * page. Note: if two part of one page are separately mapped, we
3634 * might have two guest_addr mapping to the same host paddr, but this
3635 * is not a big problem
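	/*
	 * Worked example (illustrative only): for paddr = 0x12345678 and a
	 * 0x10-byte request, aligned_nrpages() above already rounded the
	 * size up to one full page, and the sub-page offset
	 * (paddr & ~PAGE_MASK = 0x678) is added back to start_paddr below,
	 * so the caller still gets a byte-accurate DMA handle.
	 */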
3637 ret
= domain_pfn_mapping(domain
, mm_to_dma_pfn(iova_pfn
),
3638 mm_to_dma_pfn(paddr_pfn
), size
, prot
);
3642 /* it's a non-present to present mapping. Only flush if caching mode */
3643 if (cap_caching_mode(iommu
->cap
))
3644 iommu_flush_iotlb_psi(iommu
, domain
,
3645 mm_to_dma_pfn(iova_pfn
),
3648 iommu_flush_write_buffer(iommu
);
3650 start_paddr
= (phys_addr_t
)iova_pfn
<< PAGE_SHIFT
;
3651 start_paddr
+= paddr
& ~PAGE_MASK
;
3656 free_iova_fast(&domain
->iovad
, iova_pfn
, dma_to_mm_pfn(size
));
3657 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3658 dev_name(dev
), size
, (unsigned long long)paddr
, dir
);
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 unsigned long attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, *dev->dma_mask);
}
3671 static void flush_unmaps(struct deferred_flush_data
*flush_data
)
3675 flush_data
->timer_on
= 0;
3677 /* just flush them all */
3678 for (i
= 0; i
< g_num_of_iommus
; i
++) {
3679 struct intel_iommu
*iommu
= g_iommus
[i
];
3680 struct deferred_flush_table
*flush_table
=
3681 &flush_data
->tables
[i
];
3685 if (!flush_table
->next
)
3688 /* In caching mode, global flushes turn emulation expensive */
3689 if (!cap_caching_mode(iommu
->cap
))
3690 iommu
->flush
.flush_iotlb(iommu
, 0, 0, 0,
3691 DMA_TLB_GLOBAL_FLUSH
);
3692 for (j
= 0; j
< flush_table
->next
; j
++) {
3694 struct deferred_flush_entry
*entry
=
3695 &flush_table
->entries
[j
];
3696 unsigned long iova_pfn
= entry
->iova_pfn
;
3697 unsigned long nrpages
= entry
->nrpages
;
3698 struct dmar_domain
*domain
= entry
->domain
;
3699 struct page
*freelist
= entry
->freelist
;
3701 /* On real hardware multiple invalidations are expensive */
3702 if (cap_caching_mode(iommu
->cap
))
3703 iommu_flush_iotlb_psi(iommu
, domain
,
3704 mm_to_dma_pfn(iova_pfn
),
3705 nrpages
, !freelist
, 0);
3707 mask
= ilog2(nrpages
);
3708 iommu_flush_dev_iotlb(domain
,
3709 (uint64_t)iova_pfn
<< PAGE_SHIFT
, mask
);
3711 free_iova_fast(&domain
->iovad
, iova_pfn
, nrpages
);
3713 dma_free_pagelist(freelist
);
3715 flush_table
->next
= 0;
3718 flush_data
->size
= 0;
static void flush_unmaps_timeout(unsigned long cpuid)
{
	struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
	unsigned long flags;

	spin_lock_irqsave(&flush_data->lock, flags);
	flush_unmaps(flush_data);
	spin_unlock_irqrestore(&flush_data->lock, flags);
}
3731 static void add_unmap(struct dmar_domain
*dom
, unsigned long iova_pfn
,
3732 unsigned long nrpages
, struct page
*freelist
)
3734 unsigned long flags
;
3735 int entry_id
, iommu_id
;
3736 struct intel_iommu
*iommu
;
3737 struct deferred_flush_entry
*entry
;
3738 struct deferred_flush_data
*flush_data
;
3742 flush_data
= per_cpu_ptr(&deferred_flush
, cpuid
);
3744 /* Flush all CPUs' entries to avoid deferring too much. If
3745 * this becomes a bottleneck, can just flush us, and rely on
3746 * flush timer for the rest.
3748 if (flush_data
->size
== HIGH_WATER_MARK
) {
3751 for_each_online_cpu(cpu
)
3752 flush_unmaps_timeout(cpu
);
3755 spin_lock_irqsave(&flush_data
->lock
, flags
);
3757 iommu
= domain_get_iommu(dom
);
3758 iommu_id
= iommu
->seq_id
;
3760 entry_id
= flush_data
->tables
[iommu_id
].next
;
3761 ++(flush_data
->tables
[iommu_id
].next
);
3763 entry
= &flush_data
->tables
[iommu_id
].entries
[entry_id
];
3764 entry
->domain
= dom
;
3765 entry
->iova_pfn
= iova_pfn
;
3766 entry
->nrpages
= nrpages
;
3767 entry
->freelist
= freelist
;
3769 if (!flush_data
->timer_on
) {
3770 mod_timer(&flush_data
->timer
, jiffies
+ msecs_to_jiffies(10));
3771 flush_data
->timer_on
= 1;
3774 spin_unlock_irqrestore(&flush_data
->lock
, flags
);
3779 static void intel_unmap(struct device
*dev
, dma_addr_t dev_addr
, size_t size
)
3781 struct dmar_domain
*domain
;
3782 unsigned long start_pfn
, last_pfn
;
3783 unsigned long nrpages
;
3784 unsigned long iova_pfn
;
3785 struct intel_iommu
*iommu
;
3786 struct page
*freelist
;
3788 if (iommu_no_mapping(dev
))
3791 domain
= find_domain(dev
);
3794 iommu
= domain_get_iommu(domain
);
3796 iova_pfn
= IOVA_PFN(dev_addr
);
3798 nrpages
= aligned_nrpages(dev_addr
, size
);
3799 start_pfn
= mm_to_dma_pfn(iova_pfn
);
3800 last_pfn
= start_pfn
+ nrpages
- 1;
3802 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3803 dev_name(dev
), start_pfn
, last_pfn
);
3805 freelist
= domain_unmap(domain
, start_pfn
, last_pfn
);
3807 if (intel_iommu_strict
) {
3808 iommu_flush_iotlb_psi(iommu
, domain
, start_pfn
,
3809 nrpages
, !freelist
, 0);
3811 free_iova_fast(&domain
->iovad
, iova_pfn
, dma_to_mm_pfn(nrpages
));
3812 dma_free_pagelist(freelist
);
3814 add_unmap(domain
, iova_pfn
, nrpages
, freelist
);
3816 * queue up the release of the unmap to save the 1/6th of the
3817 * cpu used up by the iotlb flush operation...
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
{
	intel_unmap(dev, dev_addr, size);
}
3829 static void *intel_alloc_coherent(struct device
*dev
, size_t size
,
3830 dma_addr_t
*dma_handle
, gfp_t flags
,
3831 unsigned long attrs
)
3833 struct page
*page
= NULL
;
3836 size
= PAGE_ALIGN(size
);
3837 order
= get_order(size
);
3839 if (!iommu_no_mapping(dev
))
3840 flags
&= ~(GFP_DMA
| GFP_DMA32
);
3841 else if (dev
->coherent_dma_mask
< dma_get_required_mask(dev
)) {
3842 if (dev
->coherent_dma_mask
< DMA_BIT_MASK(32))
3848 if (gfpflags_allow_blocking(flags
)) {
3849 unsigned int count
= size
>> PAGE_SHIFT
;
3851 page
= dma_alloc_from_contiguous(dev
, count
, order
);
3852 if (page
&& iommu_no_mapping(dev
) &&
3853 page_to_phys(page
) + size
> dev
->coherent_dma_mask
) {
3854 dma_release_from_contiguous(dev
, page
, count
);
3860 page
= alloc_pages(flags
, order
);
3863 memset(page_address(page
), 0, size
);
3865 *dma_handle
= __intel_map_single(dev
, page_to_phys(page
), size
,
3867 dev
->coherent_dma_mask
);
3869 return page_address(page
);
3870 if (!dma_release_from_contiguous(dev
, page
, size
>> PAGE_SHIFT
))
3871 __free_pages(page
, order
);
static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_handle, unsigned long attrs)
{
	int order;
	struct page *page = virt_to_page(vaddr);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap(dev, dma_handle, size);
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, order);
}
static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   unsigned long attrs)
{
	dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
	unsigned long nrpages = 0;
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nelems, i) {
		nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
	}

	intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
}
static int intel_nontranslate_map_sg(struct device *hddev,
				     struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = sg_phys(sg);
		sg->dma_length = sg->length;
	}
	return nelems;
}
3920 static int intel_map_sg(struct device
*dev
, struct scatterlist
*sglist
, int nelems
,
3921 enum dma_data_direction dir
, unsigned long attrs
)
3924 struct dmar_domain
*domain
;
3927 unsigned long iova_pfn
;
3929 struct scatterlist
*sg
;
3930 unsigned long start_vpfn
;
3931 struct intel_iommu
*iommu
;
3933 BUG_ON(dir
== DMA_NONE
);
3934 if (iommu_no_mapping(dev
))
3935 return intel_nontranslate_map_sg(dev
, sglist
, nelems
, dir
);
3937 domain
= get_valid_domain_for_dev(dev
);
3941 iommu
= domain_get_iommu(domain
);
3943 for_each_sg(sglist
, sg
, nelems
, i
)
3944 size
+= aligned_nrpages(sg
->offset
, sg
->length
);
3946 iova_pfn
= intel_alloc_iova(dev
, domain
, dma_to_mm_pfn(size
),
3949 sglist
->dma_length
= 0;
3954 * Check if DMAR supports zero-length reads on write only
3957 if (dir
== DMA_TO_DEVICE
|| dir
== DMA_BIDIRECTIONAL
|| \
3958 !cap_zlr(iommu
->cap
))
3959 prot
|= DMA_PTE_READ
;
3960 if (dir
== DMA_FROM_DEVICE
|| dir
== DMA_BIDIRECTIONAL
)
3961 prot
|= DMA_PTE_WRITE
;
3963 start_vpfn
= mm_to_dma_pfn(iova_pfn
);
3965 ret
= domain_sg_mapping(domain
, start_vpfn
, sglist
, size
, prot
);
3966 if (unlikely(ret
)) {
3967 dma_pte_free_pagetable(domain
, start_vpfn
,
3968 start_vpfn
+ size
- 1);
3969 free_iova_fast(&domain
->iovad
, iova_pfn
, dma_to_mm_pfn(size
));
3973 /* it's a non-present to present mapping. Only flush if caching mode */
3974 if (cap_caching_mode(iommu
->cap
))
3975 iommu_flush_iotlb_psi(iommu
, domain
, start_vpfn
, size
, 0, 1);
3977 iommu_flush_write_buffer(iommu
);
static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};
3997 static inline int iommu_domain_cache_init(void)
4001 iommu_domain_cache
= kmem_cache_create("iommu_domain",
4002 sizeof(struct dmar_domain
),
4007 if (!iommu_domain_cache
) {
4008 pr_err("Couldn't create iommu_domain cache\n");
4015 static inline int iommu_devinfo_cache_init(void)
4019 iommu_devinfo_cache
= kmem_cache_create("iommu_devinfo",
4020 sizeof(struct device_domain_info
),
4024 if (!iommu_devinfo_cache
) {
4025 pr_err("Couldn't create devinfo cache\n");
4032 static int __init
iommu_init_mempool(void)
4035 ret
= iova_cache_get();
4039 ret
= iommu_domain_cache_init();
4043 ret
= iommu_devinfo_cache_init();
4047 kmem_cache_destroy(iommu_domain_cache
);
4054 static void __init
iommu_exit_mempool(void)
4056 kmem_cache_destroy(iommu_devinfo_cache
);
4057 kmem_cache_destroy(iommu_domain_cache
);
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
	struct dmar_drhd_unit *drhd;
	u32 vtbar;
	int rc;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
	if (rc) {
		/* "can't" happen */
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
		return;
	}
	vtbar &= 0xffff0000;

	/* we know that this IOMMU should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
			    TAINT_FIRMWARE_WORKAROUND,
			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
4089 static void __init
init_no_remapping_devices(void)
4091 struct dmar_drhd_unit
*drhd
;
4095 for_each_drhd_unit(drhd
) {
4096 if (!drhd
->include_all
) {
4097 for_each_active_dev_scope(drhd
->devices
,
4098 drhd
->devices_cnt
, i
, dev
)
4100 /* ignore DMAR unit if no devices exist */
4101 if (i
== drhd
->devices_cnt
)
4106 for_each_active_drhd_unit(drhd
) {
4107 if (drhd
->include_all
)
4110 for_each_active_dev_scope(drhd
->devices
,
4111 drhd
->devices_cnt
, i
, dev
)
4112 if (!dev_is_pci(dev
) || !IS_GFX_DEVICE(to_pci_dev(dev
)))
4114 if (i
< drhd
->devices_cnt
)
4117 /* This IOMMU has *only* gfx devices. Either bypass it or
4118 set the gfx_mapped flag, as appropriate */
4120 intel_iommu_gfx_mapped
= 1;
4123 for_each_active_dev_scope(drhd
->devices
,
4124 drhd
->devices_cnt
, i
, dev
)
4125 dev
->archdata
.iommu
= DUMMY_DEVICE_DOMAIN_INFO
;
4130 #ifdef CONFIG_SUSPEND
4131 static int init_iommu_hw(void)
4133 struct dmar_drhd_unit
*drhd
;
4134 struct intel_iommu
*iommu
= NULL
;
4136 for_each_active_iommu(iommu
, drhd
)
4138 dmar_reenable_qi(iommu
);
4140 for_each_iommu(iommu
, drhd
) {
4141 if (drhd
->ignored
) {
4143 * we always have to disable PMRs or DMA may fail on
4147 iommu_disable_protect_mem_regions(iommu
);
4151 iommu_flush_write_buffer(iommu
);
4153 iommu_set_root_entry(iommu
);
4155 iommu
->flush
.flush_context(iommu
, 0, 0, 0,
4156 DMA_CCMD_GLOBAL_INVL
);
4157 iommu
->flush
.flush_iotlb(iommu
, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH
);
4158 iommu_enable_translation(iommu
);
4159 iommu_disable_protect_mem_regions(iommu
);
4165 static void iommu_flush_all(void)
4167 struct dmar_drhd_unit
*drhd
;
4168 struct intel_iommu
*iommu
;
4170 for_each_active_iommu(iommu
, drhd
) {
4171 iommu
->flush
.flush_context(iommu
, 0, 0, 0,
4172 DMA_CCMD_GLOBAL_INVL
);
4173 iommu
->flush
.flush_iotlb(iommu
, 0, 0, 0,
4174 DMA_TLB_GLOBAL_FLUSH
);
4178 static int iommu_suspend(void)
4180 struct dmar_drhd_unit
*drhd
;
4181 struct intel_iommu
*iommu
= NULL
;
4184 for_each_active_iommu(iommu
, drhd
) {
4185 iommu
->iommu_state
= kzalloc(sizeof(u32
) * MAX_SR_DMAR_REGS
,
4187 if (!iommu
->iommu_state
)
4193 for_each_active_iommu(iommu
, drhd
) {
4194 iommu_disable_translation(iommu
);
4196 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
4198 iommu
->iommu_state
[SR_DMAR_FECTL_REG
] =
4199 readl(iommu
->reg
+ DMAR_FECTL_REG
);
4200 iommu
->iommu_state
[SR_DMAR_FEDATA_REG
] =
4201 readl(iommu
->reg
+ DMAR_FEDATA_REG
);
4202 iommu
->iommu_state
[SR_DMAR_FEADDR_REG
] =
4203 readl(iommu
->reg
+ DMAR_FEADDR_REG
);
4204 iommu
->iommu_state
[SR_DMAR_FEUADDR_REG
] =
4205 readl(iommu
->reg
+ DMAR_FEUADDR_REG
);
4207 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
4212 for_each_active_iommu(iommu
, drhd
)
4213 kfree(iommu
->iommu_state
);
4218 static void iommu_resume(void)
4220 struct dmar_drhd_unit
*drhd
;
4221 struct intel_iommu
*iommu
= NULL
;
4224 if (init_iommu_hw()) {
4226 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4228 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4232 for_each_active_iommu(iommu
, drhd
) {
4234 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
4236 writel(iommu
->iommu_state
[SR_DMAR_FECTL_REG
],
4237 iommu
->reg
+ DMAR_FECTL_REG
);
4238 writel(iommu
->iommu_state
[SR_DMAR_FEDATA_REG
],
4239 iommu
->reg
+ DMAR_FEDATA_REG
);
4240 writel(iommu
->iommu_state
[SR_DMAR_FEADDR_REG
],
4241 iommu
->reg
+ DMAR_FEADDR_REG
);
4242 writel(iommu
->iommu_state
[SR_DMAR_FEUADDR_REG
],
4243 iommu
->reg
+ DMAR_FEUADDR_REG
);
4245 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
4248 for_each_active_iommu(iommu
, drhd
)
4249 kfree(iommu
->iommu_state
);
4252 static struct syscore_ops iommu_syscore_ops
= {
4253 .resume
= iommu_resume
,
4254 .suspend
= iommu_suspend
,
4257 static void __init
init_iommu_pm_ops(void)
4259 register_syscore_ops(&iommu_syscore_ops
);
4263 static inline void init_iommu_pm_ops(void) {}
4264 #endif /* CONFIG_PM */
4267 int __init
dmar_parse_one_rmrr(struct acpi_dmar_header
*header
, void *arg
)
4269 struct acpi_dmar_reserved_memory
*rmrr
;
4270 struct dmar_rmrr_unit
*rmrru
;
4272 rmrru
= kzalloc(sizeof(*rmrru
), GFP_KERNEL
);
4276 rmrru
->hdr
= header
;
4277 rmrr
= (struct acpi_dmar_reserved_memory
*)header
;
4278 rmrru
->base_address
= rmrr
->base_address
;
4279 rmrru
->end_address
= rmrr
->end_address
;
4280 rmrru
->devices
= dmar_alloc_dev_scope((void *)(rmrr
+ 1),
4281 ((void *)rmrr
) + rmrr
->header
.length
,
4282 &rmrru
->devices_cnt
);
4283 if (rmrru
->devices_cnt
&& rmrru
->devices
== NULL
) {
4288 list_add(&rmrru
->list
, &dmar_rmrr_units
);
4293 static struct dmar_atsr_unit
*dmar_find_atsr(struct acpi_dmar_atsr
*atsr
)
4295 struct dmar_atsr_unit
*atsru
;
4296 struct acpi_dmar_atsr
*tmp
;
4298 list_for_each_entry_rcu(atsru
, &dmar_atsr_units
, list
) {
4299 tmp
= (struct acpi_dmar_atsr
*)atsru
->hdr
;
4300 if (atsr
->segment
!= tmp
->segment
)
4302 if (atsr
->header
.length
!= tmp
->header
.length
)
4304 if (memcmp(atsr
, tmp
, atsr
->header
.length
) == 0)
4311 int dmar_parse_one_atsr(struct acpi_dmar_header
*hdr
, void *arg
)
4313 struct acpi_dmar_atsr
*atsr
;
4314 struct dmar_atsr_unit
*atsru
;
4316 if (system_state
!= SYSTEM_BOOTING
&& !intel_iommu_enabled
)
4319 atsr
= container_of(hdr
, struct acpi_dmar_atsr
, header
);
4320 atsru
= dmar_find_atsr(atsr
);
4324 atsru
= kzalloc(sizeof(*atsru
) + hdr
->length
, GFP_KERNEL
);
4329 * If memory is allocated from slab by ACPI _DSM method, we need to
4330 * copy the memory content because the memory buffer will be freed
4333 atsru
->hdr
= (void *)(atsru
+ 1);
4334 memcpy(atsru
->hdr
, hdr
, hdr
->length
);
4335 atsru
->include_all
= atsr
->flags
& 0x1;
4336 if (!atsru
->include_all
) {
4337 atsru
->devices
= dmar_alloc_dev_scope((void *)(atsr
+ 1),
4338 (void *)atsr
+ atsr
->header
.length
,
4339 &atsru
->devices_cnt
);
4340 if (atsru
->devices_cnt
&& atsru
->devices
== NULL
) {
4346 list_add_rcu(&atsru
->list
, &dmar_atsr_units
);
4351 static void intel_iommu_free_atsr(struct dmar_atsr_unit
*atsru
)
4353 dmar_free_dev_scope(&atsru
->devices
, &atsru
->devices_cnt
);
4357 int dmar_release_one_atsr(struct acpi_dmar_header
*hdr
, void *arg
)
4359 struct acpi_dmar_atsr
*atsr
;
4360 struct dmar_atsr_unit
*atsru
;
4362 atsr
= container_of(hdr
, struct acpi_dmar_atsr
, header
);
4363 atsru
= dmar_find_atsr(atsr
);
4365 list_del_rcu(&atsru
->list
);
4367 intel_iommu_free_atsr(atsru
);
4373 int dmar_check_one_atsr(struct acpi_dmar_header
*hdr
, void *arg
)
4377 struct acpi_dmar_atsr
*atsr
;
4378 struct dmar_atsr_unit
*atsru
;
4380 atsr
= container_of(hdr
, struct acpi_dmar_atsr
, header
);
4381 atsru
= dmar_find_atsr(atsr
);
4385 if (!atsru
->include_all
&& atsru
->devices
&& atsru
->devices_cnt
) {
4386 for_each_active_dev_scope(atsru
->devices
, atsru
->devices_cnt
,
4394 static int intel_iommu_add(struct dmar_drhd_unit
*dmaru
)
4397 struct intel_iommu
*iommu
= dmaru
->iommu
;
4399 if (g_iommus
[iommu
->seq_id
])
4402 if (hw_pass_through
&& !ecap_pass_through(iommu
->ecap
)) {
4403 pr_warn("%s: Doesn't support hardware pass through.\n",
4407 if (!ecap_sc_support(iommu
->ecap
) &&
4408 domain_update_iommu_snooping(iommu
)) {
4409 pr_warn("%s: Doesn't support snooping.\n",
4413 sp
= domain_update_iommu_superpage(iommu
) - 1;
4414 if (sp
>= 0 && !(cap_super_page_val(iommu
->cap
) & (1 << sp
))) {
4415 pr_warn("%s: Doesn't support large page.\n",
4421 * Disable translation if already enabled prior to OS handover.
4423 if (iommu
->gcmd
& DMA_GCMD_TE
)
4424 iommu_disable_translation(iommu
);
4426 g_iommus
[iommu
->seq_id
] = iommu
;
4427 ret
= iommu_init_domains(iommu
);
4429 ret
= iommu_alloc_root_entry(iommu
);
4433 #ifdef CONFIG_INTEL_IOMMU_SVM
4434 if (pasid_enabled(iommu
))
4435 intel_svm_alloc_pasid_tables(iommu
);
4438 if (dmaru
->ignored
) {
4440 * we always have to disable PMRs or DMA may fail on this device
4443 iommu_disable_protect_mem_regions(iommu
);
4447 intel_iommu_init_qi(iommu
);
4448 iommu_flush_write_buffer(iommu
);
4450 #ifdef CONFIG_INTEL_IOMMU_SVM
4451 if (pasid_enabled(iommu
) && ecap_prs(iommu
->ecap
)) {
4452 ret
= intel_svm_enable_prq(iommu
);
4457 ret
= dmar_set_interrupt(iommu
);
4461 iommu_set_root_entry(iommu
);
4462 iommu
->flush
.flush_context(iommu
, 0, 0, 0, DMA_CCMD_GLOBAL_INVL
);
4463 iommu
->flush
.flush_iotlb(iommu
, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH
);
4464 iommu_enable_translation(iommu
);
4466 iommu_disable_protect_mem_regions(iommu
);
4470 disable_dmar_iommu(iommu
);
4472 free_dmar_iommu(iommu
);
4476 int dmar_iommu_hotplug(struct dmar_drhd_unit
*dmaru
, bool insert
)
4479 struct intel_iommu
*iommu
= dmaru
->iommu
;
4481 if (!intel_iommu_enabled
)
4487 ret
= intel_iommu_add(dmaru
);
4489 disable_dmar_iommu(iommu
);
4490 free_dmar_iommu(iommu
);
4496 static void intel_iommu_free_dmars(void)
4498 struct dmar_rmrr_unit
*rmrru
, *rmrr_n
;
4499 struct dmar_atsr_unit
*atsru
, *atsr_n
;
4501 list_for_each_entry_safe(rmrru
, rmrr_n
, &dmar_rmrr_units
, list
) {
4502 list_del(&rmrru
->list
);
4503 dmar_free_dev_scope(&rmrru
->devices
, &rmrru
->devices_cnt
);
4507 list_for_each_entry_safe(atsru
, atsr_n
, &dmar_atsr_units
, list
) {
4508 list_del(&atsru
->list
);
4509 intel_iommu_free_atsr(atsru
);
4513 int dmar_find_matched_atsr_unit(struct pci_dev
*dev
)
4516 struct pci_bus
*bus
;
4517 struct pci_dev
*bridge
= NULL
;
4519 struct acpi_dmar_atsr
*atsr
;
4520 struct dmar_atsr_unit
*atsru
;
4522 dev
= pci_physfn(dev
);
4523 for (bus
= dev
->bus
; bus
; bus
= bus
->parent
) {
4525 /* If it's an integrated device, allow ATS */
4528 /* Connected via non-PCIe: no ATS */
4529 if (!pci_is_pcie(bridge
) ||
4530 pci_pcie_type(bridge
) == PCI_EXP_TYPE_PCI_BRIDGE
)
4532 /* If we found the root port, look it up in the ATSR */
4533 if (pci_pcie_type(bridge
) == PCI_EXP_TYPE_ROOT_PORT
)
4538 list_for_each_entry_rcu(atsru
, &dmar_atsr_units
, list
) {
4539 atsr
= container_of(atsru
->hdr
, struct acpi_dmar_atsr
, header
);
4540 if (atsr
->segment
!= pci_domain_nr(dev
->bus
))
4543 for_each_dev_scope(atsru
->devices
, atsru
->devices_cnt
, i
, tmp
)
4544 if (tmp
== &bridge
->dev
)
4547 if (atsru
->include_all
)
4557 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info
*info
)
4560 struct dmar_rmrr_unit
*rmrru
;
4561 struct dmar_atsr_unit
*atsru
;
4562 struct acpi_dmar_atsr
*atsr
;
4563 struct acpi_dmar_reserved_memory
*rmrr
;
4565 if (!intel_iommu_enabled
&& system_state
!= SYSTEM_BOOTING
)
4568 list_for_each_entry(rmrru
, &dmar_rmrr_units
, list
) {
4569 rmrr
= container_of(rmrru
->hdr
,
4570 struct acpi_dmar_reserved_memory
, header
);
4571 if (info
->event
== BUS_NOTIFY_ADD_DEVICE
) {
4572 ret
= dmar_insert_dev_scope(info
, (void *)(rmrr
+ 1),
4573 ((void *)rmrr
) + rmrr
->header
.length
,
4574 rmrr
->segment
, rmrru
->devices
,
4575 rmrru
->devices_cnt
);
4578 } else if (info
->event
== BUS_NOTIFY_REMOVED_DEVICE
) {
4579 dmar_remove_dev_scope(info
, rmrr
->segment
,
4580 rmrru
->devices
, rmrru
->devices_cnt
);
4584 list_for_each_entry(atsru
, &dmar_atsr_units
, list
) {
4585 if (atsru
->include_all
)
4588 atsr
= container_of(atsru
->hdr
, struct acpi_dmar_atsr
, header
);
4589 if (info
->event
== BUS_NOTIFY_ADD_DEVICE
) {
4590 ret
= dmar_insert_dev_scope(info
, (void *)(atsr
+ 1),
4591 (void *)atsr
+ atsr
->header
.length
,
4592 atsr
->segment
, atsru
->devices
,
4593 atsru
->devices_cnt
);
4598 } else if (info
->event
== BUS_NOTIFY_REMOVED_DEVICE
) {
4599 if (dmar_remove_dev_scope(info
, atsr
->segment
,
4600 atsru
->devices
, atsru
->devices_cnt
))
4609 * Here we only respond to action of unbound device from driver.
4611 * Added device is not attached to its DMAR domain here yet. That will happen
4612 * when mapping the device to iova.
4614 static int device_notifier(struct notifier_block
*nb
,
4615 unsigned long action
, void *data
)
4617 struct device
*dev
= data
;
4618 struct dmar_domain
*domain
;
4620 if (iommu_dummy(dev
))
4623 if (action
!= BUS_NOTIFY_REMOVED_DEVICE
)
4626 domain
= find_domain(dev
);
4630 dmar_remove_one_dev_info(domain
, dev
);
4631 if (!domain_type_is_vm_or_si(domain
) && list_empty(&domain
->devices
))
4632 domain_exit(domain
);
4637 static struct notifier_block device_nb
= {
4638 .notifier_call
= device_notifier
,
4641 static int intel_iommu_memory_notifier(struct notifier_block
*nb
,
4642 unsigned long val
, void *v
)
4644 struct memory_notify
*mhp
= v
;
4645 unsigned long long start
, end
;
4646 unsigned long start_vpfn
, last_vpfn
;
4649 case MEM_GOING_ONLINE
:
4650 start
= mhp
->start_pfn
<< PAGE_SHIFT
;
4651 end
= ((mhp
->start_pfn
+ mhp
->nr_pages
) << PAGE_SHIFT
) - 1;
4652 if (iommu_domain_identity_map(si_domain
, start
, end
)) {
4653 pr_warn("Failed to build identity map for [%llx-%llx]\n",
4660 case MEM_CANCEL_ONLINE
:
4661 start_vpfn
= mm_to_dma_pfn(mhp
->start_pfn
);
4662 last_vpfn
= mm_to_dma_pfn(mhp
->start_pfn
+ mhp
->nr_pages
- 1);
4663 while (start_vpfn
<= last_vpfn
) {
4665 struct dmar_drhd_unit
*drhd
;
4666 struct intel_iommu
*iommu
;
4667 struct page
*freelist
;
4669 iova
= find_iova(&si_domain
->iovad
, start_vpfn
);
4671 pr_debug("Failed get IOVA for PFN %lx\n",
4676 iova
= split_and_remove_iova(&si_domain
->iovad
, iova
,
4677 start_vpfn
, last_vpfn
);
4679 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4680 start_vpfn
, last_vpfn
);
4684 freelist
= domain_unmap(si_domain
, iova
->pfn_lo
,
4688 for_each_active_iommu(iommu
, drhd
)
4689 iommu_flush_iotlb_psi(iommu
, si_domain
,
4690 iova
->pfn_lo
, iova_size(iova
),
4693 dma_free_pagelist(freelist
);
4695 start_vpfn
= iova
->pfn_hi
+ 1;
4696 free_iova_mem(iova
);
4704 static struct notifier_block intel_iommu_memory_nb
= {
4705 .notifier_call
= intel_iommu_memory_notifier
,
4709 static void free_all_cpu_cached_iovas(unsigned int cpu
)
4713 for (i
= 0; i
< g_num_of_iommus
; i
++) {
4714 struct intel_iommu
*iommu
= g_iommus
[i
];
4715 struct dmar_domain
*domain
;
4721 for (did
= 0; did
< cap_ndoms(iommu
->cap
); did
++) {
4722 domain
= get_iommu_domain(iommu
, (u16
)did
);
4726 free_cpu_cached_iovas(cpu
, &domain
->iovad
);
4731 static int intel_iommu_cpu_notifier(struct notifier_block
*nfb
,
4732 unsigned long action
, void *v
)
4734 unsigned int cpu
= (unsigned long)v
;
4738 case CPU_DEAD_FROZEN
:
4739 free_all_cpu_cached_iovas(cpu
);
4740 flush_unmaps_timeout(cpu
);
4746 static struct notifier_block intel_iommu_cpu_nb
= {
4747 .notifier_call
= intel_iommu_cpu_notifier
,
static ssize_t intel_iommu_show_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	u32 ver = readl(iommu->reg + DMAR_VER_REG);
	return sprintf(buf, "%d:%d\n",
		       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);

static ssize_t intel_iommu_show_address(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);

static ssize_t intel_iommu_show_cap(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);

static ssize_t intel_iommu_show_ecap(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);

static ssize_t intel_iommu_show_ndoms(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
}
static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);

static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct intel_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
						  cap_ndoms(iommu->cap)));
}
static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);

static struct attribute *intel_iommu_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_address.attr,
	&dev_attr_cap.attr,
	&dev_attr_ecap.attr,
	&dev_attr_domains_supported.attr,
	&dev_attr_domains_used.attr,
	NULL,
};

static struct attribute_group intel_iommu_group = {
	.name = "intel-iommu",
	.attrs = intel_iommu_attrs,
};

const struct attribute_group *intel_iommu_groups[] = {
	&intel_iommu_group,
	NULL,
};

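/*
 * Driver entry point, called from the arch IOMMU init code.  The rough
 * order is: parse the DMAR ACPI table, build the device scope lists,
 * reserve the IOVA ranges that must never be handed out, program and
 * enable each remapping unit (init_dmars()), install the DMA ops and
 * iommu_ops, and finally register the bus/memory/CPU notifiers set up
 * above.
 */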
int __init intel_iommu_init(void)
{
	int ret = -ENODEV;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* VT-d is required for a TXT/tboot launch, so enforce that */
	force_on = tboot_force_iommu();

	if (iommu_init_mempool()) {
		if (force_on)
			panic("tboot: Failed to initialize iommu memory\n");
		return -ENOMEM;
	}

	down_write(&dmar_global_lock);
	if (dmar_table_init()) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR table\n");
		goto out_free_dmar;
	}

	if (dmar_dev_scope_init() < 0) {
		if (force_on)
			panic("tboot: Failed to initialize DMAR device scope\n");
		goto out_free_dmar;
	}

	if (no_iommu || dmar_disabled)
		goto out_free_dmar;

	if (list_empty(&dmar_rmrr_units))
		pr_info("No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		pr_info("No ATSR found\n");

	if (dmar_init_reserved_ranges()) {
		if (force_on)
			panic("tboot: Failed to reserve iommu ranges\n");
		goto out_free_reserved_range;
	}

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		if (force_on)
			panic("tboot: Failed to initialize DMARs\n");
		pr_err("Initialization failed\n");
		goto out_free_reserved_range;
	}
	up_write(&dmar_global_lock);
	pr_info("Intel(R) Virtualization Technology for Directed I/O\n");

#ifdef CONFIG_SWIOTLB
	swiotlb = 0;
#endif
	dma_ops = &intel_dma_ops;

	init_iommu_pm_ops();

	for_each_active_iommu(iommu, drhd)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       "%s", iommu->name);

	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
	bus_register_notifier(&pci_bus_type, &device_nb);
	if (si_domain && !hw_pass_through)
		register_memory_notifier(&intel_iommu_memory_nb);
	register_hotcpu_notifier(&intel_iommu_cpu_nb);

	intel_iommu_enabled = 1;

	return 0;

out_free_reserved_range:
	put_iova_domain(&reserved_iova_list);
out_free_dmar:
	intel_iommu_free_dmars();
	up_write(&dmar_global_lock);
	iommu_exit_mempool();
	return ret;
}

static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct intel_iommu *iommu = opaque;

	domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
	return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices.  If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
{
	if (!iommu || !dev || !dev_is_pci(dev))
		return;

	pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
}

static void __dmar_remove_one_dev_info(struct device_domain_info *info)
{
	struct intel_iommu *iommu;
	unsigned long flags;

	assert_spin_locked(&device_domain_lock);

	if (WARN_ON(!info))
		return;

	iommu = info->iommu;

	if (info->dev) {
		iommu_disable_dev_iotlb(info);
		domain_context_clear(iommu, info->dev);
	}

	unlink_domain_info(info);

	spin_lock_irqsave(&iommu->lock, flags);
	domain_detach_iommu(info->domain, iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	free_devinfo_mem(info);
}

static void dmar_remove_one_dev_info(struct dmar_domain *domain,
				     struct device *dev)
{
	struct device_domain_info *info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	info = dev->archdata.iommu;
	__dmar_remove_one_dev_info(info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			 DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
	struct dmar_domain *dmar_domain;
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		pr_err("Can't allocate dmar_domain\n");
		return NULL;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		pr_err("Domain initialization failed\n");
		domain_exit(dmar_domain);
		return NULL;
	}
	domain_update_iommu_cap(dmar_domain);

	domain = &dmar_domain->domain;
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return domain;
}

static void intel_iommu_domain_free(struct iommu_domain *domain)
{
	domain_exit(to_dmar_domain(domain));
}

static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			rcu_read_lock();
			dmar_remove_one_dev_info(old_domain, dev);
			rcu_read_unlock();

			if (!domain_type_is_vm_or_si(old_domain) &&
			    list_empty(&old_domain->devices))
				domain_exit(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		pr_err("%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
}

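/*
 * Map a physically contiguous range into a domain on behalf of the
 * generic IOMMU API.  Note the rounding below: a mapping whose low bits
 * of hpa plus size spill into the next page needs an extra PTE.  For
 * example (hypothetical numbers), hpa = 0x1f00 with size = 0x200 touches
 * bytes in two different 4KiB pages, so aligned_nrpages() yields 2.
 */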
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			pr_err("%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);
	return ret;
}

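/*
 * Unmap for the generic IOMMU API.  If the IOVA was mapped with a
 * superpage, the page table cannot split it, so the unmap is widened to
 * the full size of that large page: e.g. asking to unmap 4KiB out of a
 * 2MiB superpage tears down the whole 2MiB mapping, and the size that
 * was actually unmapped is returned to the caller.
 */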
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_domain_iommu(iommu_id, dmar_domain) {
		iommu = g_iommus[iommu_id];

		iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
				      start_pfn, npages, !freelist, 0);
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}

static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(iommu->iommu_dev, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(iommu->iommu_dev, dev);
}

#ifdef CONFIG_INTEL_IOMMU_SVM
#define MAX_NR_PASID_BITS (20)
static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
{
	/*
	 * Convert ecap_pss to extend context entry pts encoding, also
	 * respect the soft pasid_max value set by the iommu.
	 * - number of PASID bits = ecap_pss + 1
	 * - number of PASID table entries = 2^(pts + 5)
	 * Therefore, pts = ecap_pss - 4
	 * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
	 */
	if (ecap_pss(iommu->ecap) < 5)
		return 0;

	/* pasid_max is encoded as actual number of entries not the bits */
	return find_first_bit((unsigned long *)&iommu->pasid_max,
			      MAX_NR_PASID_BITS) - 5;
}

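/*
 * Set up the extended context entry of a device so it may issue
 * requests-with-PASID: point it at this IOMMU's PASID (and PASID state)
 * tables, size them via intel_iommu_get_pts() above, flip the translation
 * type if the entry was in pass-through mode, and flush the context cache
 * so the hardware observes the new entry.
 */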
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
{
	struct device_domain_info *info;
	struct context_entry *context;
	struct dmar_domain *domain;
	unsigned long flags;
	u64 ctx_lo;
	int ret;

	domain = get_valid_domain_for_dev(sdev->dev);
	if (!domain)
		return -EINVAL;

	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);

	ret = -EINVAL;
	info = sdev->dev->archdata.iommu;
	if (!info || !info->pasid_supported)
		goto out;

	context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
	if (WARN_ON(!context))
		goto out;

	ctx_lo = context[0].lo;

	sdev->did = domain->iommu_did[iommu->seq_id];
	sdev->sid = PCI_DEVID(info->bus, info->devfn);

	if (!(ctx_lo & CONTEXT_PASIDE)) {
		context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
		context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
			intel_iommu_get_pts(iommu);

		/* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
		 * extended to permit requests-with-PASID if the PASIDE bit
		 * is set. which makes sense. For CONTEXT_TT_PASS_THROUGH,
		 * however, the PASIDE bit is ignored and requests-with-PASID
		 * are unconditionally blocked. Which makes less sense.
		 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
		 * "guest mode" translation types depending on whether ATS
		 * is available or not. Annoyingly, we can't use the new
		 * modes *unless* PASIDE is set. */
		if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
			ctx_lo &= ~CONTEXT_TT_MASK;
			if (info->ats_supported)
				ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
			else
				ctx_lo |= CONTEXT_TT_PT_PASID << 2;
		}
		ctx_lo |= CONTEXT_PASIDE;
		if (iommu->pasid_state_table)
			ctx_lo |= CONTEXT_DINVE;
		if (info->pri_supported)
			ctx_lo |= CONTEXT_PRS;
		context[0].lo = ctx_lo;

		iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
	}

	/* Enable PASID support in the device, if it wasn't already */
	if (!info->pasid_enabled)
		iommu_enable_dev_iotlb(info);

	if (info->ats_enabled) {
		sdev->dev_iotlb = 1;
		sdev->qdep = info->ats_qdep;
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}
	ret = 0;

 out:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}

struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	if (iommu_dummy(dev)) {
		dev_warn(dev,
			 "No IOMMU translation for device; cannot enable SVM\n");
		return NULL;
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu) {
		dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
		return NULL;
	}

	if (!iommu->pasid_table) {
		dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
		return NULL;
	}

	return iommu;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */

static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
	.domain_alloc	= intel_iommu_domain_alloc,
	.domain_free	= intel_iommu_domain_free,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.device_group	= pci_device_group,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

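/*
 * These callbacks are only reached through the generic IOMMU API.  A
 * minimal sketch of how a caller (VFIO, for instance) would exercise
 * them looks roughly like this; error handling is omitted and the
 * device, iova and phys values are made up for illustration:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *	iommu_attach_device(dom, &pdev->dev);		// ->attach_dev()
 *	iommu_map(dom, iova, phys, SZ_4K,
 *		  IOMMU_READ | IOMMU_WRITE);		// ->map()
 *	iommu_unmap(dom, iova, SZ_4K);			// ->unmap()
 *	iommu_detach_device(dom, &pdev->dev);		// ->detach_dev()
 *	iommu_domain_free(dom);				// ->domain_free()
 */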
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	pr_info("Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pr_info("Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pr_info("Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",