// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"DMAR: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/crash_dump.h>
#include <linux/dma-direct.h>
#include <linux/dmi.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <uapi/linux/iommufd.h>

#include "../dma-iommu.h"
#include "../irq_remapping.h"
#include "../iommu-sva.h"
#include "cap_audit.h"
#define ROOT_SIZE		VTD_PAGE_SIZE
#define CONTEXT_SIZE		VTD_PAGE_SIZE

#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)

#define IOAPIC_RANGE_START	(0xfee00000)
#define IOAPIC_RANGE_END	(0xfeefffff)
#define IOVA_START_ADDR		(0x1000)

#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
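/*
 * Worked example (added for illustration, not from the original source):
 * with gaw = 48 and VTD_PAGE_SHIFT = 12, __DOMAIN_MAX_PFN(48) = 2^36 - 1
 * and DOMAIN_MAX_ADDR(48) = (2^36 - 1) << 12, i.e. just under 256 TiB.
 * On a 32-bit kernel DOMAIN_MAX_PFN() clamps the result to ULONG_MAX so
 * PFNs always fit in an unsigned long.
 */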
/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)

#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)

/* page table handling */
#define LEVEL_STRIDE		(9)
#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
static inline int agaw_to_level(int agaw)

static inline int agaw_to_width(int agaw)
	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);

static inline int width_to_agaw(int width)
	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);

static inline unsigned int level_to_offset_bits(int level)
	return (level - 1) * LEVEL_STRIDE;

static inline int pfn_level_offset(u64 pfn, int level)
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;

static inline u64 level_mask(int level)
	return -1ULL << level_to_offset_bits(level);

static inline u64 level_size(int level)
	return 1ULL << level_to_offset_bits(level);

static inline u64 align_to_level(u64 pfn, int level)
	return (pfn + level_size(level) - 1) & level_mask(level);

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
	return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
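/*
 * Worked example (added for illustration, not from the original source):
 * AGAW value 2 selects a 4-level table, agaw_to_width(2) = 48 bits and
 * level_to_offset_bits(2) = 9, so pfn_level_offset() indexes bits 17:9 of
 * the page frame number at level 2; each level adds LEVEL_STRIDE = 9 bits.
 */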
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
   are never going to work. */
static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);

static inline unsigned long page_to_dma_pfn(struct page *pg)
	return mm_to_dma_pfn(page_to_pfn(pg));

static inline unsigned long virt_to_dma_pfn(void *p)
	return page_to_dma_pfn(virt_to_page(p));

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;
static int intel_iommu_tboot_noforce;
static int no_platform_optin;
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 */
static phys_addr_t root_entry_lctp(struct root_entry *re)
	return re->lo & VTD_PAGE_MASK;

/*
 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
 */
static phys_addr_t root_entry_uctp(struct root_entry *re)
	return re->hi & VTD_PAGE_MASK;

static inline void context_set_present(struct context_entry *context)

static inline void context_set_fault_enable(struct context_entry *context)
	context->lo &= (((u64)-1) << 2) | 1;

static inline void context_set_translation_type(struct context_entry *context,
	context->lo &= (((u64)-1) << 4) | 3;
	context->lo |= (value & 3) << 2;

static inline void context_set_address_root(struct context_entry *context,
	context->lo &= ~VTD_PAGE_MASK;
	context->lo |= value & VTD_PAGE_MASK;

static inline void context_set_address_width(struct context_entry *context,
	context->hi |= value & 7;

static inline void context_set_domain_id(struct context_entry *context,
	context->hi |= (value & ((1 << 16) - 1)) << 8;

static inline void context_set_pasid(struct context_entry *context)
	context->lo |= CONTEXT_PASIDE;

static inline int context_domain_id(struct context_entry *c)
	return((c->hi >> 8) & 0xffff);
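/*
 * Field layout note (added for clarity; summarizes the VT-d legacy context
 * entry format, not text from the original source): the low qword holds
 * Present (bit 0), Fault Processing Disable (bit 1), Translation Type
 * (bits 3:2) and the page-table root address (bits 63:12); the high qword
 * holds the Address Width (bits 2:0) and the Domain ID (bits 23:8), which
 * is what the helpers above encode and context_domain_id() reads back.
 */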
static inline void context_clear_entry(struct context_entry *context)

static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
	if (!iommu->copied_tables)
		return false;

	return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);

static inline void
set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
	set_bit(((long)bus << 8) | devfn, iommu->copied_tables);

static inline void
clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
	clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);

/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;
struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

struct dmar_satc_unit {
	struct list_head list;		/* list of SATC units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	struct intel_iommu *iommu;	/* the corresponding iommu */
	int devices_cnt;		/* target device count */
	u8 atc_required:1;		/* ATS is required */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);
static LIST_HEAD(dmar_satc_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

static void device_block_translation(struct device *dev);
static void intel_iommu_domain_free(struct iommu_domain *domain);

int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
static int iommu_skip_te_disable;

#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
	return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);

static void clear_translation_pre_enabled(struct intel_iommu *iommu)
	iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;

static void init_translation_status(struct intel_iommu *iommu)
	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_TES)
		iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
static int __init intel_iommu_setup(char *str)
		if (!strncmp(str, "on", 2)) {
			pr_info("IOMMU enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			no_platform_optin = 1;
			pr_info("IOMMU disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
			iommu_dma_forcedac = true;
		} else if (!strncmp(str, "strict", 6)) {
			pr_warn("intel_iommu=strict deprecated; use iommu.strict=1 instead\n");
			iommu_set_dma_strict();
		} else if (!strncmp(str, "sp_off", 6)) {
			pr_info("Disable supported super page\n");
			intel_iommu_superpage = 0;
		} else if (!strncmp(str, "sm_on", 5)) {
			pr_info("Enable scalable mode if hardware supports\n");
		} else if (!strncmp(str, "sm_off", 6)) {
			pr_info("Scalable mode is disallowed\n");
		} else if (!strncmp(str, "tboot_noforce", 13)) {
			pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
			intel_iommu_tboot_noforce = 1;
		} else {
			pr_notice("Unknown option - '%s'\n", str);
		}

		str += strcspn(str, ",");

__setup("intel_iommu=", intel_iommu_setup);
void *alloc_pgtable_page(int node, gfp_t gfp)
	page = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
	vaddr = page_address(page);

void free_pgtable_page(void *vaddr)
	free_page((unsigned long)vaddr);

static inline int domain_type_is_si(struct dmar_domain *domain)
	return domain->domain.type == IOMMU_DOMAIN_IDENTITY;

static inline int domain_pfn_supported(struct dmar_domain *domain,
	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;

	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
/*
 * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
 * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
 * the returned SAGAW.
 */
static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
	unsigned long fl_sagaw, sl_sagaw;

	fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0);
	sl_sagaw = cap_sagaw(iommu->cap);

	/* Second level only. */
	if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
		return sl_sagaw;

	/* First level only. */
	if (!ecap_slts(iommu->ecap))
		return fl_sagaw;

	return fl_sagaw & sl_sagaw;
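/*
 * Note (added for clarity; bit meanings per the VT-d SAGAW encoding, not
 * text from the original source): BIT(1) = 3-level, BIT(2) = 4-level and
 * BIT(3) = 5-level second-level tables, so first-level support is expressed
 * as 4-level (BIT(2)) plus, when cap_fl5lp_support() reports it, 5-level
 * (BIT(3)) before being intersected with the hardware's cap_sagaw() value.
 */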
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
	sagaw = __iommu_calculate_sagaw(iommu);
	for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
		if (test_bit(agaw, &sagaw))
			break;
	}

	return agaw;

/*
 * Calculate max SAGAW for each iommu.
 */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
	return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);

/*
 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus, use a default agaw, and
 * get a supported less agaw for iommus that don't support the default agaw.
 */
int iommu_calculate_agaw(struct intel_iommu *iommu)
	return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
	return sm_supported(iommu) ?
			ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);

static void domain_update_iommu_coherency(struct dmar_domain *domain)
	struct iommu_domain_info *info;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	domain->iommu_coherency = true;
	xa_for_each(&domain->iommu_array, i, info) {
		if (!iommu_paging_structure_coherency(info->iommu)) {
			domain->iommu_coherency = false;

	/* No hardware attached; use lowest common denominator */
	for_each_active_iommu(iommu, drhd) {
		if (!iommu_paging_structure_coherency(iommu)) {
			domain->iommu_coherency = false;
static int domain_update_iommu_superpage(struct dmar_domain *domain,
					 struct intel_iommu *skip)
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	if (!intel_iommu_superpage)

	/* set iommu_superpage to the smallest common denominator */
	for_each_active_iommu(iommu, drhd) {
		if (domain && domain->use_first_level) {
			if (!cap_fl1gp_support(iommu->cap))

		mask &= cap_super_page_val(iommu->cap);
static int domain_update_device_node(struct dmar_domain *domain)
	struct device_domain_info *info;
	int nid = NUMA_NO_NODE;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		/*
		 * There could possibly be multiple device numa nodes as devices
		 * within the same domain may sit behind different IOMMUs. There
		 * isn't a perfect answer in such a situation, so we select the
		 * first-come-first-served policy.
		 */
		nid = dev_to_node(info->dev);
		if (nid != NUMA_NO_NODE)
			break;
	}
	spin_unlock_irqrestore(&domain->lock, flags);

static void domain_update_iotlb(struct dmar_domain *domain);
/* Return the super pagesize bitmap if supported. */
static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
	unsigned long bitmap = 0;

	/*
	 * 1-level super page supports page size of 2MiB, 2-level super page
	 * supports page size of both 2MiB and 1GiB.
	 */
	if (domain->iommu_superpage == 1)
		bitmap |= SZ_2M;
	else if (domain->iommu_superpage == 2)
		bitmap |= SZ_2M | SZ_1G;

	return bitmap;
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
	domain_update_iommu_coherency(domain);
	domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);

	/*
	 * If RHSA is missing, we should default to the device numa domain
	 * as well.
	 */
	if (domain->nid == NUMA_NO_NODE)
		domain->nid = domain_update_device_node(domain);

	/*
	 * First-level translation restricts the input-address to a
	 * canonical address (i.e., address bits 63:N have the same
	 * value as address bit [N-1], where N is 48-bits with 4-level
	 * paging and 57-bits with 5-level paging). Hence, skip bit
	 * [N-1].
	 */
	if (domain->use_first_level)
		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
	else
		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);

	domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain);
	domain_update_iotlb(domain);
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
	struct root_entry *root = &iommu->root_entry[bus];
	struct context_entry *context;

	/*
	 * Unless the caller requested to allocate a new entry,
	 * returning a copied context entry makes no sense.
	 */
	if (!alloc && context_copied(iommu, bus, devfn))

	if (sm_supported(iommu)) {

		context = phys_to_virt(*entry & VTD_PAGE_MASK);
		unsigned long phy_addr;

		context = alloc_pgtable_page(iommu->node, GFP_ATOMIC);

		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
		phy_addr = virt_to_phys((void *)context);
		*entry = phy_addr | 1;
		__iommu_flush_cache(iommu, entry, sizeof(*entry));

	return &context[devfn];
/**
 * is_downstream_to_pci_bridge - test if a device belongs to the PCI
 *				 sub-hierarchy of a candidate PCI-PCI bridge
 * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
 * @bridge: the candidate PCI-PCI bridge
 *
 * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
 */
static bool
is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
	struct pci_dev *pdev, *pbridge;

	if (!dev_is_pci(dev) || !dev_is_pci(bridge))

	pdev = to_pci_dev(dev);
	pbridge = to_pci_dev(bridge);

	if (pbridge->subordinate &&
	    pbridge->subordinate->number <= pdev->bus->number &&
	    pbridge->subordinate->busn_res.end >= pdev->bus->number)
static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
	struct dmar_drhd_unit *drhd;

	/* We know that this device on this chipset has its own IOMMU.
	 * If we find it under a different IOMMU, then the BIOS is lying
	 * to us. Hope that the IOMMU for this device is actually
	 * disabled, and it needs no translation...
	 */
	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
		dev_info(&pdev->dev, "failed to run vt-d quirk\n");

	/* we know that this iommu should be at offset 0xa000 from vtbar */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
		pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
	if (!iommu || iommu->drhd->ignored)

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
		    pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB &&
		    quirk_ioat_snb_local_iommu(pdev))
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
	struct dmar_drhd_unit *drhd = NULL;
	struct pci_dev *pdev = NULL;
	struct intel_iommu *iommu;

	if (dev_is_pci(dev)) {
		struct pci_dev *pf_pdev;

		pdev = pci_real_dma_dev(to_pci_dev(dev));

		/* VFs aren't listed in scope tables; we need to look up
		 * the PF instead to find the IOMMU. */
		pf_pdev = pci_physfn(pdev);

		segment = pci_domain_nr(pdev->bus);
	} else if (has_acpi_companion(dev))
		dev = &ACPI_COMPANION(dev)->dev;

	for_each_iommu(iommu, drhd) {
		if (pdev && segment != drhd->segment)

		for_each_active_dev_scope(drhd->devices,
					  drhd->devices_cnt, i, tmp) {
			/* For a VF use its original BDF# not that of the PF
			 * which we used for the IOMMU lookup. Strictly speaking
			 * we could do this for all PCI devices; we only need to
			 * get the BDF# from the scope table for ACPI matches. */
			if (pdev && pdev->is_virtfn)

			*bus = drhd->devices[i].bus;
			*devfn = drhd->devices[i].devfn;

			if (is_downstream_to_pci_bridge(dev, tmp))

		if (pdev && drhd->include_all) {
			*bus = pdev->bus->number;
			*devfn = pdev->devfn;

	if (iommu_is_dummy(iommu, dev))
static void domain_flush_cache(struct dmar_domain *domain,
			       void *addr, int size)
	if (!domain->iommu_coherency)
		clflush_cache_range(addr, size);

static void free_context_table(struct intel_iommu *iommu)
	struct context_entry *context;

	if (!iommu->root_entry)

	for (i = 0; i < ROOT_ENTRY_NR; i++) {
		context = iommu_context_addr(iommu, i, 0, 0);
			free_pgtable_page(context);

		if (!sm_supported(iommu))

		context = iommu_context_addr(iommu, i, 0x80, 0);
			free_pgtable_page(context);

	free_pgtable_page(iommu->root_entry);
	iommu->root_entry = NULL;
#ifdef CONFIG_DMAR_DEBUG
static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
			 u8 bus, u8 devfn, struct dma_pte *parent, int level)
		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!pte || (dma_pte_superpage(pte) || !dma_pte_present(pte))) {
			pr_info("PTE not present at level %d\n", level);

		pr_info("pte level: %d, pte value: 0x%016llx\n", level, pte->val);

		parent = phys_to_virt(dma_pte_addr(pte));
void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
			  unsigned long long addr, u32 pasid)
	struct pasid_dir_entry *dir, *pde;
	struct pasid_entry *entries, *pte;
	struct context_entry *ctx_entry;
	struct root_entry *rt_entry;
	int i, dir_index, index, level;
	u8 devfn = source_id & 0xff;
	u8 bus = source_id >> 8;
	struct dma_pte *pgtable;

	pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr);

	/* root entry dump */
	rt_entry = &iommu->root_entry[bus];
		pr_info("root table entry is not present\n");

	if (sm_supported(iommu))
		pr_info("scalable mode root entry: hi 0x%016llx, low 0x%016llx\n",
			rt_entry->hi, rt_entry->lo);
	else
		pr_info("root entry: 0x%016llx", rt_entry->lo);

	/* context entry dump */
	ctx_entry = iommu_context_addr(iommu, bus, devfn, 0);
		pr_info("context table entry is not present\n");

	pr_info("context entry: hi 0x%016llx, low 0x%016llx\n",
		ctx_entry->hi, ctx_entry->lo);

	/* legacy mode does not require PASID entries */
	if (!sm_supported(iommu)) {
		level = agaw_to_level(ctx_entry->hi & 7);
		pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);

	/* get the pointer to pasid directory entry */
	dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
		pr_info("pasid directory entry is not present\n");

	/* For request-without-pasid, get the pasid from context entry */
	if (intel_iommu_sm && pasid == IOMMU_PASID_INVALID)
		pasid = PASID_RID2PASID;

	dir_index = pasid >> PASID_PDE_SHIFT;
	pde = &dir[dir_index];
	pr_info("pasid dir entry: 0x%016llx\n", pde->val);

	/* get the pointer to the pasid table entry */
	entries = get_pasid_table_from_pde(pde);
		pr_info("pasid table entry is not present\n");

	index = pasid & PASID_PTE_MASK;
	pte = &entries[index];
	for (i = 0; i < ARRAY_SIZE(pte->val); i++)
		pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]);

	if (pasid_pte_get_pgtt(pte) == PASID_ENTRY_PGTT_FL_ONLY) {
		level = pte->val[2] & BIT_ULL(2) ? 5 : 4;
		pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK);
	} else {
		level = agaw_to_level((pte->val[0] >> 2) & 0x7);
		pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK);
	}

	pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level);
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
				      unsigned long pfn, int *target_level,
	struct dma_pte *parent, *pte;
	int level = agaw_to_level(domain->agaw);

	if (!domain_pfn_supported(domain, pfn))
		/* Address beyond IOMMU's addressing capabilities. */

	parent = domain->pgd;

		offset = pfn_level_offset(pfn, level);
		pte = &parent[offset];
		if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))

		if (level == *target_level)

		if (!dma_pte_present(pte)) {

			tmp_page = alloc_pgtable_page(domain->nid, gfp);

			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
			if (domain->use_first_level)
				pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;

			if (cmpxchg64(&pte->val, 0ULL, pteval))
				/* Someone else set it while we were thinking; use theirs. */
				free_pgtable_page(tmp_page);
			else
				domain_flush_cache(domain, pte, sizeof(*pte));

		parent = phys_to_virt(dma_pte_addr(pte));

	*target_level = level;
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
					 int level, int *large_page)
	struct dma_pte *parent, *pte;
	int total = agaw_to_level(domain->agaw);

	parent = domain->pgd;
	while (level <= total) {
		offset = pfn_level_offset(pfn, total);
		pte = &parent[offset];

		if (!dma_pte_present(pte)) {

		if (dma_pte_superpage(pte)) {

		parent = phys_to_virt(dma_pte_addr(pte));
/* clear last level pte; a TLB flush should follow */
static void dma_pte_clear_range(struct dmar_domain *domain,
				unsigned long start_pfn,
				unsigned long last_pfn)
	unsigned int large_page;
	struct dma_pte *first_pte, *pte;

	if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) ||
	    WARN_ON(start_pfn > last_pfn))

	/* we don't need lock here; nobody else touches the iova range */
		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
			start_pfn = align_to_level(start_pfn + 1, large_page + 1);

			start_pfn += lvl_to_nr_pages(large_page);
		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));

		domain_flush_cache(domain, first_pte,
				   (void *)pte - (void *)first_pte);

	} while (start_pfn && start_pfn <= last_pfn);
static void dma_pte_free_level(struct dmar_domain *domain, int level,
			       int retain_level, struct dma_pte *pte,
			       unsigned long pfn, unsigned long start_pfn,
			       unsigned long last_pfn)
	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

		unsigned long level_pfn;
		struct dma_pte *level_pte;

		if (!dma_pte_present(pte) || dma_pte_superpage(pte))

		level_pfn = pfn & level_mask(level);
		level_pte = phys_to_virt(dma_pte_addr(pte));

			dma_pte_free_level(domain, level - 1, retain_level,
					   level_pte, level_pfn, start_pfn,
					   last_pfn);

		/*
		 * Free the page table if we're below the level we want to
		 * retain and the range covers the entire table.
		 */
		if (level < retain_level && !(start_pfn > level_pfn ||
					      last_pfn < level_pfn + level_size(level) - 1)) {
			domain_flush_cache(domain, pte, sizeof(*pte));
			free_pgtable_page(level_pte);

		pfn += level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
/*
 * clear last level (leaf) ptes and free page table pages below the
 * level we wish to keep intact.
 */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
				   unsigned long start_pfn,
				   unsigned long last_pfn,
	dma_pte_clear_range(domain, start_pfn, last_pfn);

	/* We don't need lock here; nobody else touches the iova range */
	dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
			   domain->pgd, 0, start_pfn, last_pfn);

	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		free_pgtable_page(domain->pgd);
/* When a page at a given level is being unlinked from its parent, we don't
   need to *modify* it at all. All we need to do is make a list of all the
   pages which can be freed just as soon as we've flushed the IOTLB and we
   know the hardware page-walk will no longer touch them.
   The 'pte' argument is the *parent* PTE, pointing to the page that is to
   be freed. */
static void dma_pte_list_pagetables(struct dmar_domain *domain,
				    int level, struct dma_pte *pte,
				    struct list_head *freelist)
	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
	list_add_tail(&pg->lru, freelist);

	pte = page_address(pg);
		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
			dma_pte_list_pagetables(domain, level - 1, pte, freelist);
	} while (!first_pte_in_page(pte));
static void dma_pte_clear_level(struct dmar_domain *domain, int level,
				struct dma_pte *pte, unsigned long pfn,
				unsigned long start_pfn, unsigned long last_pfn,
				struct list_head *freelist)
	struct dma_pte *first_pte = NULL, *last_pte = NULL;

	pfn = max(start_pfn, pfn);
	pte = &pte[pfn_level_offset(pfn, level)];

		unsigned long level_pfn = pfn & level_mask(level);

		if (!dma_pte_present(pte))

		/* If range covers entire pagetable, free it */
		if (start_pfn <= level_pfn &&
		    last_pfn >= level_pfn + level_size(level) - 1) {
			/* These subordinate page tables are going away entirely. Don't
			   bother to clear them; we're just going to *free* them. */
			if (level > 1 && !dma_pte_superpage(pte))
				dma_pte_list_pagetables(domain, level - 1, pte, freelist);

		} else if (level > 1) {
			/* Recurse down into a level that isn't *entirely* obsolete */
			dma_pte_clear_level(domain, level - 1,
					    phys_to_virt(dma_pte_addr(pte)),
					    level_pfn, start_pfn, last_pfn,
					    freelist);

		pfn = level_pfn + level_size(level);
	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);

	domain_flush_cache(domain, first_pte,
			   (void *)++last_pte - (void *)first_pte);
/* We can't just free the pages because the IOMMU may still be walking
   the page tables, and may have cached the intermediate levels. The
   pages can only be freed after the IOTLB flush has been done. */
static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
			 unsigned long last_pfn, struct list_head *freelist)
	if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) ||
	    WARN_ON(start_pfn > last_pfn))

	/* we don't need lock here; nobody else touches the iova range */
	dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
			    domain->pgd, 0, start_pfn, last_pfn, freelist);

	if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
		struct page *pgd_page = virt_to_page(domain->pgd);
		list_add_tail(&pgd_page->lru, freelist);
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
	struct root_entry *root;

	root = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
		pr_err("Allocating root entry for %s failed\n",
		       iommu->name);

	__iommu_flush_cache(iommu, root, ROOT_SIZE);
	iommu->root_entry = root;
static void iommu_set_root_entry(struct intel_iommu *iommu)
	addr = virt_to_phys(iommu->root_entry);
	if (sm_supported(iommu))
		addr |= DMA_RTADDR_SMT;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);

	writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_RTPS), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/*
	 * Hardware invalidates all DMA remapping hardware translation
	 * caches as part of SRTP flow.
	 */
	if (cap_esrtps(iommu->cap))
		return;

	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
	if (sm_supported(iommu))
		qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
void iommu_flush_write_buffer(struct intel_iommu *iommu)
	if (!rwbf_quirk && !cap_rwbf(iommu->cap))

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(val & DMA_GSTS_WBFS)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
/* return value determines if we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu,
				  u16 did, u16 source_id, u8 function_mask,
	case DMA_CCMD_GLOBAL_INVL:
		val = DMA_CCMD_GLOBAL_INVL;
		break;
	case DMA_CCMD_DOMAIN_INVL:
		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
		break;
	case DMA_CCMD_DEVICE_INVL:
		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
		break;
	default:
		pr_warn("%s: Unexpected context-cache invalidation type 0x%llx\n",

	val |= DMA_CCMD_ICC;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
		      dmar_readq, (!(val & DMA_CCMD_ICC)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
/* return value determines if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;

	case DMA_TLB_GLOBAL_FLUSH:
		/* global flush doesn't need to set IVA_REG */
		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
		break;
	case DMA_TLB_DSI_FLUSH:
		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		break;
	case DMA_TLB_PSI_FLUSH:
		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
		/* IH bit is passed in as part of address */
		val_iva = size_order | addr;
		break;
	default:
		pr_warn("%s: Unexpected iotlb invalidation type 0x%llx\n",

	if (cap_write_drain(iommu->cap))
		val |= DMA_TLB_WRITE_DRAIN;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	/* Note: Only uses first TLB reg currently */
	dmar_writeq(iommu->reg + tlb_offset, val_iva);
	dmar_writeq(iommu->reg + tlb_offset + 8, val);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
		      dmar_readq, (!(val & DMA_TLB_IVT)), val);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

	/* check IOTLB invalidation granularity */
	if (DMA_TLB_IAIG(val) == 0)
		pr_err("Flush IOTLB failed\n");
	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
		pr_debug("TLB flush request %Lx, actual %Lx\n",
			 (unsigned long long)DMA_TLB_IIRG(type),
			 (unsigned long long)DMA_TLB_IAIG(val));
static struct device_domain_info *
domain_lookup_dev_info(struct dmar_domain *domain,
		       struct intel_iommu *iommu, u8 bus, u8 devfn)
	struct device_domain_info *info;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
			spin_unlock_irqrestore(&domain->lock, flags);
			return info;

	spin_unlock_irqrestore(&domain->lock, flags);
static void domain_update_iotlb(struct dmar_domain *domain)
	struct device_domain_info *info;
	bool has_iotlb_device = false;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (info->ats_enabled) {
			has_iotlb_device = true;
			break;
		}
	}

	domain->has_iotlb_device = has_iotlb_device;
	spin_unlock_irqrestore(&domain->lock, flags);
/*
 * The extra devTLB flush quirk impacts those QAT devices with PCI device
 * IDs ranging from 0x4940 to 0x4943. It is exempted from risky_device()
 * check because it applies only to the built-in QAT devices and it doesn't
 * grant additional privileges.
 */
#define BUGGY_QAT_DEVID_MASK 0x4940
static bool dev_needs_extra_dtlb_flush(struct pci_dev *pdev)
	if (pdev->vendor != PCI_VENDOR_ID_INTEL)
		return false;

	if ((pdev->device & 0xfffc) != BUGGY_QAT_DEVID_MASK)
		return false;

	return true;
static void iommu_enable_pci_caps(struct device_domain_info *info)
	struct pci_dev *pdev;

	if (!dev_is_pci(info->dev))
		return;

	pdev = to_pci_dev(info->dev);

	/* The PCIe spec, in its wisdom, declares that the behaviour of
	   the device if you enable PASID support after ATS support is
	   undefined. So always enable PASID support on devices which
	   have it, even if we can't yet know if we're ever going to
	   use it. */
	if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
		info->pasid_enabled = 1;

	if (info->ats_supported && pci_ats_page_aligned(pdev) &&
	    !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
		info->ats_enabled = 1;
		domain_update_iotlb(info->domain);
static void iommu_disable_pci_caps(struct device_domain_info *info)
	struct pci_dev *pdev;

	if (!dev_is_pci(info->dev))
		return;

	pdev = to_pci_dev(info->dev);

	if (info->ats_enabled) {
		pci_disable_ats(pdev);
		info->ats_enabled = 0;
		domain_update_iotlb(info->domain);

	if (info->pasid_enabled) {
		pci_disable_pasid(pdev);
		info->pasid_enabled = 0;
static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
				    u64 addr, unsigned int mask)
	if (!info || !info->ats_enabled)
		return;

	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
			   qdep, addr, mask);
	quirk_extra_dev_tlb_flush(info, addr, mask, PASID_RID2PASID, qdep);
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
				  u64 addr, unsigned mask)
	struct device_domain_info *info;
	unsigned long flags;

	if (!domain->has_iotlb_device)
		return;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(info, &domain->devices, link)
		__iommu_flush_dev_iotlb(info, addr, mask);
	spin_unlock_irqrestore(&domain->lock, flags);
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
				  struct dmar_domain *domain,
				  unsigned long pfn, unsigned int pages,
	unsigned int aligned_pages = __roundup_pow_of_two(pages);
	unsigned int mask = ilog2(aligned_pages);
	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
	u16 did = domain_id_iommu(domain, iommu);

	if (WARN_ON(!pages))
		return;

	if (domain->use_first_level) {
		qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih);
	} else {
		unsigned long bitmask = aligned_pages - 1;

		/*
		 * PSI masks the low order bits of the base address. If the
		 * address isn't aligned to the mask, then compute a mask value
		 * needed to ensure the target range is flushed.
		 */
		if (unlikely(bitmask & pfn)) {
			unsigned long end_pfn = pfn + pages - 1, shared_bits;

			/*
			 * Since end_pfn <= pfn + bitmask, the only way bits
			 * higher than bitmask can differ in pfn and end_pfn is
			 * by carrying. This means after masking out bitmask,
			 * high bits starting with the first set bit in
			 * shared_bits are all equal in both pfn and end_pfn.
			 */
			shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
			mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
		}
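		/*
		 * Worked example (added for illustration, not from the
		 * original source): pfn = 3 and pages = 2 give aligned_pages
		 * = 2 and bitmask = 1, but {3, 4} is not 2-page aligned.
		 * end_pfn = 4, pfn ^ end_pfn = 0b111, so shared_bits =
		 * ~0b111 & ~1 has its first set bit at position 3 and the
		 * flush is widened to an aligned 2^3 = 8-page region that
		 * covers both pages.
		 */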
		/*
		 * Fallback to domain selective flush if no PSI support or
		 * the size is too big.
		 */
		if (!cap_pgsel_inv(iommu->cap) ||
		    mask > cap_max_amask_val(iommu->cap))
			iommu->flush.flush_iotlb(iommu, did, 0, 0,
						 DMA_TLB_DSI_FLUSH);
		else
			iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
						 DMA_TLB_PSI_FLUSH);
	}

	/*
	 * In caching mode, changes of pages from non-present to present require
	 * flush. However, device IOTLB doesn't need to be flushed in this case.
	 */
	if (!cap_caching_mode(iommu->cap) || !map)
		iommu_flush_dev_iotlb(domain, addr, mask);
/* Notification for newly created mappings */
static inline void __mapping_notify_one(struct intel_iommu *iommu,
					struct dmar_domain *domain,
					unsigned long pfn, unsigned int pages)
	/*
	 * It's a non-present to present mapping. Only flush if caching mode
	 * and second level.
	 */
	if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
	else
		iommu_flush_write_buffer(iommu);
static void intel_flush_iotlb_all(struct iommu_domain *domain)
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct iommu_domain_info *info;

	xa_for_each(&dmar_domain->iommu_array, idx, info) {
		struct intel_iommu *iommu = info->iommu;
		u16 did = domain_id_iommu(dmar_domain, iommu);

		if (dmar_domain->use_first_level)
			qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0);
		else
			iommu->flush.flush_iotlb(iommu, did, 0, 0,
						 DMA_TLB_DSI_FLUSH);

		if (!cap_caching_mode(iommu->cap))
			iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
	unsigned long flags;

	if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	pmen = readl(iommu->reg + DMAR_PMEN_REG);
	pmen &= ~DMA_PMEN_EPM;
	writel(pmen, iommu->reg + DMAR_PMEN_REG);

	/* wait for the protected region status bit to clear */
	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
		      readl, !(pmen & DMA_PMEN_PRS), pmen);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
static void iommu_enable_translation(struct intel_iommu *iommu)
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	iommu->gcmd |= DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_TES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
static void iommu_disable_translation(struct intel_iommu *iommu)
	if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
	    (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	iommu->gcmd &= ~DMA_GCMD_TE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (!(sts & DMA_GSTS_TES)), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
static int iommu_init_domains(struct intel_iommu *iommu)
	ndomains = cap_ndoms(iommu->cap);
	pr_debug("%s: Number of Domains supported <%d>\n",
		 iommu->name, ndomains);

	spin_lock_init(&iommu->lock);

	iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL);
	if (!iommu->domain_ids)
		return -ENOMEM;

	/*
	 * If Caching mode is set, then invalid translations are tagged
	 * with domain-id 0, hence we need to pre-allocate it. We also
	 * use domain-id 0 as a marker for non-allocated domain-id, so
	 * make sure it is not used for a real domain.
	 */
	set_bit(0, iommu->domain_ids);

	/*
	 * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
	 * entry for first-level or pass-through translation modes should
	 * be programmed with a domain id different from those used for
	 * second-level or nested translation. We reserve a domain id for
	 * this purpose.
	 */
	if (sm_supported(iommu))
		set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
static void disable_dmar_iommu(struct intel_iommu *iommu)
	if (!iommu->domain_ids)
		return;

	/*
	 * All iommu domains must have been detached from the devices,
	 * hence there should be no domain IDs in use.
	 */
	if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))
		    > NUM_RESERVED_DID))
		return;

	if (iommu->gcmd & DMA_GCMD_TE)
		iommu_disable_translation(iommu);

static void free_dmar_iommu(struct intel_iommu *iommu)
	if (iommu->domain_ids) {
		bitmap_free(iommu->domain_ids);
		iommu->domain_ids = NULL;
	}

	if (iommu->copied_tables) {
		bitmap_free(iommu->copied_tables);
		iommu->copied_tables = NULL;
	}

	/* free context mapping */
	free_context_table(iommu);

#ifdef CONFIG_INTEL_IOMMU_SVM
	if (pasid_supported(iommu)) {
		if (ecap_prs(iommu->ecap))
			intel_svm_finish_prq(iommu);
/*
 * Check and return whether first level is used by default for
 * DMA translation.
 */
static bool first_level_by_default(unsigned int type)
	/* Only SL is available in legacy mode */
	if (!scalable_mode_support())
		return false;

	/* Only level (either FL or SL) is available, just use it */
	if (intel_cap_flts_sanity() ^ intel_cap_slts_sanity())
		return intel_cap_flts_sanity();

	/* Both levels are available, decide it based on domain type */
	return type != IOMMU_DOMAIN_UNMANAGED;
static struct dmar_domain *alloc_domain(unsigned int type)
	struct dmar_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);

	domain->nid = NUMA_NO_NODE;
	if (first_level_by_default(type))
		domain->use_first_level = true;
	domain->has_iotlb_device = false;
	INIT_LIST_HEAD(&domain->devices);
	spin_lock_init(&domain->lock);
	xa_init(&domain->iommu_array);
static int domain_attach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
	struct iommu_domain_info *info, *curr;
	unsigned long ndomains;
	int num, ret = -ENOSPC;

	info = kzalloc(sizeof(*info), GFP_KERNEL);

	spin_lock(&iommu->lock);
	curr = xa_load(&domain->iommu_array, iommu->seq_id);

		spin_unlock(&iommu->lock);

	ndomains = cap_ndoms(iommu->cap);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		pr_err("%s: No free domain ids\n", iommu->name);

	set_bit(num, iommu->domain_ids);

	info->iommu = iommu;
	curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id,
			  NULL, info, GFP_ATOMIC);
		ret = xa_err(curr) ? : -EBUSY;

	domain_update_iommu_cap(domain);
	spin_unlock(&iommu->lock);

	clear_bit(info->did, iommu->domain_ids);
	spin_unlock(&iommu->lock);
static void domain_detach_iommu(struct dmar_domain *domain,
				struct intel_iommu *iommu)
	struct iommu_domain_info *info;

	spin_lock(&iommu->lock);
	info = xa_load(&domain->iommu_array, iommu->seq_id);
	if (--info->refcnt == 0) {
		clear_bit(info->did, iommu->domain_ids);
		xa_erase(&domain->iommu_array, iommu->seq_id);
		domain->nid = NUMA_NO_NODE;
		domain_update_iommu_cap(domain);
	}
	spin_unlock(&iommu->lock);

static inline int guestwidth_to_adjustwidth(int gaw)
	int r = (gaw - 12) % 9;
static void domain_exit(struct dmar_domain *domain)
	LIST_HEAD(freelist);

	domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
	put_pages_list(&freelist);

	if (WARN_ON(!list_empty(&domain->devices)))
/*
 * Get the PASID directory size for scalable mode context entry.
 * Value of X in the PDTS field of a scalable mode context entry
 * indicates PASID directory with 2^(X + 7) entries.
 */
static inline unsigned long context_get_sm_pds(struct pasid_table *table)
	unsigned long pds, max_pde;

	max_pde = table->max_pasid >> PASID_PDE_SHIFT;
	pds = find_first_bit(&max_pde, MAX_NR_PASID_BITS);
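	/*
	 * Illustrative example (added, not from the original source): with a
	 * 20-bit PASID space, max_pasid >> PASID_PDE_SHIFT (6) leaves 2^14
	 * directory entries, find_first_bit() returns 14, and the PDTS field
	 * is programmed as 14 - 7 = 7, i.e. 2^(7 + 7) = 16384 entries.
	 */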
/*
 * Set the RID_PASID field of a scalable mode context entry. The
 * IOMMU hardware will use the PASID value set in this field for
 * DMA translations of DMA requests without PASID.
 */
static inline void
context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
	context->hi |= pasid & ((1 << 20) - 1);

/*
 * Set the DTE(Device-TLB Enable) field of a scalable mode context
 * entry.
 */
static inline void context_set_sm_dte(struct context_entry *context)
	context->lo |= BIT_ULL(2);

/*
 * Set the PRE(Page Request Enable) field of a scalable mode context
 * entry.
 */
static inline void context_set_sm_pre(struct context_entry *context)
	context->lo |= BIT_ULL(4);

/* Convert value to context PASID directory size field coding. */
#define context_pdts(pds)	(((pds) & 0x7) << 9)
static int domain_context_mapping_one(struct dmar_domain *domain,
				      struct intel_iommu *iommu,
				      struct pasid_table *table,
	struct device_domain_info *info =
			domain_lookup_dev_info(domain, iommu, bus, devfn);
	u16 did = domain_id_iommu(domain, iommu);
	int translation = CONTEXT_TT_MULTI_LEVEL;
	struct context_entry *context;

	if (hw_pass_through && domain_type_is_si(domain))
		translation = CONTEXT_TT_PASS_THROUGH;

	pr_debug("Set context mapping for %02x:%02x.%d\n",
		 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

	spin_lock(&iommu->lock);
	context = iommu_context_addr(iommu, bus, devfn, 1);

	if (context_present(context) && !context_copied(iommu, bus, devfn))

	/*
	 * For kdump cases, old valid entries may be cached due to the
	 * in-flight DMA and copied pgtable, but there is no unmapping
	 * behaviour for them, thus we need an explicit cache flush for
	 * the newly-mapped device. For kdump, at this point, the device
	 * is supposed to finish reset at its driver probe stage, so no
	 * in-flight DMA will exist, and we don't need to worry anymore
	 * hereafter.
	 */
	if (context_copied(iommu, bus, devfn)) {
		u16 did_old = context_domain_id(context);

		if (did_old < cap_ndoms(iommu->cap)) {
			iommu->flush.flush_context(iommu, did_old,
						   (((u16)bus) << 8) | devfn,
						   DMA_CCMD_MASK_NOBIT,
						   DMA_CCMD_DEVICE_INVL);
			iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
						 DMA_TLB_DSI_FLUSH);
		}

		clear_context_copied(iommu, bus, devfn);
	}

	context_clear_entry(context);

	if (sm_supported(iommu)) {

		/* Setup the PASID DIR pointer: */
		pds = context_get_sm_pds(table);
		context->lo = (u64)virt_to_phys(table->table) |
				context_pdts(pds);

		/* Setup the RID_PASID field: */
		context_set_sm_rid2pasid(context, PASID_RID2PASID);

		/*
		 * Setup the Device-TLB enable bit and Page request
		 * Enable bit:
		 */
		if (info && info->ats_supported)
			context_set_sm_dte(context);
		if (info && info->pri_supported)
			context_set_sm_pre(context);
		if (info && info->pasid_supported)
			context_set_pasid(context);
	} else {
		struct dma_pte *pgd = domain->pgd;

		context_set_domain_id(context, did);

		if (translation != CONTEXT_TT_PASS_THROUGH) {
			/*
			 * Skip top levels of page tables for iommu which has
			 * less agaw than default. Unnecessary for PT mode.
			 */
			for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
				pgd = phys_to_virt(dma_pte_addr(pgd));
				if (!dma_pte_present(pgd))

			if (info && info->ats_supported)
				translation = CONTEXT_TT_DEV_IOTLB;
			else
				translation = CONTEXT_TT_MULTI_LEVEL;

			context_set_address_root(context, virt_to_phys(pgd));
			context_set_address_width(context, agaw);
		} else {
			/*
			 * In pass through mode, AW must be programmed to
			 * indicate the largest AGAW value supported by
			 * hardware. And ASR is ignored by hardware.
			 */
			context_set_address_width(context, iommu->msagaw);
		}

		context_set_translation_type(context, translation);
	}

	context_set_fault_enable(context);
	context_set_present(context);
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(context, sizeof(*context));

	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entry we only need to flush the write-buffer. If it
	 * _does_ cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   (((u16)bus) << 8) | devfn,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
	} else {
		iommu_flush_write_buffer(iommu);
	}

	spin_unlock(&iommu->lock);
struct domain_context_mapping_data {
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	struct pasid_table *table;
};

static int domain_context_mapping_cb(struct pci_dev *pdev,
				     u16 alias, void *opaque)
	struct domain_context_mapping_data *data = opaque;

	return domain_context_mapping_one(data->domain, data->iommu,
					  data->table, PCI_BUS_NUM(alias),
					  alias & 0xff);
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
	struct domain_context_mapping_data data;
	struct pasid_table *table;
	struct intel_iommu *iommu;

	iommu = device_to_iommu(dev, &bus, &devfn);

	table = intel_pasid_get_table(dev);

	if (!dev_is_pci(dev))
		return domain_context_mapping_one(domain, iommu, table,
						  bus, devfn);

	data.domain = domain;

	return pci_for_each_dma_alias(to_pci_dev(dev),
				      &domain_context_mapping_cb, &data);
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
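/*
 * Illustrative example (added, not from the original source): with 4 KiB
 * pages on both sides, host_addr = 0x1234 and size = 0x2000 keep an in-page
 * offset of 0x234, PAGE_ALIGN(0x2234) = 0x3000, and the helper reports
 * three VT-d pages, accounting for the partially used first and last pages.
 */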
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
					  unsigned long iov_pfn,
					  unsigned long phy_pfn,
					  unsigned long pages)
	int support, level = 1;
	unsigned long pfnmerge;

	support = domain->iommu_superpage;

	/* To use a large page, the virtual *and* physical addresses
	   must be aligned to 2MiB/1GiB/etc. Lower bits set in either
	   of them will mean we have to use smaller pages. So just
	   merge them and check both at once. */
	pfnmerge = iov_pfn | phy_pfn;

	while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
		pages >>= VTD_STRIDE_SHIFT;
		pfnmerge >>= VTD_STRIDE_SHIFT;
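		/*
		 * Illustrative example (added, not from the original source):
		 * iov_pfn = phy_pfn = 0x200 (both 2 MiB aligned with 4 KiB
		 * pages) merge to pfnmerge = 0x200, whose low nine bits are
		 * clear, so a level-2 (2 MiB) superpage is possible; a low
		 * bit set in either PFN would force 4 KiB mappings instead.
		 */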
/*
 * Ensure that old small page tables are removed to make room for superpage(s).
 * We're going to add new large pages, so make sure we don't remove their parent
 * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
 */
static void switch_to_super_page(struct dmar_domain *domain,
				 unsigned long start_pfn,
				 unsigned long end_pfn, int level)
	unsigned long lvl_pages = lvl_to_nr_pages(level);
	struct iommu_domain_info *info;
	struct dma_pte *pte = NULL;

	while (start_pfn <= end_pfn) {
			pte = pfn_to_dma_pte(domain, start_pfn, &level,

		if (dma_pte_present(pte)) {
			dma_pte_free_pagetable(domain, start_pfn,
					       start_pfn + lvl_pages - 1,

			xa_for_each(&domain->iommu_array, i, info)
				iommu_flush_iotlb_psi(info->iommu, domain,
						      start_pfn, lvl_pages,

		start_pfn += lvl_pages;
		if (first_pte_in_page(pte))
static int
__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
		 unsigned long phys_pfn, unsigned long nr_pages, int prot,
	struct dma_pte *first_pte = NULL, *pte = NULL;
	unsigned int largepage_lvl = 0;
	unsigned long lvl_pages = 0;

	if (unlikely(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)))

	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)

	attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
	attr |= DMA_FL_PTE_PRESENT;
	if (domain->use_first_level) {
		attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
		if (prot & DMA_PTE_WRITE)
			attr |= DMA_FL_PTE_DIRTY;
	}

	pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;

	while (nr_pages > 0) {
			largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
								phys_pfn, nr_pages);

			pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl,

			lvl_pages = lvl_to_nr_pages(largepage_lvl);

			/* It is large page */
			if (largepage_lvl > 1) {
				unsigned long end_pfn;
				unsigned long pages_to_remove;

				pteval |= DMA_PTE_LARGE_PAGE;
				pages_to_remove = min_t(unsigned long, nr_pages,
							nr_pte_to_next_page(pte) * lvl_pages);
				end_pfn = iov_pfn + pages_to_remove - 1;
				switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
			} else {
				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
			}

		/* We don't need lock here, nobody else
		 * touches the iova range
		 */
		tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
			static int dumps = 5;
			pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
				iov_pfn, tmp, (unsigned long long)pteval);
				debug_dma_dump_mappings(NULL);

		nr_pages -= lvl_pages;
		iov_pfn += lvl_pages;
		phys_pfn += lvl_pages;
		pteval += lvl_pages * VTD_PAGE_SIZE;

		/* If the next PTE would be the first in a new page, then we
		 * need to flush the cache on the entries we've just written.
		 * And then we'll need to recalculate 'pte', so clear it and
		 * let it get set again in the if (!pte) block above.
		 *
		 * If we're done (!nr_pages) we need to flush the cache too.
		 *
		 * Also if we've been setting superpages, we may need to
		 * recalculate 'pte' and switch back to smaller pages for the
		 * end of the mapping, if the trailing size is not enough to
		 * use another superpage (i.e. nr_pages < lvl_pages).
		 */
		if (!nr_pages || first_pte_in_page(pte) ||
		    (largepage_lvl > 1 && nr_pages < lvl_pages)) {
			domain_flush_cache(domain, first_pte,
					   (void *)pte - (void *)first_pte);
static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
{
	struct intel_iommu *iommu = info->iommu;
	struct context_entry *context;
	u16 did_old;

	if (!iommu)
		return;

	spin_lock(&iommu->lock);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (!context) {
		spin_unlock(&iommu->lock);
		return;
	}

	if (sm_supported(iommu)) {
		if (hw_pass_through && domain_type_is_si(info->domain))
			did_old = FLPT_DEFAULT_DID;
		else
			did_old = domain_id_iommu(info->domain, iommu);
	} else {
		did_old = context_domain_id(context);
	}

	context_clear_entry(context);
	__iommu_flush_cache(iommu, context, sizeof(*context));
	spin_unlock(&iommu->lock);
	iommu->flush.flush_context(iommu,
				   did_old,
				   (((u16)bus) << 8) | devfn,
				   DMA_CCMD_MASK_NOBIT,
				   DMA_CCMD_DEVICE_INVL);

	if (sm_supported(iommu))
		qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);

	iommu->flush.flush_iotlb(iommu,
				 did_old,
				 0,
				 0,
				 DMA_TLB_DSI_FLUSH);

	__iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
}
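/*
 * Set up a first-level (scalable mode) PASID table entry for @dev using the
 * domain's page table. Top levels are skipped when the IOMMU supports a
 * smaller AGAW than the domain; only 4- and 5-level tables are accepted.
 */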
static int domain_setup_first_level(struct intel_iommu *iommu,
				    struct dmar_domain *domain,
				    struct device *dev,
				    u32 pasid)
{
	struct dma_pte *pgd = domain->pgd;
	int agaw, level;
	int flags = 0;

	/*
	 * Skip top levels of page tables for iommu which has
	 * less agaw than default. Unnecessary for PT mode.
	 */
	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		pgd = phys_to_virt(dma_pte_addr(pgd));
		if (!dma_pte_present(pgd))
			return -ENOMEM;
	}

	level = agaw_to_level(agaw);
	if (level != 4 && level != 5)
		return -EINVAL;

	if (level == 5)
		flags |= PASID_FLAG_FL5LP;

	if (domain->force_snooping)
		flags |= PASID_FLAG_PAGE_SNOOP;

	return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
					     domain_id_iommu(domain, iommu),
					     flags);
}
static bool dev_is_real_dma_subdevice(struct device *dev)
{
	return dev && dev_is_pci(dev) &&
	       pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
}
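/*
 * Map [first_vpfn, last_vpfn] 1:1 in @domain. Used for the static identity
 * (si) domain and for RMRR ranges.
 */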
static int iommu_domain_identity_map(struct dmar_domain *domain,
				     unsigned long first_vpfn,
				     unsigned long last_vpfn)
{
	/*
	 * RMRR range might have overlap with physical memory range,
	 * clear it first
	 */
	dma_pte_clear_range(domain, first_vpfn, last_vpfn);

	return __domain_mapping(domain, first_vpfn,
				first_vpfn, last_vpfn - first_vpfn + 1,
				DMA_PTE_READ|DMA_PTE_WRITE, GFP_KERNEL);
}
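/*
 * si_domain_init() below builds the static identity domain: the usable
 * memory ranges of each online node, plus all RMRR ranges, are identity
 * mapped so that devices attached to it can DMA with physical addresses.
 */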
2339 static int md_domain_init(struct dmar_domain
*domain
, int guest_width
);
2341 static int __init
si_domain_init(int hw
)
2343 struct dmar_rmrr_unit
*rmrr
;
2347 si_domain
= alloc_domain(IOMMU_DOMAIN_IDENTITY
);
2351 if (md_domain_init(si_domain
, DEFAULT_DOMAIN_ADDRESS_WIDTH
)) {
2352 domain_exit(si_domain
);
2360 for_each_online_node(nid
) {
2361 unsigned long start_pfn
, end_pfn
;
2364 for_each_mem_pfn_range(i
, nid
, &start_pfn
, &end_pfn
, NULL
) {
2365 ret
= iommu_domain_identity_map(si_domain
,
2366 mm_to_dma_pfn(start_pfn
),
2367 mm_to_dma_pfn(end_pfn
));
2374 * Identity map the RMRRs so that devices with RMRRs could also use
2377 for_each_rmrr_units(rmrr
) {
2378 for_each_active_dev_scope(rmrr
->devices
, rmrr
->devices_cnt
,
2380 unsigned long long start
= rmrr
->base_address
;
2381 unsigned long long end
= rmrr
->end_address
;
2383 if (WARN_ON(end
< start
||
2384 end
>> agaw_to_width(si_domain
->agaw
)))
2387 ret
= iommu_domain_identity_map(si_domain
,
2388 mm_to_dma_pfn(start
>> PAGE_SHIFT
),
2389 mm_to_dma_pfn(end
>> PAGE_SHIFT
));
2398 static int dmar_domain_attach_device(struct dmar_domain
*domain
,
2401 struct device_domain_info
*info
= dev_iommu_priv_get(dev
);
2402 struct intel_iommu
*iommu
;
2403 unsigned long flags
;
2407 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
2411 ret
= domain_attach_iommu(domain
, iommu
);
2414 info
->domain
= domain
;
2415 spin_lock_irqsave(&domain
->lock
, flags
);
2416 list_add(&info
->link
, &domain
->devices
);
2417 spin_unlock_irqrestore(&domain
->lock
, flags
);
2419 /* PASID table is mandatory for a PCI device in scalable mode. */
2420 if (sm_supported(iommu
) && !dev_is_real_dma_subdevice(dev
)) {
2421 /* Setup the PASID entry for requests without PASID: */
2422 if (hw_pass_through
&& domain_type_is_si(domain
))
2423 ret
= intel_pasid_setup_pass_through(iommu
, domain
,
2424 dev
, PASID_RID2PASID
);
2425 else if (domain
->use_first_level
)
2426 ret
= domain_setup_first_level(iommu
, domain
, dev
,
2429 ret
= intel_pasid_setup_second_level(iommu
, domain
,
2430 dev
, PASID_RID2PASID
);
2432 dev_err(dev
, "Setup RID2PASID failed\n");
2433 device_block_translation(dev
);
2438 ret
= domain_context_mapping(domain
, dev
);
2440 dev_err(dev
, "Domain context map failed\n");
2441 device_block_translation(dev
);
2445 iommu_enable_pci_caps(info
);
static bool device_has_rmrr(struct device *dev)
{
	struct dmar_rmrr_unit *rmrr;
	struct device *tmp;
	int i;

	rcu_read_lock();
	for_each_rmrr_units(rmrr) {
		/*
		 * Return TRUE if this RMRR contains the device that
		 * is passed in.
		 */
		for_each_active_dev_scope(rmrr->devices,
					  rmrr->devices_cnt, i, tmp)
			if (tmp == dev ||
			    is_downstream_to_pci_bridge(dev, tmp)) {
				rcu_read_unlock();
				return true;
			}
	}
	rcu_read_unlock();
	return false;
}
/*
 * device_rmrr_is_relaxable - Test whether the RMRR of this device
 * is relaxable (ie. is allowed to be not enforced under some conditions)
 * @dev: device handle
 *
 * We assume that PCI USB devices with RMRRs have them largely
 * for historical reasons and that the RMRR space is not actively used post
 * boot. This exclusion may change if vendors begin to abuse it.
 *
 * The same exception is made for graphics devices, with the requirement that
 * any use of the RMRR regions will be torn down before assigning the device
 * to a guest.
 *
 * Return: true if the RMRR is relaxable, false otherwise
 */
static bool device_rmrr_is_relaxable(struct device *dev)
{
	struct pci_dev *pdev;

	if (!dev_is_pci(dev))
		return false;

	pdev = to_pci_dev(dev);
	if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
		return true;
	else
		return false;
}
/*
 * There are a couple cases where we need to restrict the functionality of
 * devices associated with RMRRs. The first is when evaluating a device for
 * identity mapping because problems exist when devices are moved in and out
 * of domains and their respective RMRR information is lost. This means that
 * a device with associated RMRRs will never be in a "passthrough" domain.
 * The second is use of the device through the IOMMU API. This interface
 * expects to have full control of the IOVA space for the device. We cannot
 * satisfy both the requirement that RMRR access is maintained and have an
 * unencumbered IOVA space. We also have no ability to quiesce the device's
 * use of the RMRR space or even inform the IOMMU API user of the restriction.
 * We therefore prevent devices associated with an RMRR from participating in
 * the IOMMU API, which eliminates them from device assignment.
 *
 * In both cases, devices which have relaxable RMRRs are not concerned by this
 * restriction. See device_rmrr_is_relaxable comment.
 */
static bool device_is_rmrr_locked(struct device *dev)
{
	if (!device_has_rmrr(dev))
		return false;

	if (device_rmrr_is_relaxable(dev))
		return false;

	return true;
}
/*
 * Return the required default domain type for a specific device.
 *
 * @dev: the device in query
 * @startup: true if this is during early boot
 *
 * Returns:
 *  - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
 *  - IOMMU_DOMAIN_IDENTITY: device requires an identity mapping domain
 *  - 0: both identity and dynamic domains work for this device
 */
static int device_def_domain_type(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
			return IOMMU_DOMAIN_IDENTITY;

		if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
			return IOMMU_DOMAIN_IDENTITY;
	}

	return 0;
}
static void intel_iommu_init_qi(struct intel_iommu *iommu)
{
	/*
	 * Start from the sane iommu hardware state.
	 * If the queued invalidation is already initialized by us
	 * (for example, while enabling interrupt-remapping) then
	 * we got the things already rolling from a sane state.
	 */
	if (!iommu->qi) {
		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	if (dmar_enable_qi(iommu)) {
		/*
		 * Queued Invalidate not enabled, use Register Based Invalidate
		 */
		iommu->flush.flush_context = __iommu_flush_context;
		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
		pr_info("%s: Using Register based invalidation\n",
			iommu->name);
	} else {
		iommu->flush.flush_context = qi_flush_context;
		iommu->flush.flush_iotlb = qi_flush_iotlb;
		pr_info("%s: Using Queued invalidation\n", iommu->name);
	}
}
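/*
 * copy_context_table() and copy_translation_tables() below preserve the DMA
 * mappings of a previous kernel (e.g. when this kernel was entered with
 * translation already enabled, as in a kdump boot) by copying the old root
 * and context tables rather than starting from empty tables.
 */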
2592 static int copy_context_table(struct intel_iommu
*iommu
,
2593 struct root_entry
*old_re
,
2594 struct context_entry
**tbl
,
2597 int tbl_idx
, pos
= 0, idx
, devfn
, ret
= 0, did
;
2598 struct context_entry
*new_ce
= NULL
, ce
;
2599 struct context_entry
*old_ce
= NULL
;
2600 struct root_entry re
;
2601 phys_addr_t old_ce_phys
;
2603 tbl_idx
= ext
? bus
* 2 : bus
;
2604 memcpy(&re
, old_re
, sizeof(re
));
2606 for (devfn
= 0; devfn
< 256; devfn
++) {
2607 /* First calculate the correct index */
2608 idx
= (ext
? devfn
* 2 : devfn
) % 256;
2611 /* First save what we may have and clean up */
2613 tbl
[tbl_idx
] = new_ce
;
2614 __iommu_flush_cache(iommu
, new_ce
,
2624 old_ce_phys
= root_entry_lctp(&re
);
2626 old_ce_phys
= root_entry_uctp(&re
);
2629 if (ext
&& devfn
== 0) {
2630 /* No LCTP, try UCTP */
2639 old_ce
= memremap(old_ce_phys
, PAGE_SIZE
,
2644 new_ce
= alloc_pgtable_page(iommu
->node
, GFP_KERNEL
);
2651 /* Now copy the context entry */
2652 memcpy(&ce
, old_ce
+ idx
, sizeof(ce
));
2654 if (!context_present(&ce
))
2657 did
= context_domain_id(&ce
);
2658 if (did
>= 0 && did
< cap_ndoms(iommu
->cap
))
2659 set_bit(did
, iommu
->domain_ids
);
2661 set_context_copied(iommu
, bus
, devfn
);
2665 tbl
[tbl_idx
+ pos
] = new_ce
;
2667 __iommu_flush_cache(iommu
, new_ce
, VTD_PAGE_SIZE
);
2676 static int copy_translation_tables(struct intel_iommu
*iommu
)
2678 struct context_entry
**ctxt_tbls
;
2679 struct root_entry
*old_rt
;
2680 phys_addr_t old_rt_phys
;
2681 int ctxt_table_entries
;
2686 rtaddr_reg
= dmar_readq(iommu
->reg
+ DMAR_RTADDR_REG
);
2687 ext
= !!(rtaddr_reg
& DMA_RTADDR_SMT
);
2688 new_ext
= !!sm_supported(iommu
);
2691 * The RTT bit can only be changed when translation is disabled,
2692 * but disabling translation means to open a window for data
2693 * corruption. So bail out and don't copy anything if we would
2694 * have to change the bit.
2699 iommu
->copied_tables
= bitmap_zalloc(BIT_ULL(16), GFP_KERNEL
);
2700 if (!iommu
->copied_tables
)
2703 old_rt_phys
= rtaddr_reg
& VTD_PAGE_MASK
;
2707 old_rt
= memremap(old_rt_phys
, PAGE_SIZE
, MEMREMAP_WB
);
2711 /* This is too big for the stack - allocate it from slab */
2712 ctxt_table_entries
= ext
? 512 : 256;
2714 ctxt_tbls
= kcalloc(ctxt_table_entries
, sizeof(void *), GFP_KERNEL
);
2718 for (bus
= 0; bus
< 256; bus
++) {
2719 ret
= copy_context_table(iommu
, &old_rt
[bus
],
2720 ctxt_tbls
, bus
, ext
);
2722 pr_err("%s: Failed to copy context table for bus %d\n",
2728 spin_lock(&iommu
->lock
);
2730 /* Context tables are copied, now write them to the root_entry table */
2731 for (bus
= 0; bus
< 256; bus
++) {
2732 int idx
= ext
? bus
* 2 : bus
;
2735 if (ctxt_tbls
[idx
]) {
2736 val
= virt_to_phys(ctxt_tbls
[idx
]) | 1;
2737 iommu
->root_entry
[bus
].lo
= val
;
2740 if (!ext
|| !ctxt_tbls
[idx
+ 1])
2743 val
= virt_to_phys(ctxt_tbls
[idx
+ 1]) | 1;
2744 iommu
->root_entry
[bus
].hi
= val
;
2747 spin_unlock(&iommu
->lock
);
2751 __iommu_flush_cache(iommu
, iommu
->root_entry
, PAGE_SIZE
);
2761 static int __init
init_dmars(void)
2763 struct dmar_drhd_unit
*drhd
;
2764 struct intel_iommu
*iommu
;
2767 ret
= intel_cap_audit(CAP_AUDIT_STATIC_DMAR
, NULL
);
2771 for_each_iommu(iommu
, drhd
) {
2772 if (drhd
->ignored
) {
2773 iommu_disable_translation(iommu
);
2778 * Find the max pasid size of all IOMMU's in the system.
2779 * We need to ensure the system pasid table is no bigger
2780 * than the smallest supported.
2782 if (pasid_supported(iommu
)) {
2783 u32 temp
= 2 << ecap_pss(iommu
->ecap
);
2785 intel_pasid_max_id
= min_t(u32
, temp
,
2786 intel_pasid_max_id
);
2789 intel_iommu_init_qi(iommu
);
2791 ret
= iommu_init_domains(iommu
);
2795 init_translation_status(iommu
);
2797 if (translation_pre_enabled(iommu
) && !is_kdump_kernel()) {
2798 iommu_disable_translation(iommu
);
2799 clear_translation_pre_enabled(iommu
);
2800 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
2806 * we could share the same root & context tables
2807 * among all IOMMU's. Need to Split it later.
2809 ret
= iommu_alloc_root_entry(iommu
);
2813 if (translation_pre_enabled(iommu
)) {
2814 pr_info("Translation already enabled - trying to copy translation structures\n");
2816 ret
= copy_translation_tables(iommu
);
2819 * We found the IOMMU with translation
2820 * enabled - but failed to copy over the
2821 * old root-entry table. Try to proceed
2822 * by disabling translation now and
2823 * allocating a clean root-entry table.
2824 * This might cause DMAR faults, but
2825 * probably the dump will still succeed.
2827 pr_err("Failed to copy translation tables from previous kernel for %s\n",
2829 iommu_disable_translation(iommu
);
2830 clear_translation_pre_enabled(iommu
);
2832 pr_info("Copied translation tables from previous kernel for %s\n",
2837 if (!ecap_pass_through(iommu
->ecap
))
2838 hw_pass_through
= 0;
2839 intel_svm_check(iommu
);
2843 * Now that qi is enabled on all iommus, set the root entry and flush
2844 * caches. This is required on some Intel X58 chipsets, otherwise the
2845 * flush_context function will loop forever and the boot hangs.
2847 for_each_active_iommu(iommu
, drhd
) {
2848 iommu_flush_write_buffer(iommu
);
2849 iommu_set_root_entry(iommu
);
2852 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2857 iommu_identity_mapping
|= IDENTMAP_GFX
;
2859 check_tylersburg_isoch();
2861 ret
= si_domain_init(hw_pass_through
);
2868 * global invalidate context cache
2869 * global invalidate iotlb
2870 * enable translation
2872 for_each_iommu(iommu
, drhd
) {
2873 if (drhd
->ignored
) {
2875 * we always have to disable PMRs or DMA may fail on
2879 iommu_disable_protect_mem_regions(iommu
);
2883 iommu_flush_write_buffer(iommu
);
2885 #ifdef CONFIG_INTEL_IOMMU_SVM
2886 if (pasid_supported(iommu
) && ecap_prs(iommu
->ecap
)) {
2888 * Call dmar_alloc_hwirq() with dmar_global_lock held,
2889 * could cause possible lock race condition.
2891 up_write(&dmar_global_lock
);
2892 ret
= intel_svm_enable_prq(iommu
);
2893 down_write(&dmar_global_lock
);
2898 ret
= dmar_set_interrupt(iommu
);
2906 for_each_active_iommu(iommu
, drhd
) {
2907 disable_dmar_iommu(iommu
);
2908 free_dmar_iommu(iommu
);
2911 domain_exit(si_domain
);
2918 static void __init
init_no_remapping_devices(void)
2920 struct dmar_drhd_unit
*drhd
;
2924 for_each_drhd_unit(drhd
) {
2925 if (!drhd
->include_all
) {
2926 for_each_active_dev_scope(drhd
->devices
,
2927 drhd
->devices_cnt
, i
, dev
)
2929 /* ignore DMAR unit if no devices exist */
2930 if (i
== drhd
->devices_cnt
)
2935 for_each_active_drhd_unit(drhd
) {
2936 if (drhd
->include_all
)
2939 for_each_active_dev_scope(drhd
->devices
,
2940 drhd
->devices_cnt
, i
, dev
)
2941 if (!dev_is_pci(dev
) || !IS_GFX_DEVICE(to_pci_dev(dev
)))
2943 if (i
< drhd
->devices_cnt
)
2946 /* This IOMMU has *only* gfx devices. Either bypass it or
2947 set the gfx_mapped flag, as appropriate */
2948 drhd
->gfx_dedicated
= 1;
2954 #ifdef CONFIG_SUSPEND
2955 static int init_iommu_hw(void)
2957 struct dmar_drhd_unit
*drhd
;
2958 struct intel_iommu
*iommu
= NULL
;
2961 for_each_active_iommu(iommu
, drhd
) {
2963 ret
= dmar_reenable_qi(iommu
);
2969 for_each_iommu(iommu
, drhd
) {
2970 if (drhd
->ignored
) {
2972 * we always have to disable PMRs or DMA may fail on
2976 iommu_disable_protect_mem_regions(iommu
);
2980 iommu_flush_write_buffer(iommu
);
2981 iommu_set_root_entry(iommu
);
2982 iommu_enable_translation(iommu
);
2983 iommu_disable_protect_mem_regions(iommu
);
static void iommu_flush_all(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd) {
		iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH);
	}
}
3002 static int iommu_suspend(void)
3004 struct dmar_drhd_unit
*drhd
;
3005 struct intel_iommu
*iommu
= NULL
;
3008 for_each_active_iommu(iommu
, drhd
) {
3009 iommu
->iommu_state
= kcalloc(MAX_SR_DMAR_REGS
, sizeof(u32
),
3011 if (!iommu
->iommu_state
)
3017 for_each_active_iommu(iommu
, drhd
) {
3018 iommu_disable_translation(iommu
);
3020 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
3022 iommu
->iommu_state
[SR_DMAR_FECTL_REG
] =
3023 readl(iommu
->reg
+ DMAR_FECTL_REG
);
3024 iommu
->iommu_state
[SR_DMAR_FEDATA_REG
] =
3025 readl(iommu
->reg
+ DMAR_FEDATA_REG
);
3026 iommu
->iommu_state
[SR_DMAR_FEADDR_REG
] =
3027 readl(iommu
->reg
+ DMAR_FEADDR_REG
);
3028 iommu
->iommu_state
[SR_DMAR_FEUADDR_REG
] =
3029 readl(iommu
->reg
+ DMAR_FEUADDR_REG
);
3031 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
3036 for_each_active_iommu(iommu
, drhd
)
3037 kfree(iommu
->iommu_state
);
3042 static void iommu_resume(void)
3044 struct dmar_drhd_unit
*drhd
;
3045 struct intel_iommu
*iommu
= NULL
;
3048 if (init_iommu_hw()) {
3050 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3052 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3056 for_each_active_iommu(iommu
, drhd
) {
3058 raw_spin_lock_irqsave(&iommu
->register_lock
, flag
);
3060 writel(iommu
->iommu_state
[SR_DMAR_FECTL_REG
],
3061 iommu
->reg
+ DMAR_FECTL_REG
);
3062 writel(iommu
->iommu_state
[SR_DMAR_FEDATA_REG
],
3063 iommu
->reg
+ DMAR_FEDATA_REG
);
3064 writel(iommu
->iommu_state
[SR_DMAR_FEADDR_REG
],
3065 iommu
->reg
+ DMAR_FEADDR_REG
);
3066 writel(iommu
->iommu_state
[SR_DMAR_FEUADDR_REG
],
3067 iommu
->reg
+ DMAR_FEUADDR_REG
);
3069 raw_spin_unlock_irqrestore(&iommu
->register_lock
, flag
);
3072 for_each_active_iommu(iommu
, drhd
)
3073 kfree(iommu
->iommu_state
);
3076 static struct syscore_ops iommu_syscore_ops
= {
3077 .resume
= iommu_resume
,
3078 .suspend
= iommu_suspend
,
3081 static void __init
init_iommu_pm_ops(void)
3083 register_syscore_ops(&iommu_syscore_ops
);
3087 static inline void init_iommu_pm_ops(void) {}
3088 #endif /* CONFIG_PM */
static int __init rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
{
	if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) ||
	    !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) ||
	    rmrr->end_address <= rmrr->base_address ||
	    arch_rmrr_sanity_check(rmrr))
		return -EINVAL;

	return 0;
}
3101 int __init
dmar_parse_one_rmrr(struct acpi_dmar_header
*header
, void *arg
)
3103 struct acpi_dmar_reserved_memory
*rmrr
;
3104 struct dmar_rmrr_unit
*rmrru
;
3106 rmrr
= (struct acpi_dmar_reserved_memory
*)header
;
3107 if (rmrr_sanity_check(rmrr
)) {
3109 "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
3110 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
3111 rmrr
->base_address
, rmrr
->end_address
,
3112 dmi_get_system_info(DMI_BIOS_VENDOR
),
3113 dmi_get_system_info(DMI_BIOS_VERSION
),
3114 dmi_get_system_info(DMI_PRODUCT_VERSION
));
3115 add_taint(TAINT_FIRMWARE_WORKAROUND
, LOCKDEP_STILL_OK
);
3118 rmrru
= kzalloc(sizeof(*rmrru
), GFP_KERNEL
);
3122 rmrru
->hdr
= header
;
3124 rmrru
->base_address
= rmrr
->base_address
;
3125 rmrru
->end_address
= rmrr
->end_address
;
3127 rmrru
->devices
= dmar_alloc_dev_scope((void *)(rmrr
+ 1),
3128 ((void *)rmrr
) + rmrr
->header
.length
,
3129 &rmrru
->devices_cnt
);
3130 if (rmrru
->devices_cnt
&& rmrru
->devices
== NULL
)
3133 list_add(&rmrru
->list
, &dmar_rmrr_units
);
3142 static struct dmar_atsr_unit
*dmar_find_atsr(struct acpi_dmar_atsr
*atsr
)
3144 struct dmar_atsr_unit
*atsru
;
3145 struct acpi_dmar_atsr
*tmp
;
3147 list_for_each_entry_rcu(atsru
, &dmar_atsr_units
, list
,
3149 tmp
= (struct acpi_dmar_atsr
*)atsru
->hdr
;
3150 if (atsr
->segment
!= tmp
->segment
)
3152 if (atsr
->header
.length
!= tmp
->header
.length
)
3154 if (memcmp(atsr
, tmp
, atsr
->header
.length
) == 0)
3161 int dmar_parse_one_atsr(struct acpi_dmar_header
*hdr
, void *arg
)
3163 struct acpi_dmar_atsr
*atsr
;
3164 struct dmar_atsr_unit
*atsru
;
3166 if (system_state
>= SYSTEM_RUNNING
&& !intel_iommu_enabled
)
3169 atsr
= container_of(hdr
, struct acpi_dmar_atsr
, header
);
3170 atsru
= dmar_find_atsr(atsr
);
3174 atsru
= kzalloc(sizeof(*atsru
) + hdr
->length
, GFP_KERNEL
);
3179 * If memory is allocated from slab by ACPI _DSM method, we need to
3180 * copy the memory content because the memory buffer will be freed
3183 atsru
->hdr
= (void *)(atsru
+ 1);
3184 memcpy(atsru
->hdr
, hdr
, hdr
->length
);
3185 atsru
->include_all
= atsr
->flags
& 0x1;
3186 if (!atsru
->include_all
) {
3187 atsru
->devices
= dmar_alloc_dev_scope((void *)(atsr
+ 1),
3188 (void *)atsr
+ atsr
->header
.length
,
3189 &atsru
->devices_cnt
);
3190 if (atsru
->devices_cnt
&& atsru
->devices
== NULL
) {
3196 list_add_rcu(&atsru
->list
, &dmar_atsr_units
);
3201 static void intel_iommu_free_atsr(struct dmar_atsr_unit
*atsru
)
3203 dmar_free_dev_scope(&atsru
->devices
, &atsru
->devices_cnt
);
3207 int dmar_release_one_atsr(struct acpi_dmar_header
*hdr
, void *arg
)
3209 struct acpi_dmar_atsr
*atsr
;
3210 struct dmar_atsr_unit
*atsru
;
3212 atsr
= container_of(hdr
, struct acpi_dmar_atsr
, header
);
3213 atsru
= dmar_find_atsr(atsr
);
3215 list_del_rcu(&atsru
->list
);
3217 intel_iommu_free_atsr(atsru
);
3223 int dmar_check_one_atsr(struct acpi_dmar_header
*hdr
, void *arg
)
3227 struct acpi_dmar_atsr
*atsr
;
3228 struct dmar_atsr_unit
*atsru
;
3230 atsr
= container_of(hdr
, struct acpi_dmar_atsr
, header
);
3231 atsru
= dmar_find_atsr(atsr
);
3235 if (!atsru
->include_all
&& atsru
->devices
&& atsru
->devices_cnt
) {
3236 for_each_active_dev_scope(atsru
->devices
, atsru
->devices_cnt
,
3244 static struct dmar_satc_unit
*dmar_find_satc(struct acpi_dmar_satc
*satc
)
3246 struct dmar_satc_unit
*satcu
;
3247 struct acpi_dmar_satc
*tmp
;
3249 list_for_each_entry_rcu(satcu
, &dmar_satc_units
, list
,
3251 tmp
= (struct acpi_dmar_satc
*)satcu
->hdr
;
3252 if (satc
->segment
!= tmp
->segment
)
3254 if (satc
->header
.length
!= tmp
->header
.length
)
3256 if (memcmp(satc
, tmp
, satc
->header
.length
) == 0)
3263 int dmar_parse_one_satc(struct acpi_dmar_header
*hdr
, void *arg
)
3265 struct acpi_dmar_satc
*satc
;
3266 struct dmar_satc_unit
*satcu
;
3268 if (system_state
>= SYSTEM_RUNNING
&& !intel_iommu_enabled
)
3271 satc
= container_of(hdr
, struct acpi_dmar_satc
, header
);
3272 satcu
= dmar_find_satc(satc
);
3276 satcu
= kzalloc(sizeof(*satcu
) + hdr
->length
, GFP_KERNEL
);
3280 satcu
->hdr
= (void *)(satcu
+ 1);
3281 memcpy(satcu
->hdr
, hdr
, hdr
->length
);
3282 satcu
->atc_required
= satc
->flags
& 0x1;
3283 satcu
->devices
= dmar_alloc_dev_scope((void *)(satc
+ 1),
3284 (void *)satc
+ satc
->header
.length
,
3285 &satcu
->devices_cnt
);
3286 if (satcu
->devices_cnt
&& !satcu
->devices
) {
3290 list_add_rcu(&satcu
->list
, &dmar_satc_units
);
3295 static int intel_iommu_add(struct dmar_drhd_unit
*dmaru
)
3298 struct intel_iommu
*iommu
= dmaru
->iommu
;
3300 ret
= intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR
, iommu
);
3304 if (hw_pass_through
&& !ecap_pass_through(iommu
->ecap
)) {
3305 pr_warn("%s: Doesn't support hardware pass through.\n",
3310 sp
= domain_update_iommu_superpage(NULL
, iommu
) - 1;
3311 if (sp
>= 0 && !(cap_super_page_val(iommu
->cap
) & (1 << sp
))) {
3312 pr_warn("%s: Doesn't support large page.\n",
3318 * Disable translation if already enabled prior to OS handover.
3320 if (iommu
->gcmd
& DMA_GCMD_TE
)
3321 iommu_disable_translation(iommu
);
3323 ret
= iommu_init_domains(iommu
);
3325 ret
= iommu_alloc_root_entry(iommu
);
3329 intel_svm_check(iommu
);
3331 if (dmaru
->ignored
) {
3333 * we always have to disable PMRs or DMA may fail on this device
3336 iommu_disable_protect_mem_regions(iommu
);
3340 intel_iommu_init_qi(iommu
);
3341 iommu_flush_write_buffer(iommu
);
3343 #ifdef CONFIG_INTEL_IOMMU_SVM
3344 if (pasid_supported(iommu
) && ecap_prs(iommu
->ecap
)) {
3345 ret
= intel_svm_enable_prq(iommu
);
3350 ret
= dmar_set_interrupt(iommu
);
3354 iommu_set_root_entry(iommu
);
3355 iommu_enable_translation(iommu
);
3357 iommu_disable_protect_mem_regions(iommu
);
3361 disable_dmar_iommu(iommu
);
3363 free_dmar_iommu(iommu
);
3367 int dmar_iommu_hotplug(struct dmar_drhd_unit
*dmaru
, bool insert
)
3370 struct intel_iommu
*iommu
= dmaru
->iommu
;
3372 if (!intel_iommu_enabled
)
3378 ret
= intel_iommu_add(dmaru
);
3380 disable_dmar_iommu(iommu
);
3381 free_dmar_iommu(iommu
);
3387 static void intel_iommu_free_dmars(void)
3389 struct dmar_rmrr_unit
*rmrru
, *rmrr_n
;
3390 struct dmar_atsr_unit
*atsru
, *atsr_n
;
3391 struct dmar_satc_unit
*satcu
, *satc_n
;
3393 list_for_each_entry_safe(rmrru
, rmrr_n
, &dmar_rmrr_units
, list
) {
3394 list_del(&rmrru
->list
);
3395 dmar_free_dev_scope(&rmrru
->devices
, &rmrru
->devices_cnt
);
3399 list_for_each_entry_safe(atsru
, atsr_n
, &dmar_atsr_units
, list
) {
3400 list_del(&atsru
->list
);
3401 intel_iommu_free_atsr(atsru
);
3403 list_for_each_entry_safe(satcu
, satc_n
, &dmar_satc_units
, list
) {
3404 list_del(&satcu
->list
);
3405 dmar_free_dev_scope(&satcu
->devices
, &satcu
->devices_cnt
);
3410 static struct dmar_satc_unit
*dmar_find_matched_satc_unit(struct pci_dev
*dev
)
3412 struct dmar_satc_unit
*satcu
;
3413 struct acpi_dmar_satc
*satc
;
3417 dev
= pci_physfn(dev
);
3420 list_for_each_entry_rcu(satcu
, &dmar_satc_units
, list
) {
3421 satc
= container_of(satcu
->hdr
, struct acpi_dmar_satc
, header
);
3422 if (satc
->segment
!= pci_domain_nr(dev
->bus
))
3424 for_each_dev_scope(satcu
->devices
, satcu
->devices_cnt
, i
, tmp
)
3425 if (to_pci_dev(tmp
) == dev
)
3434 static int dmar_ats_supported(struct pci_dev
*dev
, struct intel_iommu
*iommu
)
3437 struct pci_bus
*bus
;
3438 struct pci_dev
*bridge
= NULL
;
3440 struct acpi_dmar_atsr
*atsr
;
3441 struct dmar_atsr_unit
*atsru
;
3442 struct dmar_satc_unit
*satcu
;
3444 dev
= pci_physfn(dev
);
3445 satcu
= dmar_find_matched_satc_unit(dev
);
3448 * This device supports ATS as it is in SATC table.
3449 * When IOMMU is in legacy mode, enabling ATS is done
3450 * automatically by HW for the device that requires
3451 * ATS, hence OS should not enable this device ATS
3452 * to avoid duplicated TLB invalidation.
3454 return !(satcu
->atc_required
&& !sm_supported(iommu
));
3456 for (bus
= dev
->bus
; bus
; bus
= bus
->parent
) {
3458 /* If it's an integrated device, allow ATS */
3461 /* Connected via non-PCIe: no ATS */
3462 if (!pci_is_pcie(bridge
) ||
3463 pci_pcie_type(bridge
) == PCI_EXP_TYPE_PCI_BRIDGE
)
3465 /* If we found the root port, look it up in the ATSR */
3466 if (pci_pcie_type(bridge
) == PCI_EXP_TYPE_ROOT_PORT
)
3471 list_for_each_entry_rcu(atsru
, &dmar_atsr_units
, list
) {
3472 atsr
= container_of(atsru
->hdr
, struct acpi_dmar_atsr
, header
);
3473 if (atsr
->segment
!= pci_domain_nr(dev
->bus
))
3476 for_each_dev_scope(atsru
->devices
, atsru
->devices_cnt
, i
, tmp
)
3477 if (tmp
== &bridge
->dev
)
3480 if (atsru
->include_all
)
3490 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info
*info
)
3493 struct dmar_rmrr_unit
*rmrru
;
3494 struct dmar_atsr_unit
*atsru
;
3495 struct dmar_satc_unit
*satcu
;
3496 struct acpi_dmar_atsr
*atsr
;
3497 struct acpi_dmar_reserved_memory
*rmrr
;
3498 struct acpi_dmar_satc
*satc
;
3500 if (!intel_iommu_enabled
&& system_state
>= SYSTEM_RUNNING
)
3503 list_for_each_entry(rmrru
, &dmar_rmrr_units
, list
) {
3504 rmrr
= container_of(rmrru
->hdr
,
3505 struct acpi_dmar_reserved_memory
, header
);
3506 if (info
->event
== BUS_NOTIFY_ADD_DEVICE
) {
3507 ret
= dmar_insert_dev_scope(info
, (void *)(rmrr
+ 1),
3508 ((void *)rmrr
) + rmrr
->header
.length
,
3509 rmrr
->segment
, rmrru
->devices
,
3510 rmrru
->devices_cnt
);
3513 } else if (info
->event
== BUS_NOTIFY_REMOVED_DEVICE
) {
3514 dmar_remove_dev_scope(info
, rmrr
->segment
,
3515 rmrru
->devices
, rmrru
->devices_cnt
);
3519 list_for_each_entry(atsru
, &dmar_atsr_units
, list
) {
3520 if (atsru
->include_all
)
3523 atsr
= container_of(atsru
->hdr
, struct acpi_dmar_atsr
, header
);
3524 if (info
->event
== BUS_NOTIFY_ADD_DEVICE
) {
3525 ret
= dmar_insert_dev_scope(info
, (void *)(atsr
+ 1),
3526 (void *)atsr
+ atsr
->header
.length
,
3527 atsr
->segment
, atsru
->devices
,
3528 atsru
->devices_cnt
);
3533 } else if (info
->event
== BUS_NOTIFY_REMOVED_DEVICE
) {
3534 if (dmar_remove_dev_scope(info
, atsr
->segment
,
3535 atsru
->devices
, atsru
->devices_cnt
))
3539 list_for_each_entry(satcu
, &dmar_satc_units
, list
) {
3540 satc
= container_of(satcu
->hdr
, struct acpi_dmar_satc
, header
);
3541 if (info
->event
== BUS_NOTIFY_ADD_DEVICE
) {
3542 ret
= dmar_insert_dev_scope(info
, (void *)(satc
+ 1),
3543 (void *)satc
+ satc
->header
.length
,
3544 satc
->segment
, satcu
->devices
,
3545 satcu
->devices_cnt
);
3550 } else if (info
->event
== BUS_NOTIFY_REMOVED_DEVICE
) {
3551 if (dmar_remove_dev_scope(info
, satc
->segment
,
3552 satcu
->devices
, satcu
->devices_cnt
))
3560 static int intel_iommu_memory_notifier(struct notifier_block
*nb
,
3561 unsigned long val
, void *v
)
3563 struct memory_notify
*mhp
= v
;
3564 unsigned long start_vpfn
= mm_to_dma_pfn(mhp
->start_pfn
);
3565 unsigned long last_vpfn
= mm_to_dma_pfn(mhp
->start_pfn
+
3569 case MEM_GOING_ONLINE
:
3570 if (iommu_domain_identity_map(si_domain
,
3571 start_vpfn
, last_vpfn
)) {
3572 pr_warn("Failed to build identity map for [%lx-%lx]\n",
3573 start_vpfn
, last_vpfn
);
3579 case MEM_CANCEL_ONLINE
:
3581 struct dmar_drhd_unit
*drhd
;
3582 struct intel_iommu
*iommu
;
3583 LIST_HEAD(freelist
);
3585 domain_unmap(si_domain
, start_vpfn
, last_vpfn
, &freelist
);
3588 for_each_active_iommu(iommu
, drhd
)
3589 iommu_flush_iotlb_psi(iommu
, si_domain
,
3590 start_vpfn
, mhp
->nr_pages
,
3591 list_empty(&freelist
), 0);
3593 put_pages_list(&freelist
);
3601 static struct notifier_block intel_iommu_memory_nb
= {
3602 .notifier_call
= intel_iommu_memory_notifier
,
static void intel_disable_iommus(void)
{
	struct intel_iommu *iommu = NULL;
	struct dmar_drhd_unit *drhd;

	for_each_iommu(iommu, drhd)
		iommu_disable_translation(iommu);
}

void intel_iommu_shutdown(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	if (no_iommu || dmar_disabled)
		return;

	down_write(&dmar_global_lock);

	/* Disable PMRs explicitly here. */
	for_each_iommu(iommu, drhd)
		iommu_disable_protect_mem_regions(iommu);

	/* Make sure the IOMMUs are switched off */
	intel_disable_iommus();

	up_write(&dmar_global_lock);
}
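/*
 * The sysfs attributes below expose each IOMMU's version, register base
 * address, capability/extended-capability registers and domain usage under
 * the "intel-iommu" attribute group.
 */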
3635 static inline struct intel_iommu
*dev_to_intel_iommu(struct device
*dev
)
3637 struct iommu_device
*iommu_dev
= dev_to_iommu_device(dev
);
3639 return container_of(iommu_dev
, struct intel_iommu
, iommu
);
3642 static ssize_t
version_show(struct device
*dev
,
3643 struct device_attribute
*attr
, char *buf
)
3645 struct intel_iommu
*iommu
= dev_to_intel_iommu(dev
);
3646 u32 ver
= readl(iommu
->reg
+ DMAR_VER_REG
);
3647 return sysfs_emit(buf
, "%d:%d\n",
3648 DMAR_VER_MAJOR(ver
), DMAR_VER_MINOR(ver
));
3650 static DEVICE_ATTR_RO(version
);
3652 static ssize_t
address_show(struct device
*dev
,
3653 struct device_attribute
*attr
, char *buf
)
3655 struct intel_iommu
*iommu
= dev_to_intel_iommu(dev
);
3656 return sysfs_emit(buf
, "%llx\n", iommu
->reg_phys
);
3658 static DEVICE_ATTR_RO(address
);
3660 static ssize_t
cap_show(struct device
*dev
,
3661 struct device_attribute
*attr
, char *buf
)
3663 struct intel_iommu
*iommu
= dev_to_intel_iommu(dev
);
3664 return sysfs_emit(buf
, "%llx\n", iommu
->cap
);
3666 static DEVICE_ATTR_RO(cap
);
3668 static ssize_t
ecap_show(struct device
*dev
,
3669 struct device_attribute
*attr
, char *buf
)
3671 struct intel_iommu
*iommu
= dev_to_intel_iommu(dev
);
3672 return sysfs_emit(buf
, "%llx\n", iommu
->ecap
);
3674 static DEVICE_ATTR_RO(ecap
);
3676 static ssize_t
domains_supported_show(struct device
*dev
,
3677 struct device_attribute
*attr
, char *buf
)
3679 struct intel_iommu
*iommu
= dev_to_intel_iommu(dev
);
3680 return sysfs_emit(buf
, "%ld\n", cap_ndoms(iommu
->cap
));
3682 static DEVICE_ATTR_RO(domains_supported
);
3684 static ssize_t
domains_used_show(struct device
*dev
,
3685 struct device_attribute
*attr
, char *buf
)
3687 struct intel_iommu
*iommu
= dev_to_intel_iommu(dev
);
3688 return sysfs_emit(buf
, "%d\n",
3689 bitmap_weight(iommu
->domain_ids
,
3690 cap_ndoms(iommu
->cap
)));
3692 static DEVICE_ATTR_RO(domains_used
);
3694 static struct attribute
*intel_iommu_attrs
[] = {
3695 &dev_attr_version
.attr
,
3696 &dev_attr_address
.attr
,
3698 &dev_attr_ecap
.attr
,
3699 &dev_attr_domains_supported
.attr
,
3700 &dev_attr_domains_used
.attr
,
3704 static struct attribute_group intel_iommu_group
= {
3705 .name
= "intel-iommu",
3706 .attrs
= intel_iommu_attrs
,
3709 const struct attribute_group
*intel_iommu_groups
[] = {
3714 static inline bool has_external_pci(void)
3716 struct pci_dev
*pdev
= NULL
;
3718 for_each_pci_dev(pdev
)
3719 if (pdev
->external_facing
) {
3727 static int __init
platform_optin_force_iommu(void)
3729 if (!dmar_platform_optin() || no_platform_optin
|| !has_external_pci())
3732 if (no_iommu
|| dmar_disabled
)
3733 pr_info("Intel-IOMMU force enabled due to platform opt in\n");
3736 * If Intel-IOMMU is disabled by default, we will apply identity
3737 * map for all devices except those marked as being untrusted.
3740 iommu_set_default_passthrough(false);
3748 static int __init
probe_acpi_namespace_devices(void)
3750 struct dmar_drhd_unit
*drhd
;
3751 /* To avoid a -Wunused-but-set-variable warning. */
3752 struct intel_iommu
*iommu __maybe_unused
;
3756 for_each_active_iommu(iommu
, drhd
) {
3757 for_each_active_dev_scope(drhd
->devices
,
3758 drhd
->devices_cnt
, i
, dev
) {
3759 struct acpi_device_physical_node
*pn
;
3760 struct iommu_group
*group
;
3761 struct acpi_device
*adev
;
3763 if (dev
->bus
!= &acpi_bus_type
)
3766 adev
= to_acpi_device(dev
);
3767 mutex_lock(&adev
->physical_node_lock
);
3768 list_for_each_entry(pn
,
3769 &adev
->physical_node_list
, node
) {
3770 group
= iommu_group_get(pn
->dev
);
3772 iommu_group_put(group
);
3776 ret
= iommu_probe_device(pn
->dev
);
3780 mutex_unlock(&adev
->physical_node_lock
);
3790 static __init
int tboot_force_iommu(void)
3792 if (!tboot_enabled())
3795 if (no_iommu
|| dmar_disabled
)
3796 pr_warn("Forcing Intel-IOMMU to enabled\n");
3804 int __init
intel_iommu_init(void)
3807 struct dmar_drhd_unit
*drhd
;
3808 struct intel_iommu
*iommu
;
3811 * Intel IOMMU is required for a TXT/tboot launch or platform
3812 * opt in, so enforce that.
3814 force_on
= (!intel_iommu_tboot_noforce
&& tboot_force_iommu()) ||
3815 platform_optin_force_iommu();
3817 down_write(&dmar_global_lock
);
3818 if (dmar_table_init()) {
3820 panic("tboot: Failed to initialize DMAR table\n");
3824 if (dmar_dev_scope_init() < 0) {
3826 panic("tboot: Failed to initialize DMAR device scope\n");
3830 up_write(&dmar_global_lock
);
3833 * The bus notifier takes the dmar_global_lock, so lockdep will
3834 * complain later when we register it under the lock.
3836 dmar_register_bus_notifier();
3838 down_write(&dmar_global_lock
);
3841 intel_iommu_debugfs_init();
3843 if (no_iommu
|| dmar_disabled
) {
3845 * We exit the function here to ensure IOMMU's remapping and
3846 * mempool aren't setup, which means that the IOMMU's PMRs
3847 * won't be disabled via the call to init_dmars(). So disable
3848 * it explicitly here. The PMRs were setup by tboot prior to
3849 * calling SENTER, but the kernel is expected to reset/tear
3852 if (intel_iommu_tboot_noforce
) {
3853 for_each_iommu(iommu
, drhd
)
3854 iommu_disable_protect_mem_regions(iommu
);
3858 * Make sure the IOMMUs are switched off, even when we
3859 * boot into a kexec kernel and the previous kernel left
3862 intel_disable_iommus();
3866 if (list_empty(&dmar_rmrr_units
))
3867 pr_info("No RMRR found\n");
3869 if (list_empty(&dmar_atsr_units
))
3870 pr_info("No ATSR found\n");
3872 if (list_empty(&dmar_satc_units
))
3873 pr_info("No SATC found\n");
3875 init_no_remapping_devices();
3880 panic("tboot: Failed to initialize DMARs\n");
3881 pr_err("Initialization failed\n");
3884 up_write(&dmar_global_lock
);
3886 init_iommu_pm_ops();
3888 down_read(&dmar_global_lock
);
3889 for_each_active_iommu(iommu
, drhd
) {
3891 * The flush queue implementation does not perform
3892 * page-selective invalidations that are required for efficient
3893 * TLB flushes in virtual environments. The benefit of batching
3894 * is likely to be much lower than the overhead of synchronizing
3895 * the virtual and physical IOMMU page-tables.
3897 if (cap_caching_mode(iommu
->cap
) &&
3898 !first_level_by_default(IOMMU_DOMAIN_DMA
)) {
3899 pr_info_once("IOMMU batching disallowed due to virtualization\n");
3900 iommu_set_dma_strict();
3902 iommu_device_sysfs_add(&iommu
->iommu
, NULL
,
3905 iommu_device_register(&iommu
->iommu
, &intel_iommu_ops
, NULL
);
3907 iommu_pmu_register(iommu
);
3909 up_read(&dmar_global_lock
);
3911 if (si_domain
&& !hw_pass_through
)
3912 register_memory_notifier(&intel_iommu_memory_nb
);
3914 down_read(&dmar_global_lock
);
3915 if (probe_acpi_namespace_devices())
3916 pr_warn("ACPI name space devices didn't probe correctly\n");
3918 /* Finally, we enable the DMA remapping hardware. */
3919 for_each_iommu(iommu
, drhd
) {
3920 if (!drhd
->ignored
&& !translation_pre_enabled(iommu
))
3921 iommu_enable_translation(iommu
);
3923 iommu_disable_protect_mem_regions(iommu
);
3925 up_read(&dmar_global_lock
);
3927 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
3929 intel_iommu_enabled
= 1;
3934 intel_iommu_free_dmars();
3935 up_write(&dmar_global_lock
);
static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct device_domain_info *info = opaque;

	domain_context_clear_one(info, PCI_BUS_NUM(alias), alias & 0xff);
	return 0;
}

/*
 * NB - intel-iommu lacks any sort of reference counting for the users of
 * dependent devices. If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them will possibly leave
 * the others unable to operate.
 */
static void domain_context_clear(struct device_domain_info *info)
{
	if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
		return;

	pci_for_each_dma_alias(to_pci_dev(info->dev),
			       &domain_context_clear_one_cb, info);
}
3962 static void dmar_remove_one_dev_info(struct device
*dev
)
3964 struct device_domain_info
*info
= dev_iommu_priv_get(dev
);
3965 struct dmar_domain
*domain
= info
->domain
;
3966 struct intel_iommu
*iommu
= info
->iommu
;
3967 unsigned long flags
;
3969 if (!dev_is_real_dma_subdevice(info
->dev
)) {
3970 if (dev_is_pci(info
->dev
) && sm_supported(iommu
))
3971 intel_pasid_tear_down_entry(iommu
, info
->dev
,
3972 PASID_RID2PASID
, false);
3974 iommu_disable_pci_caps(info
);
3975 domain_context_clear(info
);
3978 spin_lock_irqsave(&domain
->lock
, flags
);
3979 list_del(&info
->link
);
3980 spin_unlock_irqrestore(&domain
->lock
, flags
);
3982 domain_detach_iommu(domain
, iommu
);
3983 info
->domain
= NULL
;
/*
 * Clear the page table pointer in context or pasid table entries so that
 * all DMA requests without PASID from the device are blocked. If the page
 * table has been set, clean up the data structures.
 */
static void device_block_translation(struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu = info->iommu;
	unsigned long flags;

	iommu_disable_pci_caps(info);
	if (!dev_is_real_dma_subdevice(dev)) {
		if (sm_supported(iommu))
			intel_pasid_tear_down_entry(iommu, dev,
						    PASID_RID2PASID, false);
		else
			domain_context_clear(info);
	}

	if (!info->domain)
		return;

	spin_lock_irqsave(&info->domain->lock, flags);
	list_del(&info->link);
	spin_unlock_irqrestore(&info->domain->lock, flags);

	domain_detach_iommu(info->domain, iommu);
	info->domain = NULL;
}
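/*
 * Initialize a freshly allocated dmar_domain for use through the IOMMU API:
 * derive the AGAW from @guest_width and allocate the top level page table.
 */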
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = false;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}
4038 static int blocking_domain_attach_dev(struct iommu_domain
*domain
,
4041 device_block_translation(dev
);
4045 static struct iommu_domain blocking_domain
= {
4046 .ops
= &(const struct iommu_domain_ops
) {
4047 .attach_dev
= blocking_domain_attach_dev
,
4048 .free
= intel_iommu_domain_free
4052 static struct iommu_domain
*intel_iommu_domain_alloc(unsigned type
)
4054 struct dmar_domain
*dmar_domain
;
4055 struct iommu_domain
*domain
;
4058 case IOMMU_DOMAIN_BLOCKED
:
4059 return &blocking_domain
;
4060 case IOMMU_DOMAIN_DMA
:
4061 case IOMMU_DOMAIN_UNMANAGED
:
4062 dmar_domain
= alloc_domain(type
);
4064 pr_err("Can't allocate dmar_domain\n");
4067 if (md_domain_init(dmar_domain
, DEFAULT_DOMAIN_ADDRESS_WIDTH
)) {
4068 pr_err("Domain initialization failed\n");
4069 domain_exit(dmar_domain
);
4073 domain
= &dmar_domain
->domain
;
4074 domain
->geometry
.aperture_start
= 0;
4075 domain
->geometry
.aperture_end
=
4076 __DOMAIN_MAX_ADDR(dmar_domain
->gaw
);
4077 domain
->geometry
.force_aperture
= true;
4080 case IOMMU_DOMAIN_IDENTITY
:
4081 return &si_domain
->domain
;
4082 case IOMMU_DOMAIN_SVA
:
4083 return intel_svm_domain_alloc();
4091 static void intel_iommu_domain_free(struct iommu_domain
*domain
)
4093 if (domain
!= &si_domain
->domain
&& domain
!= &blocking_domain
)
4094 domain_exit(to_dmar_domain(domain
));
static int prepare_domain_attach_device(struct iommu_domain *domain,
					struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu;
	int addr_width;

	iommu = device_to_iommu(dev, NULL, NULL);
	if (!iommu)
		return -ENODEV;

	if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
		return -EINVAL;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width))
		return -EINVAL;
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return 0;
}
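/*
 * Attach @dev to an unmanaged or DMA domain. Devices with non-relaxable
 * RMRRs are rejected, since their RMRR ranges cannot be honoured once the
 * caller owns the IOVA space (see device_is_rmrr_locked()).
 */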
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	int ret;

	if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
	    device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
		return -EPERM;
	}

	if (info->domain)
		device_block_translation(dev);

	ret = prepare_domain_attach_device(domain, dev);
	if (ret)
		return ret;

	return dmar_domain_attach_device(to_dmar_domain(domain), dev);
}
4159 static int intel_iommu_map(struct iommu_domain
*domain
,
4160 unsigned long iova
, phys_addr_t hpa
,
4161 size_t size
, int iommu_prot
, gfp_t gfp
)
4163 struct dmar_domain
*dmar_domain
= to_dmar_domain(domain
);
4167 if (iommu_prot
& IOMMU_READ
)
4168 prot
|= DMA_PTE_READ
;
4169 if (iommu_prot
& IOMMU_WRITE
)
4170 prot
|= DMA_PTE_WRITE
;
4171 if (dmar_domain
->set_pte_snp
)
4172 prot
|= DMA_PTE_SNP
;
4174 max_addr
= iova
+ size
;
4175 if (dmar_domain
->max_addr
< max_addr
) {
4178 /* check if minimum agaw is sufficient for mapped address */
4179 end
= __DOMAIN_MAX_ADDR(dmar_domain
->gaw
) + 1;
4180 if (end
< max_addr
) {
4181 pr_err("%s: iommu width (%d) is not "
4182 "sufficient for the mapped address (%llx)\n",
4183 __func__
, dmar_domain
->gaw
, max_addr
);
4186 dmar_domain
->max_addr
= max_addr
;
4188 /* Round up size to next multiple of PAGE_SIZE, if it and
4189 the low bits of hpa would take us onto the next page */
4190 size
= aligned_nrpages(hpa
, size
);
4191 return __domain_mapping(dmar_domain
, iova
>> VTD_PAGE_SHIFT
,
4192 hpa
>> VTD_PAGE_SHIFT
, size
, prot
, gfp
);
static int intel_iommu_map_pages(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t paddr,
				 size_t pgsize, size_t pgcount,
				 int prot, gfp_t gfp, size_t *mapped)
{
	unsigned long pgshift = __ffs(pgsize);
	size_t size = pgcount << pgshift;
	int ret;

	if (pgsize != SZ_4K && pgsize != SZ_2M && pgsize != SZ_1G)
		return -EINVAL;

	if (!IS_ALIGNED(iova | paddr, pgsize))
		return -EINVAL;

	ret = intel_iommu_map(domain, iova, paddr, size, prot, gfp);
	if (!ret && mapped)
		*mapped = size;

	return ret;
}
4217 static size_t intel_iommu_unmap(struct iommu_domain
*domain
,
4218 unsigned long iova
, size_t size
,
4219 struct iommu_iotlb_gather
*gather
)
4221 struct dmar_domain
*dmar_domain
= to_dmar_domain(domain
);
4222 unsigned long start_pfn
, last_pfn
;
4225 /* Cope with horrid API which requires us to unmap more than the
4226 size argument if it happens to be a large-page mapping. */
4227 if (unlikely(!pfn_to_dma_pte(dmar_domain
, iova
>> VTD_PAGE_SHIFT
,
4228 &level
, GFP_ATOMIC
)))
4231 if (size
< VTD_PAGE_SIZE
<< level_to_offset_bits(level
))
4232 size
= VTD_PAGE_SIZE
<< level_to_offset_bits(level
);
4234 start_pfn
= iova
>> VTD_PAGE_SHIFT
;
4235 last_pfn
= (iova
+ size
- 1) >> VTD_PAGE_SHIFT
;
4237 domain_unmap(dmar_domain
, start_pfn
, last_pfn
, &gather
->freelist
);
4239 if (dmar_domain
->max_addr
== iova
+ size
)
4240 dmar_domain
->max_addr
= iova
;
4243 * We do not use page-selective IOTLB invalidation in flush queue,
4244 * so there is no need to track page and sync iotlb.
4246 if (!iommu_iotlb_gather_queued(gather
))
4247 iommu_iotlb_gather_add_page(domain
, gather
, iova
, size
);
4252 static size_t intel_iommu_unmap_pages(struct iommu_domain
*domain
,
4254 size_t pgsize
, size_t pgcount
,
4255 struct iommu_iotlb_gather
*gather
)
4257 unsigned long pgshift
= __ffs(pgsize
);
4258 size_t size
= pgcount
<< pgshift
;
4260 return intel_iommu_unmap(domain
, iova
, size
, gather
);
4263 static void intel_iommu_tlb_sync(struct iommu_domain
*domain
,
4264 struct iommu_iotlb_gather
*gather
)
4266 struct dmar_domain
*dmar_domain
= to_dmar_domain(domain
);
4267 unsigned long iova_pfn
= IOVA_PFN(gather
->start
);
4268 size_t size
= gather
->end
- gather
->start
;
4269 struct iommu_domain_info
*info
;
4270 unsigned long start_pfn
;
4271 unsigned long nrpages
;
4274 nrpages
= aligned_nrpages(gather
->start
, size
);
4275 start_pfn
= mm_to_dma_pfn(iova_pfn
);
4277 xa_for_each(&dmar_domain
->iommu_array
, i
, info
)
4278 iommu_flush_iotlb_psi(info
->iommu
, dmar_domain
,
4280 list_empty(&gather
->freelist
), 0);
4282 put_pages_list(&gather
->freelist
);
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level,
			     GFP_ATOMIC);
	if (pte && dma_pte_present(pte))
		phys = dma_pte_addr(pte) +
			(iova & (BIT_MASK(level_to_offset_bits(level) +
						VTD_PAGE_SHIFT) - 1));

	return phys;
}
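/*
 * The helpers below back enforce_cache_coherency(): snooping can only be
 * forced when every IOMMU with devices in the domain supports snoop control,
 * either per PTE (second-level tables) or via the PASID entry (first-level
 * tables).
 */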
4303 static bool domain_support_force_snooping(struct dmar_domain
*domain
)
4305 struct device_domain_info
*info
;
4306 bool support
= true;
4308 assert_spin_locked(&domain
->lock
);
4309 list_for_each_entry(info
, &domain
->devices
, link
) {
4310 if (!ecap_sc_support(info
->iommu
->ecap
)) {
4319 static void domain_set_force_snooping(struct dmar_domain
*domain
)
4321 struct device_domain_info
*info
;
4323 assert_spin_locked(&domain
->lock
);
4325 * Second level page table supports per-PTE snoop control. The
4326 * iommu_map() interface will handle this by setting SNP bit.
4328 if (!domain
->use_first_level
) {
4329 domain
->set_pte_snp
= true;
4333 list_for_each_entry(info
, &domain
->devices
, link
)
4334 intel_pasid_setup_page_snoop_control(info
->iommu
, info
->dev
,
4338 static bool intel_iommu_enforce_cache_coherency(struct iommu_domain
*domain
)
4340 struct dmar_domain
*dmar_domain
= to_dmar_domain(domain
);
4341 unsigned long flags
;
4343 if (dmar_domain
->force_snooping
)
4346 spin_lock_irqsave(&dmar_domain
->lock
, flags
);
4347 if (!domain_support_force_snooping(dmar_domain
)) {
4348 spin_unlock_irqrestore(&dmar_domain
->lock
, flags
);
4352 domain_set_force_snooping(dmar_domain
);
4353 dmar_domain
->force_snooping
= true;
4354 spin_unlock_irqrestore(&dmar_domain
->lock
, flags
);
static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);

	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
	case IOMMU_CAP_DEFERRED_FLUSH:
		return true;
	case IOMMU_CAP_PRE_BOOT_PROTECTION:
		return dmar_platform_optin();
	case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
		return ecap_sc_support(info->iommu->ecap);
	default:
		return false;
	}
}
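/*
 * Per-device probe: look up the IOMMU for @dev, allocate the
 * device_domain_info (recording ATS/PRI/PASID capabilities, PFSID and ATS
 * queue depth) and, in scalable mode, allocate the PASID table.
 */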
4376 static struct iommu_device
*intel_iommu_probe_device(struct device
*dev
)
4378 struct pci_dev
*pdev
= dev_is_pci(dev
) ? to_pci_dev(dev
) : NULL
;
4379 struct device_domain_info
*info
;
4380 struct intel_iommu
*iommu
;
4384 iommu
= device_to_iommu(dev
, &bus
, &devfn
);
4385 if (!iommu
|| !iommu
->iommu
.ops
)
4386 return ERR_PTR(-ENODEV
);
4388 info
= kzalloc(sizeof(*info
), GFP_KERNEL
);
4390 return ERR_PTR(-ENOMEM
);
4392 if (dev_is_real_dma_subdevice(dev
)) {
4393 info
->bus
= pdev
->bus
->number
;
4394 info
->devfn
= pdev
->devfn
;
4395 info
->segment
= pci_domain_nr(pdev
->bus
);
4398 info
->devfn
= devfn
;
4399 info
->segment
= iommu
->segment
;
4403 info
->iommu
= iommu
;
4404 if (dev_is_pci(dev
)) {
4405 if (ecap_dev_iotlb_support(iommu
->ecap
) &&
4406 pci_ats_supported(pdev
) &&
4407 dmar_ats_supported(pdev
, iommu
)) {
4408 info
->ats_supported
= 1;
4409 info
->dtlb_extra_inval
= dev_needs_extra_dtlb_flush(pdev
);
4412 * For IOMMU that supports device IOTLB throttling
4413 * (DIT), we assign PFSID to the invalidation desc
4414 * of a VF such that IOMMU HW can gauge queue depth
4415 * at PF level. If DIT is not set, PFSID will be
4416 * treated as reserved, which should be set to 0.
4418 if (ecap_dit(iommu
->ecap
))
4419 info
->pfsid
= pci_dev_id(pci_physfn(pdev
));
4420 info
->ats_qdep
= pci_ats_queue_depth(pdev
);
4422 if (sm_supported(iommu
)) {
4423 if (pasid_supported(iommu
)) {
4424 int features
= pci_pasid_features(pdev
);
4427 info
->pasid_supported
= features
| 1;
4430 if (info
->ats_supported
&& ecap_prs(iommu
->ecap
) &&
4431 pci_pri_supported(pdev
))
4432 info
->pri_supported
= 1;
4436 dev_iommu_priv_set(dev
, info
);
4438 if (sm_supported(iommu
) && !dev_is_real_dma_subdevice(dev
)) {
4439 ret
= intel_pasid_alloc_table(dev
);
4441 dev_err(dev
, "PASID table allocation failed\n");
4442 dev_iommu_priv_set(dev
, NULL
);
4444 return ERR_PTR(ret
);
4448 return &iommu
->iommu
;
4451 static void intel_iommu_release_device(struct device
*dev
)
4453 struct device_domain_info
*info
= dev_iommu_priv_get(dev
);
4455 dmar_remove_one_dev_info(dev
);
4456 intel_pasid_free_table(dev
);
4457 dev_iommu_priv_set(dev
, NULL
);
4459 set_dma_ops(dev
, NULL
);
4462 static void intel_iommu_probe_finalize(struct device
*dev
)
4464 set_dma_ops(dev
, NULL
);
4465 iommu_setup_dma_ops(dev
, 0, U64_MAX
);
4468 static void intel_iommu_get_resv_regions(struct device
*device
,
4469 struct list_head
*head
)
4471 int prot
= DMA_PTE_READ
| DMA_PTE_WRITE
;
4472 struct iommu_resv_region
*reg
;
4473 struct dmar_rmrr_unit
*rmrr
;
4474 struct device
*i_dev
;
4478 for_each_rmrr_units(rmrr
) {
4479 for_each_active_dev_scope(rmrr
->devices
, rmrr
->devices_cnt
,
4481 struct iommu_resv_region
*resv
;
4482 enum iommu_resv_type type
;
4485 if (i_dev
!= device
&&
4486 !is_downstream_to_pci_bridge(device
, i_dev
))
4489 length
= rmrr
->end_address
- rmrr
->base_address
+ 1;
4491 type
= device_rmrr_is_relaxable(device
) ?
4492 IOMMU_RESV_DIRECT_RELAXABLE
: IOMMU_RESV_DIRECT
;
4494 resv
= iommu_alloc_resv_region(rmrr
->base_address
,
4500 list_add_tail(&resv
->list
, head
);
4505 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
4506 if (dev_is_pci(device
)) {
4507 struct pci_dev
*pdev
= to_pci_dev(device
);
4509 if ((pdev
->class >> 8) == PCI_CLASS_BRIDGE_ISA
) {
4510 reg
= iommu_alloc_resv_region(0, 1UL << 24, prot
,
4511 IOMMU_RESV_DIRECT_RELAXABLE
,
			list_add_tail(&reg->list, head);
4517 #endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
4519 reg
= iommu_alloc_resv_region(IOAPIC_RANGE_START
,
4520 IOAPIC_RANGE_END
- IOAPIC_RANGE_START
+ 1,
4521 0, IOMMU_RESV_MSI
, GFP_KERNEL
);
	list_add_tail(&reg->list, head);
4527 static struct iommu_group
*intel_iommu_device_group(struct device
*dev
)
4529 if (dev_is_pci(dev
))
4530 return pci_device_group(dev
);
4531 return generic_device_group(dev
);
4534 static int intel_iommu_enable_sva(struct device
*dev
)
4536 struct device_domain_info
*info
= dev_iommu_priv_get(dev
);
4537 struct intel_iommu
*iommu
;
4539 if (!info
|| dmar_disabled
)
4542 iommu
= info
->iommu
;
4546 if (!(iommu
->flags
& VTD_FLAG_SVM_CAPABLE
))
4549 if (!info
->pasid_enabled
|| !info
->ats_enabled
)
4553 * Devices having device-specific I/O fault handling should not
4554 * support PCI/PRI. The IOMMU side has no means to check the
4555 * capability of device-specific IOPF. Therefore, IOMMU can only
4556 * default that if the device driver enables SVA on a non-PRI
4557 * device, it will handle IOPF in its own way.
4559 if (!info
->pri_supported
)
4562 /* Devices supporting PRI should have it enabled. */
4563 if (!info
->pri_enabled
)
4569 static int intel_iommu_enable_iopf(struct device
*dev
)
4571 struct pci_dev
*pdev
= dev_is_pci(dev
) ? to_pci_dev(dev
) : NULL
;
4572 struct device_domain_info
*info
= dev_iommu_priv_get(dev
);
4573 struct intel_iommu
*iommu
;
4576 if (!pdev
|| !info
|| !info
->ats_enabled
|| !info
->pri_supported
)
4579 if (info
->pri_enabled
)
4582 iommu
= info
->iommu
;
4586 /* PASID is required in PRG Response Message. */
4587 if (info
->pasid_enabled
&& !pci_prg_resp_pasid_required(pdev
))
4590 ret
= pci_reset_pri(pdev
);
4594 ret
= iopf_queue_add_device(iommu
->iopf_queue
, dev
);
4598 ret
= iommu_register_device_fault_handler(dev
, iommu_queue_iopf
, dev
);
4600 goto iopf_remove_device
;
4602 ret
= pci_enable_pri(pdev
, PRQ_DEPTH
);
4604 goto iopf_unregister_handler
;
4605 info
->pri_enabled
= 1;
4609 iopf_unregister_handler
:
4610 iommu_unregister_device_fault_handler(dev
);
4612 iopf_queue_remove_device(iommu
->iopf_queue
, dev
);
4617 static int intel_iommu_disable_iopf(struct device
*dev
)
4619 struct device_domain_info
*info
= dev_iommu_priv_get(dev
);
4620 struct intel_iommu
*iommu
= info
->iommu
;
4622 if (!info
->pri_enabled
)
4626 * PCIe spec states that by clearing PRI enable bit, the Page
4627 * Request Interface will not issue new page requests, but has
4628 * outstanding page requests that have been transmitted or are
4629 * queued for transmission. This is supposed to be called after
4630 * the device driver has stopped DMA, all PASIDs have been
4631 * unbound and the outstanding PRQs have been drained.
4633 pci_disable_pri(to_pci_dev(dev
));
4634 info
->pri_enabled
= 0;
4637 * With PRI disabled and outstanding PRQs drained, unregistering
4638 * fault handler and removing device from iopf queue should never
4641 WARN_ON(iommu_unregister_device_fault_handler(dev
));
4642 WARN_ON(iopf_queue_remove_device(iommu
->iopf_queue
, dev
));
4648 intel_iommu_dev_enable_feat(struct device
*dev
, enum iommu_dev_features feat
)
4651 case IOMMU_DEV_FEAT_IOPF
:
4652 return intel_iommu_enable_iopf(dev
);
4654 case IOMMU_DEV_FEAT_SVA
:
4655 return intel_iommu_enable_sva(dev
);
4663 intel_iommu_dev_disable_feat(struct device
*dev
, enum iommu_dev_features feat
)
4666 case IOMMU_DEV_FEAT_IOPF
:
4667 return intel_iommu_disable_iopf(dev
);
4669 case IOMMU_DEV_FEAT_SVA
:
static bool intel_iommu_is_attach_deferred(struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);

	return translation_pre_enabled(info->iommu) && !info->domain;
}

/*
 * Check that the device does not live on an external facing PCI port that is
 * marked as untrusted. Such devices should not be able to apply quirks and
 * thus not be able to bypass the IOMMU restrictions.
 */
static bool risky_device(struct pci_dev *pdev)
{
	if (pdev->untrusted) {
		pci_info(pdev,
			 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
			 pdev->vendor, pdev->device);
		pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
		return true;
	}
	return false;
}
static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
				       unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	unsigned long pages = aligned_nrpages(iova, size);
	unsigned long pfn = iova >> VTD_PAGE_SHIFT;
	struct iommu_domain_info *info;
	unsigned long i;

	xa_for_each(&dmar_domain->iommu_array, i, info)
		__mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
}
*iommu
= device_to_iommu(dev
, NULL
, NULL
);
4717 struct iommu_domain
*domain
;
4719 /* Domain type specific cleanup: */
4720 domain
= iommu_get_domain_for_dev_pasid(dev
, pasid
, 0);
4722 switch (domain
->type
) {
4723 case IOMMU_DOMAIN_SVA
:
4724 intel_svm_remove_dev_pasid(dev
, pasid
);
4727 /* should never reach here */
4733 intel_pasid_tear_down_entry(iommu
, dev
, pasid
, false);
static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu = info->iommu;
	struct iommu_hw_info_vtd *vtd;

	vtd = kzalloc(sizeof(*vtd), GFP_KERNEL);
	if (!vtd)
		return ERR_PTR(-ENOMEM);

	vtd->cap_reg = iommu->cap;
	vtd->ecap_reg = iommu->ecap;
	*length = sizeof(*vtd);
	*type = IOMMU_HW_INFO_TYPE_INTEL_VTD;
	return vtd;
}
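/*
 * Illustrative sketch only: the blob filled in above is just the raw CAP
 * and ECAP register values. A consumer that receives a struct
 * iommu_hw_info_vtd (e.g. through the iommufd hardware-info query) can
 * decode it with the same bit-field macros this driver uses, for example:
 *
 *	if (ecap_smts(vtd->ecap_reg))
 *		; // scalable mode translation supported
 *	sagaw = cap_sagaw(vtd->cap_reg); // supported adjusted guest address widths
 *
 * ecap_smts()/cap_sagaw() are the existing decode macros from this
 * driver's iommu.h; the field layout follows the VT-d specification.
 */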
const struct iommu_ops intel_iommu_ops = {
	.capable		= intel_iommu_capable,
	.hw_info		= intel_iommu_hw_info,
	.domain_alloc		= intel_iommu_domain_alloc,
	.probe_device		= intel_iommu_probe_device,
	.probe_finalize		= intel_iommu_probe_finalize,
	.release_device		= intel_iommu_release_device,
	.get_resv_regions	= intel_iommu_get_resv_regions,
	.device_group		= intel_iommu_device_group,
	.dev_enable_feat	= intel_iommu_dev_enable_feat,
	.dev_disable_feat	= intel_iommu_dev_disable_feat,
	.is_attach_deferred	= intel_iommu_is_attach_deferred,
	.def_domain_type	= device_def_domain_type,
	.remove_dev_pasid	= intel_iommu_remove_dev_pasid,
	.pgsize_bitmap		= SZ_4K,
#ifdef CONFIG_INTEL_IOMMU_SVM
	.page_response		= intel_svm_page_response,
#endif
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev		= intel_iommu_attach_device,
		.map_pages		= intel_iommu_map_pages,
		.unmap_pages		= intel_iommu_unmap_pages,
		.iotlb_sync_map		= intel_iommu_iotlb_sync_map,
		.flush_iotlb_all	= intel_flush_iotlb_all,
		.iotlb_sync		= intel_iommu_tlb_sync,
		.iova_to_phys		= intel_iommu_iova_to_phys,
		.free			= intel_iommu_domain_free,
		.enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
	}
};
static void quirk_iommu_igfx(struct pci_dev *dev)
{
	if (risky_device(dev))
		return;

	pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

/* G4x/GM45 integrated gfx dmar support is totally busted. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);

/* Broadwell igfx malfunctions with dmar */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	if (risky_device(dev))
		return;

	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pci_info(dev, "Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (risky_device(dev))
		return;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
		iommu_set_dma_strict();
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
{
	u16 ver;

	if (!IS_GFX_DEVICE(dev))
		return;

	ver = (dev->device >> 8) & 0xff;
	if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
	    ver != 0x4e && ver != 0x8a && ver != 0x98 &&
	    ver != 0x9a && ver != 0xa7)
		return;

	if (risky_device(dev))
		return;

	pci_info(dev, "Skip IOMMU disabling for graphics\n");
	iommu_skip_te_disable = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable);
/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that. We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;

	if (risky_device(pdev)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (risky_device(pdev)) {
		pci_dev_put(pdev);
		return;
	}

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
		vtisochctrl);
}
/*
 * Here we deal with a device TLB defect where a device may inadvertently
 * issue an ATS invalidation completion before posted writes initiated with
 * a translated address that utilized translations matching the invalidation
 * address range, violating the invalidation completion ordering.
 * Therefore, any use case that cannot guarantee DMA is stopped before unmap
 * is vulnerable to this defect. In other words, any dTLB invalidation
 * initiated not under the control of the trusted/privileged host device
 * driver must use this quirk.
 *
 * Device TLBs are invalidated under the following six conditions:
 * 1. Device driver does DMA API unmap IOVA
 * 2. Device driver unbinds a PASID from a process, sva_unbind_device()
 * 3. PASID is torn down, after PASID cache is flushed. e.g. process
 *    exit_mmap() due to crash
 * 4. Under SVA usage, called by mmu_notifier.invalidate_range() where
 *    VM has to free pages that were unmapped
 * 5. Userspace driver unmaps a DMA buffer
 * 6. Cache invalidation in vSVA usage (upcoming)
 *
 * For #1 and #2, device drivers are responsible for stopping DMA traffic
 * before unmap/unbind. For #3, the iommu driver gets the mmu_notifier to
 * invalidate the TLB the same way as a normal user unmap, which will use
 * this quirk. The dTLB invalidation after a PASID cache flush does not
 * need this quirk.
 *
 * As a reminder, #6 will *NEED* this quirk as we enable nested translation.
 */
void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
			       unsigned long address, unsigned long mask,
			       u32 pasid, u16 qdep)
{
	u16 sid;

	if (likely(!info->dtlb_extra_inval))
		return;

	sid = PCI_DEVID(info->bus, info->devfn);
	if (pasid == PASID_RID2PASID) {
		qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
				   qdep, address, mask);
	} else {
		qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid,
					 pasid, qdep, address, mask);
	}
}
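/*
 * Illustrative sketch only: call sites in this driver issue the normal
 * device-IOTLB invalidation first and then this quirk as an extra flush,
 * roughly:
 *
 *	qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, qdep, addr, mask);
 *	quirk_extra_dev_tlb_flush(info, addr, mask, PASID_RID2PASID, qdep);
 *
 * The extra flush is a no-op unless info->dtlb_extra_inval was set for
 * the affected device.
 */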
#define ecmd_get_status_code(res)	(((res) & 0xff) >> 1)

/*
 * Function to submit a command to the enhanced command interface. The
 * valid enhanced command descriptions are defined in Table 47 of the
 * VT-d spec. The VT-d hardware implementation may support some but not
 * all commands, which can be determined by checking the Enhanced
 * Command Capability Register.
 *
 * Return values:
 *  - 0: Command successful without any error;
 *  - Negative: software error value;
 *  - Nonzero positive: failure status code defined in Table 48.
 */
int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob)
{
	unsigned long flags;
	u64 res;
	int ret;

	if (!cap_ecmds(iommu->cap))
		return -ENODEV;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	res = dmar_readq(iommu->reg + DMAR_ECRSP_REG);
	if (res & DMA_ECMD_ECRSP_IP) {
		ret = -EBUSY;
		goto err;
	}

	/*
	 * Unconditionally write the operand B, because
	 * - There is no side effect if an ecmd doesn't require an
	 *   operand B, but we set the register to some value.
	 * - It's not invoked in any critical path. The extra MMIO
	 *   write doesn't bring any performance concerns.
	 */
	dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob);
	dmar_writeq(iommu->reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT));

	IOMMU_WAIT_OP(iommu, DMAR_ECRSP_REG, dmar_readq,
		      !(res & DMA_ECMD_ECRSP_IP), res);

	if (res & DMA_ECMD_ECRSP_IP) {
		ret = -ETIMEDOUT;
		goto err;
	}

	ret = ecmd_get_status_code(res);
err:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	return ret;
}
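/*
 * Illustrative sketch only: a caller is expected to distinguish the three
 * kinds of return values documented above; `cmd' is a placeholder for one
 * of the command encodings from Table 47:
 *
 *	ret = ecmd_submit_sync(iommu, cmd, 0, 0);
 *	if (ret < 0)
 *		return ret;	// software error, e.g. -ENODEV or -EBUSY
 *	if (ret)
 *		return -EIO;	// non-zero hardware status code (Table 48)
 *	// ret == 0: command completed successfully
 */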