// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/iommu-helper.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/io-pgtable.h>
#include <linux/cc_platform.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/hw_irq.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <uapi/linux/iommufd.h>

#include "amd_iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h"

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
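/*
 * Illustrative sketch (not part of the driver): CMD_SET_TYPE() ORs the
 * 4-bit command opcode into bits 31:28 of data[1], leaving the lower
 * bits for command-specific payload. The opcode value below is an
 * assumption taken from amd_iommu_types.h (CMD_INV_DEV_ENTRY == 0x02).
 */
#if 0	/* example only */
static void cmd_set_type_example(void)
{
	struct iommu_cmd cmd = { };

	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
	/* cmd.data[1] is now 0x20000000: opcode 0x2 in bits 31:28 */
}
#endif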
/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)

/* Reserved IOVA ranges */
#define MSI_RANGE_START		(0xfee00000)
#define MSI_RANGE_END		(0xfeefffff)
#define HT_RANGE_START		(0xfd00000000ULL)
#define HT_RANGE_END		(0xffffffffffULL)

#define DEFAULT_PGTABLE_LEVEL	PAGE_MODE_3_LEVEL

static DEFINE_SPINLOCK(pd_bitmap_lock);

LIST_HEAD(ioapic_map);
LIST_HEAD(acpihid_map);

const struct iommu_ops amd_iommu_ops;
const struct iommu_dirty_ops amd_dirty_ops;

int amd_iommu_max_glx_val = -1;
/*
 * General struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

struct kmem_cache *amd_iommu_irq_cache;

static void detach_device(struct device *dev);

/****************************************************************************
 *
 * Helper functions
 *
 ****************************************************************************/

static inline int get_acpihid_device_id(struct device *dev,
					struct acpihid_map_entry **entry)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	struct acpihid_map_entry *p;

	if (!adev)
		return -ENODEV;

	list_for_each_entry(p, &acpihid_map, list) {
		if (acpi_dev_hid_uid_match(adev, p->hid,
					   p->uid[0] ? p->uid : NULL)) {
			if (entry)
				*entry = p;
			return p->devid;
		}
	}
	return -EINVAL;
}
static inline int get_device_sbdf_id(struct device *dev)
{
	int sbdf;

	if (dev_is_pci(dev))
		sbdf = get_pci_sbdf_id(to_pci_dev(dev));
	else
		sbdf = get_acpihid_device_id(dev, NULL);

	return sbdf;
}

struct dev_table_entry *get_dev_table(struct amd_iommu *iommu)
{
	struct dev_table_entry *dev_table;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	BUG_ON(pci_seg == NULL);
	dev_table = pci_seg->dev_table;
	BUG_ON(dev_table == NULL);

	return dev_table;
}

static inline u16 get_device_segment(struct device *dev)
{
	u16 seg;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		seg = pci_domain_nr(pdev->bus);
	} else {
		u32 devid = get_acpihid_device_id(dev, NULL);

		seg = PCI_SBDF_TO_SEGID(devid);
	}

	return seg;
}

/* Writes the specific IOMMU for a device into the PCI segment rlookup table */
void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid)
{
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	pci_seg->rlookup_table[devid] = iommu;
}

static struct amd_iommu *__rlookup_amd_iommu(u16 seg, u16 devid)
{
	struct amd_iommu_pci_seg *pci_seg;

	for_each_pci_segment(pci_seg) {
		if (pci_seg->id == seg)
			return pci_seg->rlookup_table[devid];
	}
	return NULL;
}

static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
{
	u16 seg = get_device_segment(dev);
	int devid = get_device_sbdf_id(dev);

	if (devid < 0)
		return NULL;
	return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
}

static struct protection_domain *to_pdomain(struct iommu_domain *dom)
{
	return container_of(dom, struct protection_domain, domain);
}
static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return NULL;

	spin_lock_init(&dev_data->lock);
	dev_data->devid = devid;
	ratelimit_default_init(&dev_data->rs);

	llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
	return dev_data;
}

static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_dev_data *dev_data;
	struct llist_node *node;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	if (llist_empty(&pci_seg->dev_data_list))
		return NULL;

	node = pci_seg->dev_data_list.first;
	llist_for_each_entry(dev_data, node, dev_data_list) {
		if (dev_data->devid == devid)
			return dev_data;
	}

	return NULL;
}

static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	struct amd_iommu *iommu;
	struct dev_table_entry *dev_table;
	u16 devid = pci_dev_id(pdev);

	if (devid == alias)
		return 0;

	iommu = rlookup_amd_iommu(&pdev->dev);
	if (!iommu)
		return 0;

	amd_iommu_set_rlookup_table(iommu, alias);
	dev_table = get_dev_table(iommu);
	memcpy(dev_table[alias].data,
	       dev_table[devid].data,
	       sizeof(dev_table[alias].data));

	return 0;
}

static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
{
	struct pci_dev *pdev;

	if (!dev_is_pci(dev))
		return;
	pdev = to_pci_dev(dev);

	/*
	 * The IVRS alias stored in the alias table may not be
	 * part of the PCI DMA aliases if its bus differs
	 * from the original device.
	 */
	clone_alias(pdev, iommu->pci_seg->alias_table[pci_dev_id(pdev)], NULL);

	pci_for_each_dma_alias(pdev, clone_alias, NULL);
}
static void setup_aliases(struct amd_iommu *iommu, struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
	u16 ivrs_alias;

	/* For ACPI HID devices, there are no aliases */
	if (!dev_is_pci(dev))
		return;

	/*
	 * Add the IVRS alias to the pci aliases if it is on the same
	 * bus. The IVRS table may know about a quirk that we don't.
	 */
	ivrs_alias = pci_seg->alias_table[pci_dev_id(pdev)];
	if (ivrs_alias != pci_dev_id(pdev) &&
	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number)
		pci_add_dma_alias(pdev, ivrs_alias & 0xff, 1);

	clone_aliases(iommu, dev);
}

static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_dev_data *dev_data;

	dev_data = search_dev_data(iommu, devid);

	if (dev_data == NULL) {
		dev_data = alloc_dev_data(iommu, devid);
		if (!dev_data)
			return NULL;

		if (translation_pre_enabled(iommu))
			dev_data->defer_attach = true;
	}

	return dev_data;
}

/*
 * Find or create an IOMMU group for an acpihid device.
 */
static struct iommu_group *acpihid_device_group(struct device *dev)
{
	struct acpihid_map_entry *p, *entry = NULL;
	int devid;

	devid = get_acpihid_device_id(dev, &entry);
	if (devid < 0)
		return ERR_PTR(devid);

	list_for_each_entry(p, &acpihid_map, list) {
		if ((devid == p->devid) && p->group)
			entry->group = p->group;
	}

	if (!entry->group)
		entry->group = generic_device_group(dev);
	else
		iommu_group_ref_get(entry->group);

	return entry->group;
}

static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data)
{
	return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP);
}
static u32 pdev_get_caps(struct pci_dev *pdev)
{
	int features;
	u32 flags = 0;

	if (pci_ats_supported(pdev))
		flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;

	if (pci_pri_supported(pdev))
		flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;

	features = pci_pasid_features(pdev);
	if (features >= 0) {
		flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;

		if (features & PCI_PASID_CAP_EXEC)
			flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;

		if (features & PCI_PASID_CAP_PRIV)
			flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
	}

	return flags;
}

static inline int pdev_enable_cap_ats(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
	int ret = -EINVAL;

	if (dev_data->ats_enabled)
		return 0;

	if (amd_iommu_iotlb_sup &&
	    (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) {
		ret = pci_enable_ats(pdev, PAGE_SHIFT);
		if (!ret) {
			dev_data->ats_enabled = 1;
			dev_data->ats_qdep    = pci_ats_queue_depth(pdev);
		}
	}

	return ret;
}

static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data->ats_enabled) {
		pci_disable_ats(pdev);
		dev_data->ats_enabled = 0;
	}
}

int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
	int ret = -EINVAL;

	if (dev_data->pri_enabled)
		return 0;

	if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
		/*
		 * First reset the PRI state of the device.
		 * FIXME: Hardcode number of outstanding requests for now
		 */
		if (!pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) {
			dev_data->pri_enabled = 1;
			dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);

			ret = 0;
		}
	}

	return ret;
}

void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data->pri_enabled) {
		pci_disable_pri(pdev);
		dev_data->pri_enabled = 0;
	}
}

static inline int pdev_enable_cap_pasid(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
	int ret = -EINVAL;

	if (dev_data->pasid_enabled)
		return 0;

	if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) {
		/* Only allow access to user-accessible pages */
		ret = pci_enable_pasid(pdev, 0);
		if (!ret)
			dev_data->pasid_enabled = 1;
	}

	return ret;
}

static inline void pdev_disable_cap_pasid(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data->pasid_enabled) {
		pci_disable_pasid(pdev);
		dev_data->pasid_enabled = 0;
	}
}

static void pdev_enable_caps(struct pci_dev *pdev)
{
	pdev_enable_cap_ats(pdev);
	pdev_enable_cap_pasid(pdev);
	amd_iommu_pdev_enable_cap_pri(pdev);
}

static void pdev_disable_caps(struct pci_dev *pdev)
{
	pdev_disable_cap_ats(pdev);
	pdev_disable_cap_pasid(pdev);
	amd_iommu_pdev_disable_cap_pri(pdev);
}
/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	struct amd_iommu_pci_seg *pci_seg;
	struct amd_iommu *iommu;
	int devid, sbdf;

	if (!dev)
		return false;

	sbdf = get_device_sbdf_id(dev);
	if (sbdf < 0)
		return false;
	devid = PCI_SBDF_TO_DEVID(sbdf);

	iommu = rlookup_amd_iommu(dev);
	if (!iommu)
		return false;

	/* Out of our scope? */
	pci_seg = iommu->pci_seg;
	if (devid > pci_seg->last_bdf)
		return false;

	return true;
}

static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
{
	struct iommu_dev_data *dev_data;
	int devid, sbdf;

	if (dev_iommu_priv_get(dev))
		return 0;

	sbdf = get_device_sbdf_id(dev);
	if (sbdf < 0)
		return sbdf;

	devid = PCI_SBDF_TO_DEVID(sbdf);
	dev_data = find_dev_data(iommu, devid);
	if (!dev_data)
		return -ENOMEM;

	dev_data->dev = dev;
	setup_aliases(iommu, dev);

	/*
	 * By default we use passthrough mode for IOMMUv2 capable device.
	 * But if amd_iommu=force_isolation is set (e.g. to debug DMA to
	 * invalid address), we ignore the capability for the device so
	 * it'll be forced to go into translation mode.
	 */
	if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
	    dev_is_pci(dev) && amd_iommu_gt_ppr_supported()) {
		dev_data->flags = pdev_get_caps(to_pci_dev(dev));
	}

	dev_iommu_priv_set(dev, dev_data);

	return 0;
}

static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
{
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
	struct dev_table_entry *dev_table = get_dev_table(iommu);
	int devid, sbdf;

	sbdf = get_device_sbdf_id(dev);
	if (sbdf < 0)
		return;

	devid = PCI_SBDF_TO_DEVID(sbdf);
	pci_seg->rlookup_table[devid] = NULL;
	memset(&dev_table[devid], 0, sizeof(struct dev_table_entry));

	setup_aliases(iommu, dev);
}

static void amd_iommu_uninit_device(struct device *dev)
{
	struct iommu_dev_data *dev_data;

	dev_data = dev_iommu_priv_get(dev);
	if (!dev_data)
		return;

	if (dev_data->domain)
		detach_device(dev);

	dev_iommu_priv_set(dev, NULL);

	/*
	 * We keep dev_data around for unplugged devices and reuse it when the
	 * device is re-plugged - not doing so would introduce a ton of races.
	 */
}
/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
{
	int i;
	struct dev_table_entry *dev_table = get_dev_table(iommu);

	for (i = 0; i < 4; ++i)
		pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]);
}

static void dump_command(unsigned long phys_addr)
{
	struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
}
static void amd_iommu_report_rmp_hw_error(struct amd_iommu *iommu, volatile u32 *event)
{
	struct iommu_dev_data *dev_data = NULL;
	int devid, vmg_tag, flags;
	struct pci_dev *pdev;
	u64 spa;

	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	vmg_tag = (event[1]) & 0xFFFF;
	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	spa     = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);

	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (pdev)
		dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data) {
		if (__ratelimit(&dev_data->rs)) {
			pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
				vmg_tag, spa, flags);
		}
	} else {
		pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
				   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
				   vmg_tag, spa, flags);
	}

	if (pdev)
		pci_dev_put(pdev);
}

static void amd_iommu_report_rmp_fault(struct amd_iommu *iommu, volatile u32 *event)
{
	struct iommu_dev_data *dev_data = NULL;
	int devid, flags_rmp, vmg_tag, flags;
	struct pci_dev *pdev;
	u64 gpa;

	devid     = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
	vmg_tag   = (event[1]) & 0xFFFF;
	flags     = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	gpa       = ((u64)event[3] << 32) | event[2];

	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (pdev)
		dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data) {
		if (__ratelimit(&dev_data->rs)) {
			pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
				vmg_tag, gpa, flags_rmp, flags);
		}
	} else {
		pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%04x:%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
				   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
				   vmg_tag, gpa, flags_rmp, flags);
	}

	if (pdev)
		pci_dev_put(pdev);
}
#define IS_IOMMU_MEM_TRANSACTION(flags)		\
	(((flags) & EVENT_FLAG_I) == 0)

#define IS_WRITE_REQUEST(flags)			\
	((flags) & EVENT_FLAG_RW)
static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
					u16 devid, u16 domain_id,
					u64 address, int flags)
{
	struct iommu_dev_data *dev_data = NULL;
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (pdev)
		dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data) {
		/*
		 * If this is a DMA fault (for which the I(nterrupt)
		 * bit will be unset), allow report_iommu_fault() to
		 * prevent logging it.
		 */
		if (IS_IOMMU_MEM_TRANSACTION(flags)) {
			/* Device not attached to domain properly */
			if (dev_data->domain == NULL) {
				pr_err_ratelimited("Event logged [Device not attached to domain properly]\n");
				pr_err_ratelimited(" device=%04x:%02x:%02x.%x domain=0x%04x\n",
						   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
						   PCI_FUNC(devid), domain_id);
				goto out;
			}

			if (!report_iommu_fault(&dev_data->domain->domain,
						&pdev->dev, address,
						IS_WRITE_REQUEST(flags) ?
							IOMMU_FAULT_WRITE :
							IOMMU_FAULT_READ))
				goto out;
		}

		if (__ratelimit(&dev_data->rs)) {
			pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
				domain_id, address, flags);
		}
	} else {
		pr_err_ratelimited("Event logged [IO_PAGE_FAULT device=%04x:%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
				   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
				   domain_id, address, flags);
	}

out:
	if (pdev)
		pci_dev_put(pdev);
}
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
	struct device *dev = iommu->iommu.dev;
	int type, devid, flags, tag;
	volatile u32 *event = __evt;
	int count = 0;
	u64 address;
	u32 pasid;

retry:
	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	pasid   = (event[0] & EVENT_DOMID_MASK_HI) |
		  (event[1] & EVENT_DOMID_MASK_LO);
	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	address = (u64)(((u64)event[3]) << 32) | event[2];

	if (type == 0) {
		/* Did we hit the erratum? */
		if (++count == LOOP_TIMEOUT) {
			pr_err("No event written to event log\n");
			return;
		}

		udelay(1);
		goto retry;
	}

	if (type == EVENT_TYPE_IO_FAULT) {
		amd_iommu_report_page_fault(iommu, devid, pasid, address, flags);
		return;
	}

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags);
		dump_dte_entry(iommu, devid);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		dev_err(dev, "Event logged [DEV_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x "
			"address=0x%llx flags=0x%04x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%04x:%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		dev_err(dev, "Event logged [ILLEGAL_COMMAND_ERROR address=0x%llx]\n", address);
		dump_command(address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		dev_err(dev, "Event logged [COMMAND_HARDWARE_ERROR address=0x%llx flags=0x%04x]\n",
			address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		dev_err(dev, "Event logged [IOTLB_INV_TIMEOUT device=%04x:%02x:%02x.%x address=0x%llx]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		dev_err(dev, "Event logged [INVALID_DEVICE_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags);
		break;
	case EVENT_TYPE_RMP_FAULT:
		amd_iommu_report_rmp_fault(iommu, event);
		break;
	case EVENT_TYPE_RMP_HW_ERR:
		amd_iommu_report_rmp_hw_error(iommu, event);
		break;
	case EVENT_TYPE_INV_PPR_REQ:
		pasid = PPR_PASID(*((u64 *)__evt));
		tag = event[1] & 0x03FF;
		dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			pasid, address, flags, tag);
		break;
	default:
		dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x]\n",
			event[0], event[1], event[2], event[3]);
	}

	/*
	 * To detect hardware erratum 732 we need to clear the
	 * entry back to zero. This issue does not exist on SNP-
	 * enabled systems. Also, this buffer is not writeable on
	 * such systems.
	 */
	if (!amd_iommu_snp_en)
		memset(__evt, 0, 4 * sizeof(u32));
}
static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu, iommu->evt_buf + head);
		head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
	}

	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}
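/*
 * Illustrative sketch (not part of the driver): the event log is a
 * ring buffer whose head simply wraps with modulo arithmetic. Assuming
 * EVENT_ENTRY_SIZE == 16 and EVT_BUFFER_SIZE == 8192 (their values in
 * amd_iommu_types.h at the time of writing), the last slot wraps to 0:
 */
#if 0	/* example only */
static u32 evt_head_advance_example(u32 head)
{
	/* head == 8176 (last entry) -> (8176 + 16) % 8192 == 0 */
	return (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
}
#endif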
static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
	u32 head, tail;

	if (iommu->ppr_log == NULL)
		return;

	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	while (head != tail) {
		volatile u64 *raw;
		u64 entry[2];
		int i = 0;

		raw = (u64 *)(iommu->ppr_log + head);

		/*
		 * Hardware bug: Interrupt may arrive before the entry is
		 * written to memory. If this happens we need to wait for the
		 * entry to arrive.
		 */
		for (i = 0; i < LOOP_TIMEOUT; ++i) {
			if (PPR_REQ_TYPE(raw[0]) != 0)
				break;
			udelay(1);
		}

		/* Avoid memcpy function-call overhead */
		entry[0] = raw[0];
		entry[1] = raw[1];

		/*
		 * To detect hardware erratum 733 we need to clear the
		 * entry back to zero. This issue does not exist on SNP-
		 * enabled systems. Also, this buffer is not writeable on
		 * such systems.
		 */
		if (!amd_iommu_snp_en)
			raw[0] = raw[1] = 0UL;

		/* Update head pointer of hardware ring-buffer */
		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

		/* TODO: PPR Handler will be added when we add IOPF support */

		/* Refresh ring-buffer information */
		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
	}
}
#ifdef CONFIG_IRQ_REMAP
static int (*iommu_ga_log_notifier)(u32);

int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
	iommu_ga_log_notifier = notifier;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);

static void iommu_poll_ga_log(struct amd_iommu *iommu)
{
	u32 head, tail;

	if (iommu->ga_log == NULL)
		return;

	head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	while (head != tail) {
		volatile u64 *raw;
		u64 log_entry;

		raw = (u64 *)(iommu->ga_log + head);

		/* Avoid memcpy function-call overhead */
		log_entry = *raw;

		/* Update head pointer of hardware ring-buffer */
		head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);

		/* Handle GA entry */
		switch (GA_REQ_TYPE(log_entry)) {
		case GA_GUEST_NR:
			if (!iommu_ga_log_notifier)
				break;

			pr_debug("%s: devid=%#x, ga_tag=%#x\n",
				 __func__, GA_DEVID(log_entry),
				 GA_TAG(log_entry));

			if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
				pr_err("GA log notifier failed.\n");
			break;
		default:
			break;
		}
	}
}

static void
amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
{
	if (!irq_remapping_enabled || !dev_is_pci(dev) ||
	    !pci_dev_has_default_msi_parent_domain(to_pci_dev(dev)))
		return;

	dev_set_msi_domain(dev, iommu->ir_domain);
}

#else /* CONFIG_IRQ_REMAP */
static void
amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
#endif /* !CONFIG_IRQ_REMAP */
static void amd_iommu_handle_irq(void *data, const char *evt_type,
				 u32 int_mask, u32 overflow_mask,
				 void (*int_handler)(struct amd_iommu *),
				 void (*overflow_handler)(struct amd_iommu *))
{
	struct amd_iommu *iommu = (struct amd_iommu *) data;
	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	u32 mask = int_mask | overflow_mask;

	while (status & mask) {
		/* Enable interrupt sources again */
		writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET);

		if (int_handler) {
			pr_devel("Processing IOMMU (ivhd%d) %s Log\n",
				 iommu->index, evt_type);
			int_handler(iommu);
		}

		if ((status & overflow_mask) && overflow_handler)
			overflow_handler(iommu);

		/*
		 * Hardware bug: ERBT1312
		 * When re-enabling an interrupt (by writing 1
		 * to clear the bit), the hardware might also try to set
		 * the interrupt bit in the event status register.
		 * In this scenario, the bit will be set, and disable
		 * subsequent interrupts.
		 *
		 * Workaround: The IOMMU driver should read back the
		 * status register and check if the interrupt bits are cleared.
		 * If not, the driver needs to go through the interrupt handler
		 * again and re-clear the bits.
		 */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	}
}
irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data)
{
	amd_iommu_handle_irq(data, "Evt", MMIO_STATUS_EVT_INT_MASK,
			     MMIO_STATUS_EVT_OVERFLOW_MASK,
			     iommu_poll_events, amd_iommu_restart_event_logging);

	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
{
	amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
			     MMIO_STATUS_PPR_OVERFLOW_MASK,
			     iommu_poll_ppr_log, amd_iommu_restart_ppr_log);

	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_thread_galog(int irq, void *data)
{
#ifdef CONFIG_IRQ_REMAP
	amd_iommu_handle_irq(data, "GA", MMIO_STATUS_GALOG_INT_MASK,
			     MMIO_STATUS_GALOG_OVERFLOW_MASK,
			     iommu_poll_ga_log, amd_iommu_restart_ga_log);
#endif

	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
	amd_iommu_int_thread_evtlog(irq, data);
	amd_iommu_int_thread_pprlog(irq, data);
	amd_iommu_int_thread_galog(irq, data);

	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}
/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

static int wait_on_sem(struct amd_iommu *iommu, u64 data)
{
	int i = 0;

	while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
		udelay(1);
		i += 1;
	}

	if (i == LOOP_TIMEOUT) {
		pr_alert("Completion-Wait loop timed out\n");
		return -EIO;
	}

	return 0;
}

static void copy_cmd_to_buffer(struct amd_iommu *iommu,
			       struct iommu_cmd *cmd)
{
	u8 *target;
	u32 tail;

	/* Copy command to buffer */
	tail = iommu->cmd_buf_tail;
	target = iommu->cmd_buf + tail;
	memcpy(target, cmd, sizeof(*cmd));

	tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
	iommu->cmd_buf_tail = tail;

	/* Tell the IOMMU about it */
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}
static void build_completion_wait(struct iommu_cmd *cmd,
				  struct amd_iommu *iommu,
				  u64 data)
{
	u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
	cmd->data[1] = upper_32_bits(paddr);
	cmd->data[2] = lower_32_bits(data);
	cmd->data[3] = upper_32_bits(data);
	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}

static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
}
/*
 * Builds an invalidation address which is suitable for one page or multiple
 * pages. Sets the size bit (S) as needed if more than one page is flushed.
 */
static inline u64 build_inv_address(u64 address, size_t size)
{
	u64 pages, end, msb_diff;

	pages = iommu_num_pages(address, size, PAGE_SIZE);

	if (pages == 1)
		return address & PAGE_MASK;

	end = address + size - 1;

	/*
	 * msb_diff would hold the index of the most significant bit that
	 * flipped between the start and end.
	 */
	msb_diff = fls64(end ^ address) - 1;

	/*
	 * Bits 63:52 are sign extended. If for some reason bit 51 is different
	 * between the start and the end, invalidate everything.
	 */
	if (unlikely(msb_diff > 51)) {
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
	} else {
		/*
		 * The msb-bit must be clear on the address. Just set all the
		 * lower bits.
		 */
		address |= (1ull << msb_diff) - 1;
	}

	/* Clear bits 11:0 */
	address &= PAGE_MASK;

	/* Set the size bit - we flush more than one 4kb page */
	return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
}
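/*
 * Worked example (illustrative, not part of the driver), assuming
 * CMD_INV_IOMMU_PAGES_SIZE_MASK is bit 12: flushing size = 0x2000 at
 * address = 0x100000 (two 4K pages) yields
 *	end      = 0x101fff
 *	msb_diff = fls64(end ^ address) - 1 = 12
 *	address |= (1ull << 12) - 1	-> 0x100fff
 *	address &= PAGE_MASK		-> 0x100000
 *	return  address | S bit		-> 0x101000
 * Bit 12 set with bit 13 clear tells the hardware to invalidate an 8K
 * naturally aligned range starting at 0x100000.
 */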
static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
				  size_t size, u16 domid, int pde)
{
	u64 inv_address = build_inv_address(address, size);

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[1] |= domid;
	cmd->data[2]  = lower_32_bits(inv_address);
	cmd->data[3]  = upper_32_bits(inv_address);
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}

static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
				  u64 address, size_t size)
{
	u64 inv_address = build_inv_address(address, size);

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0]  = devid;
	cmd->data[0] |= (qdep & 0xff) << 24;
	cmd->data[1]  = devid;
	cmd->data[2]  = lower_32_bits(inv_address);
	cmd->data[3]  = upper_32_bits(inv_address);
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}
static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
				  u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0]  = pasid;
	cmd->data[1]  = domid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}

static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
				  int qdep, u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0]  = devid;
	cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
	cmd->data[0] |= (qdep & 0xff) << 24;
	cmd->data[1]  = devid;
	cmd->data[1] |= (pasid & 0xff) << 16;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	cmd->data[3]  = upper_32_bits(address);
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}
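/*
 * Worked example (illustrative, not part of the driver): for
 * devid = 0x0500, pasid = 0x12ab and qdep = 0x20 the function above
 * packs the fields as
 *	data[0] = 0x0500 | (0x12 << 16) | (0x20 << 24) = 0x20120500
 *	data[1] = 0x0500 | (0xab << 16)                = 0x00ab0500
 * i.e. PASID bits 15:8 travel in data[0] and bits 7:0 in data[1],
 * both alongside the device ID.
 */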
static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
			       int status, int tag, u8 gn)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->data[0]  = devid;
	if (gn) {
		cmd->data[1]  = pasid;
		cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
	}
	cmd->data[3]  = tag & 0x1ff;
	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;

	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
}
static void build_inv_all(struct iommu_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	CMD_SET_TYPE(cmd, CMD_INV_ALL);
}

static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_IRT);
}
/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command.
 */
static int __iommu_queue_command_sync(struct amd_iommu *iommu,
				      struct iommu_cmd *cmd,
				      bool sync)
{
	unsigned int count = 0;
	u32 left, next_tail;

	next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
again:
	left      = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;

	if (left <= 0x20) {
		/* Skip udelay() the first time around */
		if (count++) {
			if (count == LOOP_TIMEOUT) {
				pr_err("Command buffer timeout\n");
				return -EIO;
			}

			udelay(1);
		}

		/* Update head and recheck remaining space */
		iommu->cmd_buf_head = readl(iommu->mmio_base +
					    MMIO_CMD_HEAD_OFFSET);

		goto again;
	}

	copy_cmd_to_buffer(iommu, cmd);

	/* Do we need to make sure all commands are processed? */
	iommu->need_sync = sync;

	return 0;
}
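/*
 * Illustrative sketch (not part of the driver): the free-space check
 * above relies on unsigned modulo arithmetic over the power-of-two
 * ring. Assuming CMD_BUFFER_SIZE == 8192 (its value in
 * amd_iommu_types.h at the time of writing):
 */
#if 0	/* example only */
static u32 cmd_buf_left_example(u32 head, u32 next_tail)
{
	/*
	 * head == 0x40, next_tail == 0x50 -> (0x40 - 0x50) % 8192 == 8176
	 * (plenty of room); head == 0x40, next_tail == 0x30 -> 0x10,
	 * which is <= 0x20 and therefore treated as "buffer full".
	 */
	return (head - next_tail) % CMD_BUFFER_SIZE;
}
#endif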
static int iommu_queue_command_sync(struct amd_iommu *iommu,
				    struct iommu_cmd *cmd,
				    bool sync)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command_sync(iommu, cmd, sync);
	raw_spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	return iommu_queue_command_sync(iommu, cmd, true);
}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;
	unsigned long flags;
	int ret;
	u64 data;

	if (!iommu->need_sync)
		return 0;

	data = atomic64_add_return(1, &iommu->cmd_sem_val);
	build_completion_wait(&cmd, iommu, data);

	raw_spin_lock_irqsave(&iommu->lock, flags);

	ret = __iommu_queue_command_sync(iommu, &cmd, false);
	if (ret)
		goto out_unlock;

	ret = wait_on_sem(iommu, data);

out_unlock:
	raw_spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}
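/*
 * Summary sketch (illustrative, not part of the driver) of the
 * completion-wait protocol implemented by the functions above: bump a
 * 64-bit sequence counter, ask the hardware to store it to
 * iommu->cmd_sem once all earlier commands are done, then poll:
 *
 *	data = atomic64_add_return(1, &iommu->cmd_sem_val);
 *	build_completion_wait(&cmd, iommu, data);  // COMPL_WAIT + store
 *	__iommu_queue_command_sync(iommu, &cmd, false);
 *	wait_on_sem(iommu, data);                  // spin on *cmd_sem
 */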
static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_dte(&cmd, devid);

	return iommu_queue_command(iommu, &cmd);
}

static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
{
	u32 devid;
	u16 last_bdf = iommu->pci_seg->last_bdf;

	for (devid = 0; devid <= last_bdf; ++devid)
		iommu_flush_dte(iommu, devid);

	iommu_completion_wait(iommu);
}

/*
 * This function uses heavy locking and may disable irqs for some time. But
 * this is no issue because it is only called during resume.
 */
static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
{
	u32 dom_id;
	u16 last_bdf = iommu->pci_seg->last_bdf;

	for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
		struct iommu_cmd cmd;
		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
				      dom_id, 1);
		iommu_queue_command(iommu, &cmd);
	}

	iommu_completion_wait(iommu);
}
static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
{
	struct iommu_cmd cmd;

	build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
			      dom_id, 1);
	iommu_queue_command(iommu, &cmd);

	iommu_completion_wait(iommu);
}

static void amd_iommu_flush_all(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;

	build_inv_all(&cmd);

	iommu_queue_command(iommu, &cmd);
	iommu_completion_wait(iommu);
}

static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_irt(&cmd, devid);

	iommu_queue_command(iommu, &cmd);
}

static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
{
	u32 devid;
	u16 last_bdf = iommu->pci_seg->last_bdf;

	if (iommu->irtcachedis_enabled)
		return;

	for (devid = 0; devid <= last_bdf; devid++)
		iommu_flush_irt(iommu, devid);

	iommu_completion_wait(iommu);
}

void iommu_flush_all_caches(struct amd_iommu *iommu)
{
	if (check_feature(FEATURE_IA)) {
		amd_iommu_flush_all(iommu);
	} else {
		amd_iommu_flush_dte_all(iommu);
		amd_iommu_flush_irt_all(iommu);
		amd_iommu_flush_tlb_all(iommu);
	}
}
/*
 * Command send function for flushing on-device TLB
 */
static int device_flush_iotlb(struct iommu_dev_data *dev_data,
			      u64 address, size_t size)
{
	struct amd_iommu *iommu;
	struct iommu_cmd cmd;
	int qdep;

	qdep  = dev_data->ats_qdep;
	iommu = rlookup_amd_iommu(dev_data->dev);
	if (!iommu)
		return -EINVAL;

	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

	return iommu_queue_command(iommu, &cmd);
}

static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	struct amd_iommu *iommu = data;

	return iommu_flush_dte(iommu, alias);
}

/*
 * Command send function for invalidating a device table entry
 */
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
	struct amd_iommu *iommu;
	struct pci_dev *pdev = NULL;
	struct amd_iommu_pci_seg *pci_seg;
	u16 alias;
	int ret;

	iommu = rlookup_amd_iommu(dev_data->dev);
	if (!iommu)
		return -EINVAL;

	if (dev_is_pci(dev_data->dev))
		pdev = to_pci_dev(dev_data->dev);

	if (pdev)
		ret = pci_for_each_dma_alias(pdev,
					     device_flush_dte_alias, iommu);
	else
		ret = iommu_flush_dte(iommu, dev_data->devid);
	if (ret)
		return ret;

	pci_seg = iommu->pci_seg;
	alias = pci_seg->alias_table[dev_data->devid];
	if (alias != dev_data->devid) {
		ret = iommu_flush_dte(iommu, alias);
		if (ret)
			return ret;
	}

	if (dev_data->ats_enabled)
		ret = device_flush_iotlb(dev_data, 0, ~0UL);

	return ret;
}
/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static void __domain_flush_pages(struct protection_domain *domain,
				 u64 address, size_t size, int pde)
{
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
	int ret = 0, i;

	build_inv_iommu_pages(&cmd, address, size, domain->id, pde);

	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need a TLB flush
		 */
		ret |= iommu_queue_command(amd_iommus[i], &cmd);
	}

	list_for_each_entry(dev_data, &domain->dev_list, list) {

		if (!dev_data->ats_enabled)
			continue;

		ret |= device_flush_iotlb(dev_data, address, size);
	}

	WARN_ON(ret);
}

static void domain_flush_pages(struct protection_domain *domain,
			       u64 address, size_t size, int pde)
{
	if (likely(!amd_iommu_np_cache)) {
		__domain_flush_pages(domain, address, size, pde);
		return;
	}

	/*
	 * When NpCache is on, we infer that we run in a VM and use a vIOMMU.
	 * In such setups it is best to avoid flushes of ranges which are not
	 * naturally aligned, since it would lead to flushes of unmodified
	 * PTEs. Such flushes would require the hypervisor to do more work than
	 * necessary. Therefore, perform repeated flushes of aligned ranges
	 * until you cover the range. Each iteration flushes the smaller
	 * between the natural alignment of the address that we flush and the
	 * greatest naturally aligned region that fits in the range.
	 */
	while (size != 0) {
		int addr_alignment = __ffs(address);
		int size_alignment = __fls(size);
		int min_alignment;
		size_t flush_size;

		/*
		 * size is always non-zero, but address might be zero, causing
		 * addr_alignment to be negative. As the casting of the
		 * argument in __ffs(address) to long might trim the high bits
		 * of the address on x86-32, cast to long when doing the check.
		 */
		if (likely((unsigned long)address != 0))
			min_alignment = min(addr_alignment, size_alignment);
		else
			min_alignment = size_alignment;

		flush_size = 1ul << min_alignment;

		__domain_flush_pages(domain, address, flush_size, pde);
		address += flush_size;
		size -= flush_size;
	}
}
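/*
 * Worked example (illustrative, not part of the driver): flushing
 * address = 0x3000, size = 0x5000 under NpCache decomposes into two
 * naturally aligned flushes:
 *	1) __ffs(0x3000) = 12, __fls(0x5000) = 14 -> flush 0x1000 @ 0x3000
 *	2) __ffs(0x4000) = 14, __fls(0x4000) = 14 -> flush 0x4000 @ 0x4000
 * instead of one unaligned 0x5000-byte flush.
 */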
/* Flush the whole IO/TLB for a given protection domain - including PDE */
void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
{
	domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}

void amd_iommu_domain_flush_complete(struct protection_domain *domain)
{
	int i;

	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
		if (domain && !domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need to wait for completion of all commands.
		 */
		iommu_completion_wait(amd_iommus[i]);
	}
}

/* Flush the not present cache if it exists */
static void domain_flush_np_cache(struct protection_domain *domain,
				  dma_addr_t iova, size_t size)
{
	if (unlikely(amd_iommu_np_cache)) {
		unsigned long flags;

		spin_lock_irqsave(&domain->lock, flags);
		domain_flush_pages(domain, iova, size, 1);
		amd_iommu_domain_flush_complete(domain);
		spin_unlock_irqrestore(&domain->lock, flags);
	}
}

/*
 * This function flushes the DTEs for all devices in domain
 */
static void domain_flush_devices(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;

	list_for_each_entry(dev_data, &domain->dev_list, list)
		device_flush_dte(dev_data);
}

/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/
static u16 domain_id_alloc(void)
{
	int id;

	spin_lock(&pd_bitmap_lock);
	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
	BUG_ON(id == 0);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__set_bit(id, amd_iommu_pd_alloc_bitmap);
	else
		id = 0;
	spin_unlock(&pd_bitmap_lock);

	return id;
}

static void domain_id_free(int id)
{
	spin_lock(&pd_bitmap_lock);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
	spin_unlock(&pd_bitmap_lock);
}
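/*
 * Illustrative sketch (not part of the driver): typical pairing of the
 * two helpers above. IDs come from a global bitmap guarded by
 * pd_bitmap_lock; ID 0 is never handed out and is treated as invalid
 * throughout this file (an assumption based on the id > 0 checks).
 */
#if 0	/* example only */
	u16 id = domain_id_alloc();	/* 0 on exhaustion */
	if (id)
		domain_id_free(id);
#endif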
static void free_gcr3_tbl_level1(u64 *tbl)
{
	u64 *ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (!(tbl[i] & GCR3_VALID))
			continue;

		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);

		free_page((unsigned long)ptr);
	}
}

static void free_gcr3_tbl_level2(u64 *tbl)
{
	u64 *ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (!(tbl[i] & GCR3_VALID))
			continue;

		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);

		free_gcr3_tbl_level1(ptr);
	}
}

static void free_gcr3_table(struct protection_domain *domain)
{
	if (domain->glx == 2)
		free_gcr3_tbl_level2(domain->gcr3_tbl);
	else if (domain->glx == 1)
		free_gcr3_tbl_level1(domain->gcr3_tbl);
	else
		BUG_ON(domain->glx != 0);

	free_page((unsigned long)domain->gcr3_tbl);
}
/*
 * Number of GCR3 table levels required. Each level is a 4-Kbyte
 * page and can contain up to 512 entries.
 */
static int get_gcr3_levels(int pasids)
{
	int levels;

	if (pasids == -1)
		return amd_iommu_max_glx_val;

	levels = get_count_order(pasids);

	return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
}
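/*
 * Worked example (illustrative, not part of the driver): each 4K GCR3
 * table level resolves 9 bits (512 entries), so
 *	pasids = 1       -> get_count_order = 0  -> 0 extra levels
 *	pasids = 512     -> get_count_order = 9  -> DIV_ROUND_UP(9, 9) - 1 = 0
 *	pasids = 1 << 20 -> get_count_order = 20 -> DIV_ROUND_UP(20, 9) - 1 = 2
 */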
/* Note: This function expects iommu_domain->lock to be held prior to calling it. */
static int setup_gcr3_table(struct protection_domain *domain, int pasids)
{
	int levels = get_gcr3_levels(pasids);

	if (levels > amd_iommu_max_glx_val)
		return -EINVAL;

	domain->gcr3_tbl = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
	if (domain->gcr3_tbl == NULL)
		return -ENOMEM;

	domain->glx    = levels;
	domain->flags |= PD_IOMMUV2_MASK;

	amd_iommu_domain_update(domain);

	return 0;
}
static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
			  struct protection_domain *domain, bool ats, bool ppr)
{
	u64 pte_root = 0;
	u64 flags = 0;
	u32 old_domid;
	struct dev_table_entry *dev_table = get_dev_table(iommu);

	if (domain->iop.mode != PAGE_MODE_NONE)
		pte_root = iommu_virt_to_phys(domain->iop.root);

	pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
		    << DEV_ENTRY_MODE_SHIFT;

	pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;

	/*
	 * When SNP is enabled, only set the TV bit when IOMMU
	 * page translation is in use.
	 */
	if (!amd_iommu_snp_en || (domain->id != 0))
		pte_root |= DTE_FLAG_TV;

	flags = dev_table[devid].data[1];

	if (ats)
		flags |= DTE_FLAG_IOTLB;

	if (ppr)
		pte_root |= 1ULL << DEV_ENTRY_PPR;

	if (domain->dirty_tracking)
		pte_root |= DTE_FLAG_HAD;

	if (domain->flags & PD_IOMMUV2_MASK) {
		u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
		u64 glx  = domain->glx;
		u64 tmp;

		pte_root |= DTE_FLAG_GV;
		pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;

		/* First mask out possible old values for GCR3 table */
		tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
		flags    &= ~tmp;

		tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
		flags    &= ~tmp;

		/* Encode GCR3 table into DTE */
		tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
		pte_root |= tmp;

		tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
		flags    |= tmp;

		tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
		flags    |= tmp;

		if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) {
			dev_table[devid].data[2] |=
				((u64)GUEST_PGTABLE_5_LEVEL << DTE_GPT_LEVEL_SHIFT);
		}

		if (domain->flags & PD_GIOV_MASK)
			pte_root |= DTE_FLAG_GIOV;
	}

	flags &= ~DEV_DOMID_MASK;
	flags |= domain->id;

	old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
	dev_table[devid].data[1]  = flags;
	dev_table[devid].data[0]  = pte_root;

	/*
	 * A kdump kernel might be replacing a domain ID that was copied from
	 * the previous kernel--if so, it needs to flush the translation cache
	 * entries for the old domain ID that is being overwritten.
	 */
	if (old_domid)
		amd_iommu_flush_tlb_domid(iommu, old_domid);
}
static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
{
	struct dev_table_entry *dev_table = get_dev_table(iommu);

	/* remove entry from the device table seen by the hardware */
	dev_table[devid].data[0]  = DTE_FLAG_V;

	if (!amd_iommu_snp_en)
		dev_table[devid].data[0] |= DTE_FLAG_TV;

	dev_table[devid].data[1] &= DTE_FLAG_MASK;

	amd_iommu_apply_erratum_63(iommu, devid);
}
static void do_attach(struct iommu_dev_data *dev_data,
		      struct protection_domain *domain)
{
	struct amd_iommu *iommu;
	bool ats;

	iommu = rlookup_amd_iommu(dev_data->dev);
	if (!iommu)
		return;
	ats = dev_data->ats_enabled;

	/* Update data structures */
	dev_data->domain = domain;
	list_add(&dev_data->list, &domain->dev_list);

	/* Update NUMA Node ID */
	if (domain->nid == NUMA_NO_NODE)
		domain->nid = dev_to_node(dev_data->dev);

	/* Do reference counting */
	domain->dev_iommu[iommu->index] += 1;
	domain->dev_cnt                 += 1;

	/* Update device table */
	set_dte_entry(iommu, dev_data->devid, domain,
		      ats, dev_data->ppr);
	clone_aliases(iommu, dev_data->dev);

	device_flush_dte(dev_data);
}

static void do_detach(struct iommu_dev_data *dev_data)
{
	struct protection_domain *domain = dev_data->domain;
	struct amd_iommu *iommu;

	iommu = rlookup_amd_iommu(dev_data->dev);
	if (!iommu)
		return;

	/* Update data structures */
	dev_data->domain = NULL;
	list_del(&dev_data->list);
	clear_dte_entry(iommu, dev_data->devid);
	clone_aliases(iommu, dev_data->dev);

	/* Flush the DTE entry */
	device_flush_dte(dev_data);

	/* Flush IOTLB */
	amd_iommu_domain_flush_tlb_pde(domain);

	/* Wait for the flushes to finish */
	amd_iommu_domain_flush_complete(domain);

	/* decrease reference counters - needs to happen after the flushes */
	domain->dev_iommu[iommu->index] -= 1;
	domain->dev_cnt                 -= 1;
}
/*
 * If a device is not yet associated with a domain, this function makes the
 * device visible in the domain
 */
static int attach_device(struct device *dev,
			 struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&domain->lock, flags);

	dev_data = dev_iommu_priv_get(dev);

	spin_lock(&dev_data->lock);

	if (dev_data->domain != NULL) {
		ret = -EBUSY;
		goto out;
	}

	if (dev_is_pci(dev))
		pdev_enable_caps(to_pci_dev(dev));

	do_attach(dev_data, domain);

	/*
	 * We might boot into a crash-kernel here. The crashed kernel
	 * left the caches in the IOMMU dirty. So we have to flush
	 * here to evict all dirty stuff.
	 */
	amd_iommu_domain_flush_tlb_pde(domain);

	amd_iommu_domain_flush_complete(domain);

out:
	spin_unlock(&dev_data->lock);

	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}

/*
 * Removes a device from a protection domain (with devtable_lock held)
 */
static void detach_device(struct device *dev)
{
	struct protection_domain *domain;
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	dev_data = dev_iommu_priv_get(dev);
	domain   = dev_data->domain;

	spin_lock_irqsave(&domain->lock, flags);

	spin_lock(&dev_data->lock);

	/*
	 * First check if the device is still attached. It might already
	 * be detached from its domain because the generic
	 * iommu_detach_group code detached it and we try again here in
	 * our alias handling.
	 */
	if (WARN_ON(!dev_data->domain))
		goto out;

	do_detach(dev_data);

	if (dev_is_pci(dev))
		pdev_disable_caps(to_pci_dev(dev));

out:
	spin_unlock(&dev_data->lock);

	spin_unlock_irqrestore(&domain->lock, flags);
}
static struct iommu_device *amd_iommu_probe_device(struct device *dev)
{
	struct iommu_device *iommu_dev;
	struct amd_iommu *iommu;
	int ret;

	if (!check_device(dev))
		return ERR_PTR(-ENODEV);

	iommu = rlookup_amd_iommu(dev);
	if (!iommu)
		return ERR_PTR(-ENODEV);

	/* Not registered yet? */
	if (!iommu->iommu.ops)
		return ERR_PTR(-ENODEV);

	if (dev_iommu_priv_get(dev))
		return &iommu->iommu;

	ret = iommu_init_device(iommu, dev);
	if (ret) {
		if (ret != -ENOTSUPP)
			dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
		iommu_dev = ERR_PTR(ret);
		iommu_ignore_device(iommu, dev);
	} else {
		amd_iommu_set_pci_msi_domain(dev, iommu);
		iommu_dev = &iommu->iommu;
	}

	iommu_completion_wait(iommu);

	return iommu_dev;
}

static void amd_iommu_probe_finalize(struct device *dev)
{
	/* Domains are initialized for this device - have a look what we ended up with */
	set_dma_ops(dev, NULL);
	iommu_setup_dma_ops(dev, 0, U64_MAX);
}
static void amd_iommu_release_device(struct device *dev)
{
	struct amd_iommu *iommu;

	if (!check_device(dev))
		return;

	iommu = rlookup_amd_iommu(dev);
	if (!iommu)
		return;

	amd_iommu_uninit_device(dev);
	iommu_completion_wait(iommu);
}

static struct iommu_group *amd_iommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);

	return acpihid_device_group(dev);
}

/*****************************************************************************
 *
 * The next functions belong to the dma_ops mapping/unmapping code.
 *
 *****************************************************************************/
static void update_device_table(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;

	list_for_each_entry(dev_data, &domain->dev_list, list) {
		struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);

		if (!iommu)
			continue;
		set_dte_entry(iommu, dev_data->devid, domain,
			      dev_data->ats_enabled, dev_data->ppr);
		clone_aliases(iommu, dev_data->dev);
	}
}

void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
{
	update_device_table(domain);
	domain_flush_devices(domain);
}

void amd_iommu_domain_update(struct protection_domain *domain)
{
	/* Update device table */
	amd_iommu_update_and_flush_device_table(domain);

	/* Flush domain TLB(s) and wait for completion */
	amd_iommu_domain_flush_tlb_pde(domain);
	amd_iommu_domain_flush_complete(domain);
}

/*****************************************************************************
 *
 * The following functions belong to the exported interface of AMD IOMMU
 *
 * This interface allows access to lower level functions of the IOMMU
 * like protection domain handling and assignment of devices to domains
 * which is not possible with the dma_ops interface.
 *
 *****************************************************************************/
static void cleanup_domain(struct protection_domain *domain)
{
	struct iommu_dev_data *entry;

	lockdep_assert_held(&domain->lock);

	if (!domain->dev_cnt)
		return;

	while (!list_empty(&domain->dev_list)) {
		entry = list_first_entry(&domain->dev_list,
					 struct iommu_dev_data, list);
		BUG_ON(!entry->domain);
		do_detach(entry);
	}
	WARN_ON(domain->dev_cnt != 0);
}

static void protection_domain_free(struct protection_domain *domain)
{
	if (!domain)
		return;

	if (domain->iop.pgtbl_cfg.tlb)
		free_io_pgtable_ops(&domain->iop.iop.ops);

	if (domain->flags & PD_IOMMUV2_MASK)
		free_gcr3_table(domain);

	if (domain->iop.root)
		free_page((unsigned long)domain->iop.root);

	if (domain->id)
		domain_id_free(domain->id);

	kfree(domain);
}
static int protection_domain_init_v1(struct protection_domain *domain, int mode)
{
	u64 *pt_root = NULL;

	BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);

	if (mode != PAGE_MODE_NONE) {
		pt_root = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pt_root)
			return -ENOMEM;
	}

	amd_iommu_domain_set_pgtable(domain, pt_root, mode);

	return 0;
}

static int protection_domain_init_v2(struct protection_domain *domain)
{
	domain->flags |= PD_GIOV_MASK;

	domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;

	if (setup_gcr3_table(domain, 1))
		return -ENOMEM;

	return 0;
}
static struct protection_domain *protection_domain_alloc(unsigned int type)
{
	struct io_pgtable_ops *pgtbl_ops;
	struct protection_domain *domain;
	int pgtable;
	int ret;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->id = domain_id_alloc();
	if (!domain->id)
		goto out_err;

	spin_lock_init(&domain->lock);
	INIT_LIST_HEAD(&domain->dev_list);
	domain->nid = NUMA_NO_NODE;

	switch (type) {
	/* No need to allocate io pgtable ops in passthrough mode */
	case IOMMU_DOMAIN_IDENTITY:
		return domain;
	case IOMMU_DOMAIN_DMA:
		pgtable = amd_iommu_pgtable;
		break;
	/*
	 * Force IOMMU v1 page table when allocating
	 * domain for pass-through devices.
	 */
	case IOMMU_DOMAIN_UNMANAGED:
		pgtable = AMD_IOMMU_V1;
		break;
	default:
		goto out_err;
	}

	switch (pgtable) {
	case AMD_IOMMU_V1:
		ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL);
		break;
	case AMD_IOMMU_V2:
		ret = protection_domain_init_v2(domain);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		goto out_err;

	pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
	if (!pgtbl_ops)
		goto out_err;

	return domain;
out_err:
	protection_domain_free(domain);
	return NULL;
}
static inline u64 dma_max_address(void)
{
	if (amd_iommu_pgtable == AMD_IOMMU_V1)
		return ~0ULL;

	/* V2 with 4/5 level page table */
	return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
}
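/*
 * Worked example (illustrative, not part of the driver): assuming
 * PM_LEVEL_SHIFT(x) == 12 + 9 * (x) as defined in amd_iommu_types.h,
 * the v2 aperture ends at
 *	4-level (amd_iommu_gpt_level == PAGE_MODE_4_LEVEL): (1ULL << 48) - 1
 *	5-level (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL): (1ULL << 57) - 1
 */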
static bool amd_iommu_hd_support(struct amd_iommu *iommu)
{
	return iommu && (iommu->features & FEATURE_HDSUP);
}
static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
						  struct device *dev, u32 flags)
{
	bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
	struct protection_domain *domain;
	struct amd_iommu *iommu = NULL;

	if (dev) {
		iommu = rlookup_amd_iommu(dev);
		if (!iommu)
			return ERR_PTR(-ENODEV);
	}

	/*
	 * Since DTE[Mode]=0 is prohibited on SNP-enabled systems,
	 * default to using IOMMU_DOMAIN_DMA[_FQ].
	 */
	if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
		return ERR_PTR(-EINVAL);

	if (dirty_tracking && !amd_iommu_hd_support(iommu))
		return ERR_PTR(-EOPNOTSUPP);

	domain = protection_domain_alloc(type);
	if (!domain)
		return ERR_PTR(-ENOMEM);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = dma_max_address();
	domain->domain.geometry.force_aperture = true;

	if (iommu) {
		domain->domain.type = type;
		domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap;
		domain->domain.ops = iommu->iommu.ops->default_domain_ops;

		if (dirty_tracking)
			domain->domain.dirty_ops = &amd_dirty_ops;
	}

	return &domain->domain;
}

static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
{
	struct iommu_domain *domain;

	domain = do_iommu_domain_alloc(type, NULL, 0);
	if (IS_ERR(domain))
		return NULL;

	return domain;
}
static struct iommu_domain *
amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
			    struct iommu_domain *parent,
			    const struct iommu_user_data *user_data)
{
	unsigned int type = IOMMU_DOMAIN_UNMANAGED;

	if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data)
		return ERR_PTR(-EOPNOTSUPP);

	return do_iommu_domain_alloc(type, dev, flags);
}

static void amd_iommu_domain_free(struct iommu_domain *dom)
{
	struct protection_domain *domain;
	unsigned long flags;

	if (!dom)
		return;

	domain = to_pdomain(dom);

	spin_lock_irqsave(&domain->lock, flags);

	cleanup_domain(domain);

	spin_unlock_irqrestore(&domain->lock, flags);

	protection_domain_free(domain);
}
static int amd_iommu_attach_device(struct iommu_domain *dom,
				   struct device *dev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
	struct protection_domain *domain = to_pdomain(dom);
	struct amd_iommu *iommu = rlookup_amd_iommu(dev);
	int ret;

	/*
	 * Skip attaching the device to the domain if the new domain is
	 * the same as the device's current domain.
	 */
	if (dev_data->domain == domain)
		return 0;

	dev_data->defer_attach = false;

	/*
	 * Restrict to devices with compatible IOMMU hardware support
	 * when enforcement of dirty tracking is enabled.
	 */
	if (dom->dirty_ops && !amd_iommu_hd_support(iommu))
		return -EINVAL;

	if (dev_data->domain)
		detach_device(dev);

	ret = attach_device(dev, domain);

#ifdef CONFIG_IRQ_REMAP
	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
		if (dom->type == IOMMU_DOMAIN_UNMANAGED)
			dev_data->use_vapic = 1;
		else
			dev_data->use_vapic = 0;
	}
#endif

	iommu_completion_wait(iommu);

	return ret;
}
static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
				    unsigned long iova, size_t size)
{
	struct protection_domain *domain = to_pdomain(dom);
	struct io_pgtable_ops *ops = &domain->iop.iop.ops;

	if (ops->map_pages)
		domain_flush_np_cache(domain, iova, size);
	return 0;
}

static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
			       phys_addr_t paddr, size_t pgsize, size_t pgcount,
			       int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct protection_domain *domain = to_pdomain(dom);
	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
	int prot = 0;
	int ret = -EINVAL;

	if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
	    (domain->iop.mode == PAGE_MODE_NONE))
		return -EINVAL;

	if (iommu_prot & IOMMU_READ)
		prot |= IOMMU_PROT_IR;
	if (iommu_prot & IOMMU_WRITE)
		prot |= IOMMU_PROT_IW;

	if (ops->map_pages) {
		ret = ops->map_pages(ops, iova, paddr, pgsize,
				     pgcount, prot, gfp, mapped);
	}

	return ret;
}
2375 static void amd_iommu_iotlb_gather_add_page(struct iommu_domain
*domain
,
2376 struct iommu_iotlb_gather
*gather
,
2377 unsigned long iova
, size_t size
)
2380 * AMD's IOMMU can flush as many pages as necessary in a single flush.
2381 * Unless we run in a virtual machine, which can be inferred according
2382 * to whether "non-present cache" is on, it is probably best to prefer
2383 * (potentially) too extensive TLB flushing (i.e., more misses) over
2384 * mutliple TLB flushes (i.e., more flushes). For virtual machines the
2385 * hypervisor needs to synchronize the host IOMMU PTEs with those of
2386 * the guest, and the trade-off is different: unnecessary TLB flushes
2387 * should be avoided.
2389 if (amd_iommu_np_cache
&&
2390 iommu_iotlb_gather_is_disjoint(gather
, iova
, size
))
2391 iommu_iotlb_sync(domain
, gather
);
2393 iommu_iotlb_gather_add_range(gather
, iova
, size
);
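/*
 * Unmapping only tears down the page-table entries; IOTLB invalidation
 * is deferred to the gather/iotlb_sync machinery above.
 */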
static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
				    size_t pgsize, size_t pgcount,
				    struct iommu_iotlb_gather *gather)
{
	struct protection_domain *domain = to_pdomain(dom);
	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
	size_t r;

	if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
	    (domain->iop.mode == PAGE_MODE_NONE))
		return 0;

	r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0;

	if (r)
		amd_iommu_iotlb_gather_add_page(dom, gather, iova, r);

	return r;
}

static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
					  dma_addr_t iova)
{
	struct protection_domain *domain = to_pdomain(dom);
	struct io_pgtable_ops *ops = &domain->iop.iop.ops;

	return ops->iova_to_phys(ops, iova);
}

static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_NOEXEC:
		return false;
	case IOMMU_CAP_PRE_BOOT_PROTECTION:
		return amdr_ivrs_remap_support;
	case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_DEFERRED_FLUSH:
		return true;
	case IOMMU_CAP_DIRTY_TRACKING: {
		struct amd_iommu *iommu = rlookup_amd_iommu(dev);

		return amd_iommu_hd_support(iommu);
	}
	default:
		break;
	}

	return false;
}
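/*
 * Dirty tracking is switched by toggling the HAD (Hardware Access/Dirty)
 * bit in the DTE of every device attached to the domain, followed by a
 * DTE flush and a full domain TLB flush so that stale translations
 * cannot bypass dirty-bit updates.
 */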
static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
					bool enable)
{
	struct protection_domain *pdomain = to_pdomain(domain);
	struct dev_table_entry *dev_table;
	struct iommu_dev_data *dev_data;
	bool domain_flush = false;
	struct amd_iommu *iommu;
	unsigned long flags;
	u64 pte_root;

	spin_lock_irqsave(&pdomain->lock, flags);
	if (!(pdomain->dirty_tracking ^ enable)) {
		spin_unlock_irqrestore(&pdomain->lock, flags);
		return 0;
	}

	list_for_each_entry(dev_data, &pdomain->dev_list, list) {
		iommu = rlookup_amd_iommu(dev_data->dev);
		if (!iommu)
			continue;

		dev_table = get_dev_table(iommu);
		pte_root = dev_table[dev_data->devid].data[0];

		pte_root = (enable ? pte_root | DTE_FLAG_HAD :
				     pte_root & ~DTE_FLAG_HAD);

		/* Flush device DTE */
		dev_table[dev_data->devid].data[0] = pte_root;
		device_flush_dte(dev_data);
		domain_flush = true;
	}

	/* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
	if (domain_flush) {
		amd_iommu_domain_flush_tlb_pde(pdomain);
		amd_iommu_domain_flush_complete(pdomain);
	}
	pdomain->dirty_tracking = enable;
	spin_unlock_irqrestore(&pdomain->lock, flags);

	return 0;
}
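/*
 * Reading dirty bits only makes sense while tracking is enabled: a
 * request carrying a bitmap against a domain with tracking disabled is
 * rejected, and the actual scan is delegated to the io-pgtable
 * implementation.
 */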
static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain,
					  unsigned long iova, size_t size,
					  unsigned long flags,
					  struct iommu_dirty_bitmap *dirty)
{
	struct protection_domain *pdomain = to_pdomain(domain);
	struct io_pgtable_ops *ops = &pdomain->iop.iop.ops;
	unsigned long lflags;

	if (!ops || !ops->read_and_clear_dirty)
		return -EOPNOTSUPP;

	spin_lock_irqsave(&pdomain->lock, lflags);
	if (!pdomain->dirty_tracking && dirty->bitmap) {
		spin_unlock_irqrestore(&pdomain->lock, lflags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&pdomain->lock, lflags);

	return ops->read_and_clear_dirty(ops, iova, size, flags, dirty);
}
static void amd_iommu_get_resv_regions(struct device *dev,
				       struct list_head *head)
{
	struct iommu_resv_region *region;
	struct unity_map_entry *entry;
	struct amd_iommu *iommu;
	struct amd_iommu_pci_seg *pci_seg;
	int devid, sbdf;

	sbdf = get_device_sbdf_id(dev);
	if (sbdf < 0)
		return;

	devid = PCI_SBDF_TO_DEVID(sbdf);
	iommu = rlookup_amd_iommu(dev);
	if (!iommu)
		return;
	pci_seg = iommu->pci_seg;

	list_for_each_entry(entry, &pci_seg->unity_map, list) {
		int type, prot = 0;
		size_t length;

		if (devid < entry->devid_start || devid > entry->devid_end)
			continue;

		type   = IOMMU_RESV_DIRECT;
		length = entry->address_end - entry->address_start;
		if (entry->prot & IOMMU_PROT_IR)
			prot |= IOMMU_READ;
		if (entry->prot & IOMMU_PROT_IW)
			prot |= IOMMU_WRITE;
		if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
			/* Exclusion range */
			type = IOMMU_RESV_RESERVED;

		region = iommu_alloc_resv_region(entry->address_start,
						 length, prot, type,
						 GFP_KERNEL);
		if (!region) {
			dev_err(dev, "Out of memory allocating dm-regions\n");
			return;
		}
		list_add_tail(&region->list, head);
	}

	region = iommu_alloc_resv_region(MSI_RANGE_START,
					 MSI_RANGE_END - MSI_RANGE_START + 1,
					 0, IOMMU_RESV_MSI, GFP_KERNEL);
	if (!region)
		return;
	list_add_tail(&region->list, head);

	region = iommu_alloc_resv_region(HT_RANGE_START,
					 HT_RANGE_END - HT_RANGE_START + 1,
					 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
	if (!region)
		return;
	list_add_tail(&region->list, head);
}
bool amd_iommu_is_attach_deferred(struct device *dev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);

	return dev_data->defer_attach;
}

static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct protection_domain *dom = to_pdomain(domain);
	unsigned long flags;

	spin_lock_irqsave(&dom->lock, flags);
	amd_iommu_domain_flush_tlb_pde(dom);
	amd_iommu_domain_flush_complete(dom);
	spin_unlock_irqrestore(&dom->lock, flags);
}

static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{
	struct protection_domain *dom = to_pdomain(domain);
	unsigned long flags;

	spin_lock_irqsave(&dom->lock, flags);
	domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1);
	amd_iommu_domain_flush_complete(dom);
	spin_unlock_irqrestore(&dom->lock, flags);
}

static int amd_iommu_def_domain_type(struct device *dev)
{
	struct iommu_dev_data *dev_data;

	dev_data = dev_iommu_priv_get(dev);
	if (!dev_data)
		return 0;

	/*
	 * Do not identity map IOMMUv2 capable devices when:
	 *  - memory encryption is active, because some of those devices
	 *    (AMD GPUs) don't have the encryption bit in their DMA-mask
	 *    and require remapping.
	 *  - SNP is enabled, because it prohibits DTE[Mode]=0.
	 */
	if (pdev_pasid_supported(dev_data) &&
	    !cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
	    !amd_iommu_snp_en) {
		return IOMMU_DOMAIN_IDENTITY;
	}

	return 0;
}

static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{
	/* IOMMU_PTE_FC is always set */
	return true;
}
const struct iommu_dirty_ops amd_dirty_ops = {
	.set_dirty_tracking = amd_iommu_set_dirty_tracking,
	.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
};

const struct iommu_ops amd_iommu_ops = {
	.capable = amd_iommu_capable,
	.domain_alloc = amd_iommu_domain_alloc,
	.domain_alloc_user = amd_iommu_domain_alloc_user,
	.probe_device = amd_iommu_probe_device,
	.release_device = amd_iommu_release_device,
	.probe_finalize = amd_iommu_probe_finalize,
	.device_group = amd_iommu_device_group,
	.get_resv_regions = amd_iommu_get_resv_regions,
	.is_attach_deferred = amd_iommu_is_attach_deferred,
	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
	.def_domain_type = amd_iommu_def_domain_type,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= amd_iommu_attach_device,
		.map_pages	= amd_iommu_map_pages,
		.unmap_pages	= amd_iommu_unmap_pages,
		.iotlb_sync_map	= amd_iommu_iotlb_sync_map,
		.iova_to_phys	= amd_iommu_iova_to_phys,
		.flush_iotlb_all = amd_iommu_flush_iotlb_all,
		.iotlb_sync	= amd_iommu_iotlb_sync,
		.free		= amd_iommu_domain_free,
		.enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
	}
};
static int __flush_pasid(struct protection_domain *domain, u32 pasid,
			 u64 address, bool size)
{
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
	int i, ret;

	if (!(domain->flags & PD_IOMMUV2_MASK))
		return -EINVAL;

	build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);

	/*
	 * IOMMU TLB needs to be flushed before Device TLB to
	 * prevent device TLB refill from IOMMU TLB
	 */
	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
		if (domain->dev_iommu[i] == 0)
			continue;

		ret = iommu_queue_command(amd_iommus[i], &cmd);
		if (ret != 0)
			goto out;
	}

	/* Wait until IOMMU TLB flushes are complete */
	amd_iommu_domain_flush_complete(domain);

	/* Now flush device TLBs */
	list_for_each_entry(dev_data, &domain->dev_list, list) {
		struct amd_iommu *iommu;
		int qdep;

		/*
		 * There might be non-IOMMUv2 capable devices in an IOMMUv2
		 * domain.
		 */
		if (!dev_data->ats_enabled)
			continue;

		qdep  = dev_data->ats_qdep;
		iommu = rlookup_amd_iommu(dev_data->dev);
		if (!iommu)
			continue;
		build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
				      qdep, address, size);

		ret = iommu_queue_command(iommu, &cmd);
		if (ret != 0)
			goto out;
	}

	/* Wait until all device TLBs are flushed */
	amd_iommu_domain_flush_complete(domain);

	ret = 0;

out:

	return ret;
}
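/*
 * The 'size' argument of __flush_pasid() selects between a single-page
 * and a ranged/all-pages invalidation: the page-flush helper below
 * passes false, while the full TLB-flush helper passes the all-pages
 * magic address with size set to true.
 */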
static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
				  u64 address)
{
	return __flush_pasid(domain, pasid, address, false);
}

int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
			 u64 address)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __amd_iommu_flush_page(domain, pasid, address);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}

static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
{
	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
			     true);
}

int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __amd_iommu_flush_tlb(domain, pasid);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
{
	int index;
	u64 *pte;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		pte   = &root[index];

		if (level == 0)
			break;

		if (!(*pte & GCR3_VALID)) {
			if (!alloc)
				return NULL;

			root = (void *)get_zeroed_page(GFP_ATOMIC);
			if (root == NULL)
				return NULL;

			*pte = iommu_virt_to_phys(root) | GCR3_VALID;
		}

		root = iommu_phys_to_virt(*pte & PAGE_MASK);

		level -= 1;
	}

	return pte;
}
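/*
 * The GCR3 table is walked like a page table: each level consumes nine
 * bits of the PASID, so the supported PASID width is determined by the
 * number of levels (domain->glx).
 */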
static int __set_gcr3(struct protection_domain *domain, u32 pasid,
		      unsigned long cr3)
{
	u64 *pte;

	if (domain->iop.mode != PAGE_MODE_NONE)
		return -EINVAL;

	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
	if (pte == NULL)
		return -ENOMEM;

	*pte = (cr3 & PAGE_MASK) | GCR3_VALID;

	return __amd_iommu_flush_tlb(domain, pasid);
}

static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
{
	u64 *pte;

	if (domain->iop.mode != PAGE_MODE_NONE)
		return -EINVAL;

	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
	if (pte == NULL)
		return 0;

	*pte = 0;

	return __amd_iommu_flush_tlb(domain, pasid);
}

int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
			      unsigned long cr3)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __set_gcr3(domain, pasid, cr3);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}

int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
{
	struct protection_domain *domain = to_pdomain(dom);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __clear_gcr3(domain, pasid);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
			   int status, int tag)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	struct iommu_cmd cmd;

	dev_data = dev_iommu_priv_get(&pdev->dev);
	iommu    = rlookup_amd_iommu(&pdev->dev);
	if (!iommu)
		return -ENODEV;

	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
			   tag, dev_data->pri_tlp);

	return iommu_queue_command(iommu, &cmd);
}
#ifdef CONFIG_IRQ_REMAP

/*****************************************************************************
 *
 * Interrupt Remapping Implementation
 *
 *****************************************************************************/

static struct irq_chip amd_ir_chip;
static DEFINE_SPINLOCK(iommu_table_lock);
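/*
 * Queue an INVALIDATE_INTERRUPT_TABLE command followed by a
 * COMPLETION_WAIT under a single lock acquisition, so that IRTE updates
 * are visible to the IOMMU before returning. This is a nop when IRT
 * caching is disabled (irtcachedis), as there is nothing stale to
 * invalidate.
 */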
static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
{
	int ret;
	u64 data;
	unsigned long flags;
	struct iommu_cmd cmd, cmd2;

	if (iommu->irtcachedis_enabled)
		return;

	build_inv_irt(&cmd, devid);
	data = atomic64_add_return(1, &iommu->cmd_sem_val);
	build_completion_wait(&cmd2, iommu, data);

	raw_spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command_sync(iommu, &cmd, true);
	if (ret)
		goto out;
	ret = __iommu_queue_command_sync(iommu, &cmd2, false);
	if (ret)
		goto out;
	wait_on_sem(iommu, data);
out:
	raw_spin_unlock_irqrestore(&iommu->lock, flags);
}
static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
			      struct irq_remap_table *table)
{
	u64 dte;
	struct dev_table_entry *dev_table = get_dev_table(iommu);

	dte	= dev_table[devid].data[2];
	dte	&= ~DTE_IRQ_PHYS_ADDR_MASK;
	dte	|= iommu_virt_to_phys(table->table);
	dte	|= DTE_IRQ_REMAP_INTCTL;
	dte	|= DTE_INTTABLEN;
	dte	|= DTE_IRQ_REMAP_ENABLE;

	dev_table[devid].data[2] = dte;
}

static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
{
	struct irq_remap_table *table;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	if (WARN_ONCE(!pci_seg->rlookup_table[devid],
		      "%s: no iommu for devid %x:%x\n",
		      __func__, pci_seg->id, devid))
		return NULL;

	table = pci_seg->irq_lookup_table[devid];
	if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
		      __func__, pci_seg->id, devid))
		return NULL;

	return table;
}
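/*
 * Remap-table entries are 32 bits (union irte) in legacy interrupt
 * remapping mode and 128 bits (struct irte_ga) in GA mode; the backing
 * allocation below is cleared according to the active format.
 */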
static struct irq_remap_table *__alloc_irq_table(void)
{
	struct irq_remap_table *table;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return NULL;

	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
	if (!table->table) {
		kfree(table);
		return NULL;
	}
	raw_spin_lock_init(&table->lock);

	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
		memset(table->table, 0,
		       MAX_IRQS_PER_TABLE * sizeof(u32));
	else
		memset(table->table, 0,
		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
	return table;
}

static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
				  struct irq_remap_table *table)
{
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	pci_seg->irq_lookup_table[devid] = table;
	set_dte_irq_entry(iommu, devid, table);
	iommu_flush_dte(iommu, devid);
}

static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
				       void *data)
{
	struct irq_remap_table *table = data;
	struct amd_iommu_pci_seg *pci_seg;
	struct amd_iommu *iommu = rlookup_amd_iommu(&pdev->dev);

	if (!iommu)
		return -EINVAL;

	pci_seg = iommu->pci_seg;
	pci_seg->irq_lookup_table[alias] = table;
	set_dte_irq_entry(iommu, alias, table);
	iommu_flush_dte(pci_seg->rlookup_table[alias], alias);

	return 0;
}
static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
					       u16 devid, struct pci_dev *pdev)
{
	struct irq_remap_table *table = NULL;
	struct irq_remap_table *new_table = NULL;
	struct amd_iommu_pci_seg *pci_seg;
	unsigned long flags;
	u16 alias;

	spin_lock_irqsave(&iommu_table_lock, flags);

	pci_seg = iommu->pci_seg;
	table = pci_seg->irq_lookup_table[devid];
	if (table)
		goto out_unlock;

	alias = pci_seg->alias_table[devid];
	table = pci_seg->irq_lookup_table[alias];
	if (table) {
		set_remap_table_entry(iommu, devid, table);
		goto out_wait;
	}
	spin_unlock_irqrestore(&iommu_table_lock, flags);

	/* Nothing there yet, allocate new irq remapping table */
	new_table = __alloc_irq_table();
	if (!new_table)
		return NULL;

	spin_lock_irqsave(&iommu_table_lock, flags);

	table = pci_seg->irq_lookup_table[devid];
	if (table)
		goto out_unlock;

	table = pci_seg->irq_lookup_table[alias];
	if (table) {
		set_remap_table_entry(iommu, devid, table);
		goto out_wait;
	}

	table = new_table;
	new_table = NULL;

	if (pdev)
		pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
				       table);
	else
		set_remap_table_entry(iommu, devid, table);

	if (devid != alias)
		set_remap_table_entry(iommu, alias, table);

out_wait:
	iommu_completion_wait(iommu);

out_unlock:
	spin_unlock_irqrestore(&iommu_table_lock, flags);

	if (new_table) {
		kmem_cache_free(amd_iommu_irq_cache, new_table->table);
		kfree(new_table);
	}
	return table;
}
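/*
 * First-fit scan for 'count' consecutive free IRTEs. For multi-MSI the
 * caller requests a power-of-two alignment, since the device encodes
 * the vector index in the low bits of the MSI data.
 */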
static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
			   bool align, struct pci_dev *pdev)
{
	struct irq_remap_table *table;
	int index, c, alignment = 1;
	unsigned long flags;

	table = alloc_irq_table(iommu, devid, pdev);
	if (!table)
		return -ENODEV;

	if (align)
		alignment = roundup_pow_of_two(count);

	raw_spin_lock_irqsave(&table->lock, flags);

	/* Scan table for free entries */
	for (index = ALIGN(table->min_index, alignment), c = 0;
	     index < MAX_IRQS_PER_TABLE;) {
		if (!iommu->irte_ops->is_allocated(table, index)) {
			c += 1;
		} else {
			c     = 0;
			index = ALIGN(index + 1, alignment);
			continue;
		}

		if (c == count)	{
			for (; c != 0; --c)
				iommu->irte_ops->set_allocated(table, index - c + 1);

			index -= count - 1;
			goto out;
		}

		index++;
	}

	index = -ENOSPC;

out:
	raw_spin_unlock_irqrestore(&table->lock, flags);

	return index;
}
static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
			  struct irte_ga *irte)
{
	struct irq_remap_table *table;
	struct irte_ga *entry;
	unsigned long flags;
	u128 old;

	table = get_irq_table(iommu, devid);
	if (!table)
		return -ENOMEM;

	raw_spin_lock_irqsave(&table->lock, flags);

	entry = (struct irte_ga *)table->table;
	entry = &entry[index];

	/*
	 * We use cmpxchg16 to atomically update the 128-bit IRTE,
	 * and it cannot be updated by the hardware or other processors
	 * behind us, so the return value of cmpxchg16 should be the
	 * same as the old value.
	 */
	old = entry->irte;
	WARN_ON(!try_cmpxchg128(&entry->irte, &old, irte->irte));

	raw_spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt_and_complete(iommu, devid);

	return 0;
}

static int modify_irte(struct amd_iommu *iommu,
		       u16 devid, int index, union irte *irte)
{
	struct irq_remap_table *table;
	unsigned long flags;

	table = get_irq_table(iommu, devid);
	if (!table)
		return -ENOMEM;

	raw_spin_lock_irqsave(&table->lock, flags);
	table->table[index] = irte->val;
	raw_spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt_and_complete(iommu, devid);

	return 0;
}

static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
{
	struct irq_remap_table *table;
	unsigned long flags;

	table = get_irq_table(iommu, devid);
	if (!table)
		return;

	raw_spin_lock_irqsave(&table->lock, flags);
	iommu->irte_ops->clear_allocated(table, index);
	raw_spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt_and_complete(iommu, devid);
}
static void irte_prepare(void *entry,
			 u32 delivery_mode, bool dest_mode,
			 u8 vector, u32 dest_apicid, int devid)
{
	union irte *irte = (union irte *) entry;

	irte->val                = 0;
	irte->fields.vector      = vector;
	irte->fields.int_type    = delivery_mode;
	irte->fields.destination = dest_apicid;
	irte->fields.dm          = dest_mode;
	irte->fields.valid       = 1;
}

static void irte_ga_prepare(void *entry,
			    u32 delivery_mode, bool dest_mode,
			    u8 vector, u32 dest_apicid, int devid)
{
	struct irte_ga *irte = (struct irte_ga *) entry;

	irte->lo.val                      = 0;
	irte->hi.val                      = 0;
	irte->lo.fields_remap.int_type    = delivery_mode;
	irte->lo.fields_remap.dm          = dest_mode;
	irte->hi.fields.vector            = vector;
	irte->lo.fields_remap.destination = APICID_TO_IRTE_DEST_LO(dest_apicid);
	irte->hi.fields.destination       = APICID_TO_IRTE_DEST_HI(dest_apicid);
	irte->lo.fields_remap.valid       = 1;
}
static void irte_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
	union irte *irte = (union irte *) entry;

	irte->fields.valid = 1;
	modify_irte(iommu, devid, index, irte);
}

static void irte_ga_activate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
	struct irte_ga *irte = (struct irte_ga *) entry;

	irte->lo.fields_remap.valid = 1;
	modify_irte_ga(iommu, devid, index, irte);
}

static void irte_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
	union irte *irte = (union irte *) entry;

	irte->fields.valid = 0;
	modify_irte(iommu, devid, index, irte);
}

static void irte_ga_deactivate(struct amd_iommu *iommu, void *entry, u16 devid, u16 index)
{
	struct irte_ga *irte = (struct irte_ga *) entry;

	irte->lo.fields_remap.valid = 0;
	modify_irte_ga(iommu, devid, index, irte);
}

static void irte_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
			      u8 vector, u32 dest_apicid)
{
	union irte *irte = (union irte *) entry;

	irte->fields.vector = vector;
	irte->fields.destination = dest_apicid;
	modify_irte(iommu, devid, index, irte);
}

static void irte_ga_set_affinity(struct amd_iommu *iommu, void *entry, u16 devid, u16 index,
				 u8 vector, u32 dest_apicid)
{
	struct irte_ga *irte = (struct irte_ga *) entry;

	if (!irte->lo.fields_remap.guest_mode) {
		irte->hi.fields.vector = vector;
		irte->lo.fields_remap.destination =
					APICID_TO_IRTE_DEST_LO(dest_apicid);
		irte->hi.fields.destination =
					APICID_TO_IRTE_DEST_HI(dest_apicid);
		modify_irte_ga(iommu, devid, index, irte);
	}
}
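/*
 * Marker for an allocated but not yet programmed 32-bit IRTE: all bits
 * are set except the valid bit, so the slot is visibly in use without
 * enabling remapping through it.
 */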
#define IRTE_ALLOCATED (~1U)
static void irte_set_allocated(struct irq_remap_table *table, int index)
{
	table->table[index] = IRTE_ALLOCATED;
}

static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
{
	struct irte_ga *ptr = (struct irte_ga *)table->table;
	struct irte_ga *irte = &ptr[index];

	memset(&irte->lo.val, 0, sizeof(u64));
	memset(&irte->hi.val, 0, sizeof(u64));
	irte->hi.fields.vector = 0xff;
}

static bool irte_is_allocated(struct irq_remap_table *table, int index)
{
	union irte *ptr = (union irte *)table->table;
	union irte *irte = &ptr[index];

	return irte->val != 0;
}

static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
{
	struct irte_ga *ptr = (struct irte_ga *)table->table;
	struct irte_ga *irte = &ptr[index];

	return irte->hi.fields.vector != 0;
}

static void irte_clear_allocated(struct irq_remap_table *table, int index)
{
	table->table[index] = 0;
}

static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
{
	struct irte_ga *ptr = (struct irte_ga *)table->table;
	struct irte_ga *irte = &ptr[index];

	memset(&irte->lo.val, 0, sizeof(u64));
	memset(&irte->hi.val, 0, sizeof(u64));
}
static int get_devid(struct irq_alloc_info *info)
{
	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		return get_ioapic_devid(info->devid);
	case X86_IRQ_ALLOC_TYPE_HPET:
		return get_hpet_devid(info->devid);
	case X86_IRQ_ALLOC_TYPE_PCI_MSI:
	case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
		return get_device_sbdf_id(msi_desc_to_dev(info->desc));
	default:
		WARN_ON_ONCE(1);
		return -1;
	}
}

struct irq_remap_ops amd_iommu_irq_ops = {
	.prepare		= amd_iommu_prepare,
	.enable			= amd_iommu_enable,
	.disable		= amd_iommu_disable,
	.reenable		= amd_iommu_reenable,
	.enable_faulting	= amd_iommu_enable_faulting,
};
static void fill_msi_msg(struct msi_msg *msg, u32 index)
{
	msg->data = index;
	msg->address_lo = 0;
	msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
}

static void irq_remapping_prepare_irte(struct amd_ir_data *data,
				       struct irq_cfg *irq_cfg,
				       struct irq_alloc_info *info,
				       int devid, int index, int sub_handle)
{
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct amd_iommu *iommu = data->iommu;

	if (!iommu)
		return;

	data->irq_2_irte.devid = devid;
	data->irq_2_irte.index = index + sub_handle;
	iommu->irte_ops->prepare(data->entry, apic->delivery_mode,
				 apic->dest_mode_logical, irq_cfg->vector,
				 irq_cfg->dest_apicid, devid);

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
	case X86_IRQ_ALLOC_TYPE_HPET:
	case X86_IRQ_ALLOC_TYPE_PCI_MSI:
	case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
		fill_msi_msg(&data->msi_entry, irte_info->index);
		break;

	default:
		break;
	}
}
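/*
 * These two ops tables keep the rest of this file IRTE-format agnostic:
 * during initialization iommu->irte_ops is pointed at irte_32_ops or
 * irte_128_ops depending on the configured guest interrupt-remapping
 * mode.
 */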
struct amd_irte_ops irte_32_ops = {
	.prepare = irte_prepare,
	.activate = irte_activate,
	.deactivate = irte_deactivate,
	.set_affinity = irte_set_affinity,
	.set_allocated = irte_set_allocated,
	.is_allocated = irte_is_allocated,
	.clear_allocated = irte_clear_allocated,
};

struct amd_irte_ops irte_128_ops = {
	.prepare = irte_ga_prepare,
	.activate = irte_ga_activate,
	.deactivate = irte_ga_deactivate,
	.set_affinity = irte_ga_set_affinity,
	.set_allocated = irte_ga_set_allocated,
	.is_allocated = irte_ga_is_allocated,
	.clear_allocated = irte_ga_clear_allocated,
};
static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct irq_data *irq_data;
	struct amd_ir_data *data = NULL;
	struct amd_iommu *iommu;
	struct irq_cfg *cfg;
	int i, ret, devid, seg, sbdf;
	int index;

	if (!info)
		return -EINVAL;
	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI)
		return -EINVAL;

	sbdf = get_devid(info);
	if (sbdf < 0)
		return -EINVAL;

	seg = PCI_SBDF_TO_SEGID(sbdf);
	devid = PCI_SBDF_TO_DEVID(sbdf);
	iommu = __rlookup_amd_iommu(seg, devid);
	if (!iommu)
		return -EINVAL;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
		struct irq_remap_table *table;

		table = alloc_irq_table(iommu, devid, NULL);
		if (table) {
			if (!table->min_index) {
				/*
				 * Keep the first 32 indexes free for IOAPIC
				 * interrupts.
				 */
				table->min_index = 32;
				for (i = 0; i < 32; ++i)
					iommu->irte_ops->set_allocated(table, i);
			}
			WARN_ON(table->min_index != 32);
			index = info->ioapic.pin;
		} else {
			index = -ENOMEM;
		}
	} else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI ||
		   info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
		bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);

		index = alloc_irq_index(iommu, devid, nr_irqs, align,
					msi_desc_to_pci_dev(info->desc));
	} else {
		index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL);
	}

	if (index < 0) {
		pr_warn("Failed to allocate IRTE\n");
		ret = index;
		goto out_free_parent;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		cfg = irq_data ? irqd_cfg(irq_data) : NULL;
		if (!cfg) {
			ret = -EINVAL;
			goto out_free_data;
		}

		ret = -ENOMEM;
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			goto out_free_data;

		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
		else
			data->entry = kzalloc(sizeof(struct irte_ga),
					      GFP_KERNEL);
		if (!data->entry) {
			kfree(data);
			goto out_free_data;
		}

		data->iommu = iommu;
		irq_data->hwirq = (devid << 16) + i;
		irq_data->chip_data = data;
		irq_data->chip = &amd_ir_chip;
		irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
	}

	return 0;

out_free_data:
	for (i--; i >= 0; i--) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data)
			kfree(irq_data->chip_data);
	}
	for (i = 0; i < nr_irqs; i++)
		free_irte(iommu, devid, index + i);
out_free_parent:
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
	return ret;
}
static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs)
{
	struct irq_2_irte *irte_info;
	struct irq_data *irq_data;
	struct amd_ir_data *data;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			data = irq_data->chip_data;
			irte_info = &data->irq_2_irte;
			free_irte(data->iommu, irte_info->devid, irte_info->index);
			kfree(data->entry);
			kfree(data);
		}
	}
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
			       struct amd_ir_data *ir_data,
			       struct irq_2_irte *irte_info,
			       struct irq_cfg *cfg);
static int irq_remapping_activate(struct irq_domain *domain,
				  struct irq_data *irq_data, bool reserve)
{
	struct amd_ir_data *data = irq_data->chip_data;
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct amd_iommu *iommu = data->iommu;
	struct irq_cfg *cfg = irqd_cfg(irq_data);

	if (!iommu)
		return 0;

	iommu->irte_ops->activate(iommu, data->entry, irte_info->devid,
				  irte_info->index);
	amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
	return 0;
}

static void irq_remapping_deactivate(struct irq_domain *domain,
				     struct irq_data *irq_data)
{
	struct amd_ir_data *data = irq_data->chip_data;
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct amd_iommu *iommu = data->iommu;

	if (iommu)
		iommu->irte_ops->deactivate(iommu, data->entry, irte_info->devid,
					    irte_info->index);
}

static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
				enum irq_domain_bus_token bus_token)
{
	struct amd_iommu *iommu;
	int devid = -1;

	if (!amd_iommu_irq_remap)
		return 0;

	if (x86_fwspec_is_ioapic(fwspec))
		devid = get_ioapic_devid(fwspec->param[0]);
	else if (x86_fwspec_is_hpet(fwspec))
		devid = get_hpet_devid(fwspec->param[0]);

	if (devid < 0)
		return 0;
	iommu = __rlookup_amd_iommu((devid >> 16), (devid & 0xffff));

	return iommu && iommu->ir_domain == d;
}

static const struct irq_domain_ops amd_ir_domain_ops = {
	.select = irq_remapping_select,
	.alloc = irq_remapping_alloc,
	.free = irq_remapping_free,
	.activate = irq_remapping_activate,
	.deactivate = irq_remapping_deactivate,
};
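/*
 * Rewrite an IRTE in guest (vAPIC) mode: the valid bit is preserved
 * while the entry is pointed at the guest's vAPIC backing page and
 * tagged so that GA-log interrupts can be routed back to the right
 * vCPU.
 */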
int amd_iommu_activate_guest_mode(void *data)
{
	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
	u64 valid;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry)
		return 0;

	valid = entry->lo.fields_vapic.valid;

	entry->lo.val = 0;
	entry->hi.val = 0;

	entry->lo.fields_vapic.valid       = valid;
	entry->lo.fields_vapic.guest_mode  = 1;
	entry->lo.fields_vapic.ga_log_intr = 1;
	entry->hi.fields.ga_root_ptr       = ir_data->ga_root_ptr;
	entry->hi.fields.vector            = ir_data->ga_vector;
	entry->lo.fields_vapic.ga_tag      = ir_data->ga_tag;

	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
			      ir_data->irq_2_irte.index, entry);
}
EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
int amd_iommu_deactivate_guest_mode(void *data)
{
	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
	struct irq_cfg *cfg = ir_data->cfg;
	u64 valid;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
	    !entry || !entry->lo.fields_vapic.guest_mode)
		return 0;

	valid = entry->lo.fields_remap.valid;

	entry->lo.val = 0;
	entry->hi.val = 0;

	entry->lo.fields_remap.valid       = valid;
	entry->lo.fields_remap.dm          = apic->dest_mode_logical;
	entry->lo.fields_remap.int_type    = apic->delivery_mode;
	entry->hi.fields.vector            = cfg->vector;
	entry->lo.fields_remap.destination =
				APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
	entry->hi.fields.destination =
				APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);

	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
			      ir_data->irq_2_irte.index, entry);
}
EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
{
	int ret;
	struct amd_iommu_pi_data *pi_data = vcpu_info;
	struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
	struct amd_ir_data *ir_data = data->chip_data;
	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
	struct iommu_dev_data *dev_data;

	if (ir_data->iommu == NULL)
		return -EINVAL;

	dev_data = search_dev_data(ir_data->iommu, irte_info->devid);

	/* Note:
	 * This device has never been set up for guest mode.
	 * We should not modify the IRTE.
	 */
	if (!dev_data || !dev_data->use_vapic)
		return 0;

	ir_data->cfg = irqd_cfg(data);
	pi_data->ir_data = ir_data;

	/* Note:
	 * SVM tries to set up for VAPIC mode, but we are in
	 * legacy mode. So, we force legacy mode instead.
	 */
	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
		pr_debug("%s: Fall back to using intr legacy remap\n",
			 __func__);
		pi_data->is_guest_mode = false;
	}

	pi_data->prev_ga_tag = ir_data->cached_ga_tag;
	if (pi_data->is_guest_mode) {
		ir_data->ga_root_ptr = (pi_data->base >> 12);
		ir_data->ga_vector = vcpu_pi_info->vector;
		ir_data->ga_tag = pi_data->ga_tag;
		ret = amd_iommu_activate_guest_mode(ir_data);
		if (!ret)
			ir_data->cached_ga_tag = pi_data->ga_tag;
	} else {
		ret = amd_iommu_deactivate_guest_mode(ir_data);

		/*
		 * This communicates the ga_tag back to the caller
		 * so that it can do all the necessary clean up.
		 */
		if (!ret)
			ir_data->cached_ga_tag = 0;
	}

	return ret;
}
static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
			       struct amd_ir_data *ir_data,
			       struct irq_2_irte *irte_info,
			       struct irq_cfg *cfg)
{
	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	iommu->irte_ops->set_affinity(iommu, ir_data->entry, irte_info->devid,
				      irte_info->index, cfg->vector,
				      cfg->dest_apicid);
}

static int amd_ir_set_affinity(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	struct amd_ir_data *ir_data = data->chip_data;
	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
	struct irq_cfg *cfg = irqd_cfg(data);
	struct irq_data *parent = data->parent_data;
	struct amd_iommu *iommu = ir_data->iommu;
	int ret;

	if (!iommu)
		return -ENODEV;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	vector_schedule_cleanup(cfg);

	return IRQ_SET_MASK_OK_DONE;
}
static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
	struct amd_ir_data *ir_data = irq_data->chip_data;

	*msg = ir_data->msi_entry;
}

static struct irq_chip amd_ir_chip = {
	.name			= "AMD-IR",
	.irq_ack		= apic_ack_irq,
	.irq_set_affinity	= amd_ir_set_affinity,
	.irq_set_vcpu_affinity	= amd_ir_set_vcpu_affinity,
	.irq_compose_msi_msg	= ir_compose_msi_msg,
};

static const struct msi_parent_ops amdvi_msi_parent_ops = {
	.supported_flags	= X86_VECTOR_MSI_FLAGS_SUPPORTED |
				  MSI_FLAG_MULTI_PCI_MSI |
				  MSI_FLAG_PCI_IMS,
	.prefix			= "IR-",
	.init_dev_msi_info	= msi_parent_init_dev_msi_info,
};

static const struct msi_parent_ops virt_amdvi_msi_parent_ops = {
	.supported_flags	= X86_VECTOR_MSI_FLAGS_SUPPORTED |
				  MSI_FLAG_MULTI_PCI_MSI,
	.prefix			= "vIR-",
	.init_dev_msi_info	= msi_parent_init_dev_msi_info,
};

int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
	if (!fn)
		return -ENOMEM;
	iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, 0,
						       fn, &amd_ir_domain_ops, iommu);
	if (!iommu->ir_domain) {
		irq_domain_free_fwnode(fn);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI);
	iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
				   IRQ_DOMAIN_FLAG_ISOLATED_MSI;

	if (amd_iommu_np_cache)
		iommu->ir_domain->msi_parent_ops = &virt_amdvi_msi_parent_ops;
	else
		iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops;

	return 0;
}
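/*
 * Expected to be called from the hypervisor side (e.g. KVM's AVIC
 * support) when a vCPU is scheduled in or out, to refresh the
 * destination and is_run hint of a guest-mode IRTE.
 */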
int amd_iommu_update_ga(int cpu, bool is_run, void *data)
{
	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
	    !entry || !entry->lo.fields_vapic.guest_mode)
		return 0;

	if (!ir_data->iommu)
		return -ENODEV;

	if (cpu >= 0) {
		entry->lo.fields_vapic.destination =
					APICID_TO_IRTE_DEST_LO(cpu);
		entry->hi.fields.destination =
					APICID_TO_IRTE_DEST_HI(cpu);
	}
	entry->lo.fields_vapic.is_run = is_run;

	return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
			      ir_data->irq_2_irte.index, entry);
}
EXPORT_SYMBOL(amd_iommu_update_ga);
#endif /* CONFIG_IRQ_REMAP */