// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2015 Intel Corporation.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <linux/mm_types.h>
#include <linux/xarray.h>
#include <asm/fpu/api.h>

#include "iommu.h"
#include "pasid.h"
#include "perf.h"
#include "../iommu-sva.h"
#include "trace.h"
static irqreturn_t prq_event_thread(int irq, void *d);
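/*
 * pasid_private_array maps a PASID value to the intel_svm instance that
 * owns it. The helpers below wrap the xarray so the rest of the file can
 * add, remove and look up entries by PASID.
 */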
static DEFINE_XARRAY_ALLOC(pasid_private_array);

static int pasid_private_add(ioasid_t pasid, void *priv)
{
        return xa_alloc(&pasid_private_array, &pasid, priv,
                        XA_LIMIT(pasid, pasid), GFP_ATOMIC);
}

static void pasid_private_remove(ioasid_t pasid)
{
        xa_erase(&pasid_private_array, pasid);
}

static void *pasid_private_find(ioasid_t pasid)
{
        return xa_load(&pasid_private_array, pasid);
}
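/*
 * Walk the RCU-protected device list of an intel_svm and return the
 * intel_svm_dev entry that matches @dev, or NULL if the device is not
 * bound to this shared address space.
 */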
static struct intel_svm_dev *
svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
{
        struct intel_svm_dev *sdev = NULL, *t;

        rcu_read_lock();
        list_for_each_entry_rcu(t, &svm->devs, list) {
                if (t->dev == dev) {
                        sdev = t;
                        break;
                }
        }
        rcu_read_unlock();

        return sdev;
}
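/*
 * Enable the page request queue (PRQ) on @iommu: allocate the queue
 * pages, set up the IOPF queue and the threaded IRQ handler, then
 * program the PQH/PQT/PQA registers so hardware can post page requests.
 */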
int intel_svm_enable_prq(struct intel_iommu *iommu)
{
        struct iopf_queue *iopfq;
        struct page *pages;
        int irq, ret;

        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
        if (!pages) {
                pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
                        iommu->name);
                return -ENOMEM;
        }
        iommu->prq = page_address(pages);

        irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu);
        if (irq <= 0) {
                pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
                       iommu->name);
                ret = -EINVAL;
                goto free_prq;
        }
        iommu->pr_irq = irq;

        snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
                 "dmar%d-iopfq", iommu->seq_id);
        iopfq = iopf_queue_alloc(iommu->iopfq_name);
        if (!iopfq) {
                pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name);
                ret = -ENOMEM;
                goto free_hwirq;
        }
        iommu->iopf_queue = iopfq;

        snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

        ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
                                   iommu->prq_name, iommu);
        if (ret) {
                pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
                       iommu->name);
                goto free_iopfq;
        }
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

        init_completion(&iommu->prq_complete);

        return 0;

free_iopfq:
        iopf_queue_free(iommu->iopf_queue);
        iommu->iopf_queue = NULL;
free_hwirq:
        dmar_free_hwirq(irq);
free_prq:
        free_pages((unsigned long)iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;

        return ret;
}
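/*
 * Tear-down counterpart of intel_svm_enable_prq(): quiesce the queue
 * registers, release the IRQ and IOPF queue, and free the queue pages.
 */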
int intel_svm_finish_prq(struct intel_iommu *iommu)
{
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

        if (iommu->pr_irq) {
                free_irq(iommu->pr_irq, iommu);
                dmar_free_hwirq(iommu->pr_irq);
                iommu->pr_irq = 0;
        }

        if (iommu->iopf_queue) {
                iopf_queue_free(iommu->iopf_queue);
                iommu->iopf_queue = NULL;
        }

        free_pages((unsigned long)iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;

        return 0;
}
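/*
 * SVM shares CPU page tables with the device, so the IOMMU must support
 * the same first-level paging features the CPU uses (1GB pages, 5-level
 * paging). Otherwise SVM stays disabled for this IOMMU.
 */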
void intel_svm_check(struct intel_iommu *iommu)
{
        if (!pasid_supported(iommu))
                return;

        if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
            !cap_fl1gp_support(iommu->cap)) {
                pr_err("%s SVM disabled, incompatible 1GB page capability\n",
                       iommu->name);
                return;
        }

        if (cpu_feature_enabled(X86_FEATURE_LA57) &&
            !cap_fl5lp_support(iommu->cap)) {
                pr_err("%s SVM disabled, incompatible paging mode\n",
                       iommu->name);
                return;
        }

        iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}
static void __flush_svm_range_dev(struct intel_svm *svm,
                                  struct intel_svm_dev *sdev,
                                  unsigned long address,
                                  unsigned long pages, int ih)
{
        struct device_domain_info *info = dev_iommu_priv_get(sdev->dev);

        if (WARN_ON(!pages))
                return;

        qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
        if (info->ats_enabled) {
                qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
                                         svm->pasid, sdev->qdep, address,
                                         order_base_2(pages));
                quirk_extra_dev_tlb_flush(info, address, order_base_2(pages),
                                          svm->pasid, sdev->qdep);
        }
}
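/*
 * Split an arbitrary flush request into power-of-two aligned chunks that
 * the invalidation descriptors can express. For example, flushing 5 pages
 * rounds up to an 8-page granule; a range that straddles an 8-page aligned
 * boundary is then flushed as two 8-page invalidations.
 */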
static void intel_flush_svm_range_dev(struct intel_svm *svm,
                                      struct intel_svm_dev *sdev,
                                      unsigned long address,
                                      unsigned long pages, int ih)
{
        unsigned long shift = ilog2(__roundup_pow_of_two(pages));
        unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
        unsigned long start = ALIGN_DOWN(address, align);
        unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);

        while (start < end) {
                __flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
                start += align;
        }
}
static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
                                  unsigned long pages, int ih)
{
        struct intel_svm_dev *sdev;

        rcu_read_lock();
        list_for_each_entry_rcu(sdev, &svm->devs, list)
                intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
        rcu_read_unlock();
}
/* Pages have been freed at this point */
static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
                                                 struct mm_struct *mm,
                                                 unsigned long start, unsigned long end)
{
        struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

        intel_flush_svm_range(svm, start,
                              (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
        struct intel_svm_dev *sdev;

        /* This might end up being called from exit_mmap(), *before* the page
         * tables are cleared. And __mmu_notifier_release() will delete us from
         * the list of notifiers so that our invalidate_range() callback doesn't
         * get called when the page tables are cleared. So we need to protect
         * against hardware accessing those page tables.
         *
         * We do it by clearing the entry in the PASID table and then flushing
         * the IOTLB and the PASID table caches. This might upset hardware;
         * perhaps we'll want to point the PASID to a dummy PGD (like the zero
         * page) so that we end up taking a fault that the hardware really
         * *has* to handle gracefully without affecting other processes.
         */
        rcu_read_lock();
        list_for_each_entry_rcu(sdev, &svm->devs, list)
                intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
                                            svm->pasid, true);
        rcu_read_unlock();
}
static const struct mmu_notifier_ops intel_mmuops = {
        .release = intel_mm_release,
        .arch_invalidate_secondary_tlbs = intel_arch_invalidate_secondary_tlbs,
};
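/*
 * Resolve @pasid to the intel_svm it belongs to and, if @dev is bound to
 * it, the matching intel_svm_dev. Returns -EINVAL for an out-of-range
 * PASID; *rsvm and *rsdev may be NULL when nothing is bound.
 */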
static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
                             struct intel_svm **rsvm,
                             struct intel_svm_dev **rsdev)
{
        struct intel_svm_dev *sdev = NULL;
        struct intel_svm *svm;

        if (pasid == IOMMU_PASID_INVALID || pasid >= PASID_MAX)
                return -EINVAL;

        svm = pasid_private_find(pasid);
        if (IS_ERR(svm))
                return PTR_ERR(svm);

        if (!svm)
                goto out;

        /*
         * If we found svm for the PASID, there must be at least one device
         * bound.
         */
        if (WARN_ON(list_empty(&svm->devs)))
                return -EINVAL;

        sdev = svm_lookup_device_by_dev(svm, dev);

out:
        *rsvm = svm;
        *rsdev = sdev;

        return 0;
}
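/*
 * Bind @mm to @dev through its PASID: find or create the intel_svm for
 * mm->pasid (registering the mmu notifier on first use), program a
 * first-level PASID table entry pointing at mm->pgd, and add the device
 * to the svm's device list.
 */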
static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
                             struct mm_struct *mm)
{
        struct device_domain_info *info = dev_iommu_priv_get(dev);
        struct intel_svm_dev *sdev;
        struct intel_svm *svm;
        unsigned long sflags;
        int ret = 0;

        svm = pasid_private_find(mm->pasid);
        if (!svm) {
                svm = kzalloc(sizeof(*svm), GFP_KERNEL);
                if (!svm)
                        return -ENOMEM;

                svm->pasid = mm->pasid;
                svm->mm = mm;
                INIT_LIST_HEAD_RCU(&svm->devs);

                svm->notifier.ops = &intel_mmuops;
                ret = mmu_notifier_register(&svm->notifier, mm);
                if (ret) {
                        kfree(svm);
                        return ret;
                }

                ret = pasid_private_add(svm->pasid, svm);
                if (ret) {
                        mmu_notifier_unregister(&svm->notifier, mm);
                        kfree(svm);
                        return ret;
                }
        }

        sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
        if (!sdev) {
                ret = -ENOMEM;
                goto free_svm;
        }

        sdev->dev = dev;
        sdev->iommu = iommu;
        sdev->did = FLPT_DEFAULT_DID;
        sdev->sid = PCI_DEVID(info->bus, info->devfn);
        init_rcu_head(&sdev->rcu);
        if (info->ats_enabled) {
                sdev->qdep = info->ats_qdep;
                if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
                        sdev->qdep = 0;
        }

        /* Setup the pasid table: */
        sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
        ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
                                            FLPT_DEFAULT_DID, sflags);
        if (ret)
                goto free_sdev;

        list_add_rcu(&sdev->list, &svm->devs);

        return 0;

free_sdev:
        kfree(sdev);
free_svm:
        if (list_empty(&svm->devs)) {
                mmu_notifier_unregister(&svm->notifier, mm);
                pasid_private_remove(mm->pasid);
                kfree(svm);
        }

        return ret;
}
void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
{
        struct intel_svm_dev *sdev;
        struct intel_iommu *iommu;
        struct intel_svm *svm;
        struct mm_struct *mm;

        iommu = device_to_iommu(dev, NULL, NULL);
        if (!iommu)
                return;

        if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
                return;
        mm = svm->mm;

        if (sdev) {
                list_del_rcu(&sdev->list);
                kfree_rcu(sdev, rcu);

                if (list_empty(&svm->devs)) {
                        if (svm->notifier.ops)
                                mmu_notifier_unregister(&svm->notifier, mm);
                        pasid_private_remove(svm->pasid);
                        /*
                         * We mandate that no page faults may be outstanding
                         * for the PASID when intel_svm_unbind_mm() is called.
                         * If that is not obeyed, subtle errors will happen.
                         * Let's make them less subtle...
                         */
                        memset(svm, 0x6b, sizeof(*svm));
                        kfree(svm);
                }
        }
}
/* Page request queue descriptor */
struct page_req_dsc {
        union {
                struct {
                        u64 type:8;
                        u64 pasid_present:1;
                        u64 priv_data_present:1;
                        u64 rsvd:6;
                        u64 rid:16;
                        u64 pasid:20;
                        u64 exe_req:1;
                        u64 pm_req:1;
                        u64 rsvd2:10;
                };
                u64 qw_0;
        };
        union {
                struct {
                        u64 rd_req:1;
                        u64 wr_req:1;
                        u64 lpig:1;
                        u64 prg_index:9;
                        u64 addr:52;
                };
                u64 qw_1;
        };
        u64 priv_data[2];
};
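/*
 * Page request addresses must be canonical for the CPU's virtual address
 * width; non-canonical addresses indicate a malformed request and are
 * rejected with an invalid response.
 */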
static bool is_canonical_address(u64 addr)
{
        int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
        long saddr = (long) addr;

        return (((saddr << shift) >> shift) == saddr);
}
/**
 * intel_drain_pasid_prq - Drain page requests and responses for a pasid
 * @dev: target device
 * @pasid: pasid for draining
 *
 * Drain all pending page requests and responses related to @pasid in both
 * software and hardware. This is supposed to be called after the device
 * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB
 * and DevTLB have been invalidated.
 *
 * It waits until all pending page requests for @pasid in the page fault
 * queue are completed by the prq handling thread. Then follow the steps
 * described in VT-d spec CH7.10 to drain all page requests and page
 * responses pending in the hardware.
 */
void intel_drain_pasid_prq(struct device *dev, u32 pasid)
{
        struct device_domain_info *info;
        struct dmar_domain *domain;
        struct intel_iommu *iommu;
        struct qi_desc desc[3];
        struct pci_dev *pdev;
        int head, tail;
        u16 sid, did;
        int qdep;

        info = dev_iommu_priv_get(dev);
        if (WARN_ON(!info || !dev_is_pci(dev)))
                return;

        if (!info->pri_enabled)
                return;

        iommu = info->iommu;
        domain = info->domain;
        pdev = to_pci_dev(dev);
        sid = PCI_DEVID(info->bus, info->devfn);
        did = domain_id_iommu(domain, iommu);
        qdep = pci_ats_queue_depth(pdev);

        /*
         * Check and wait until all pending page requests in the queue are
         * handled by the prq handling thread.
         */
prq_retry:
        reinit_completion(&iommu->prq_complete);
        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        while (head != tail) {
                struct page_req_dsc *req;

                req = &iommu->prq[head / sizeof(*req)];
                if (!req->pasid_present || req->pasid != pasid) {
                        head = (head + sizeof(*req)) & PRQ_RING_MASK;
                        continue;
                }

                wait_for_completion(&iommu->prq_complete);
                goto prq_retry;
        }

        iopf_queue_flush_dev(dev);

        /*
         * Perform steps described in VT-d spec CH7.10 to drain page
         * requests and responses in hardware.
         */
        memset(desc, 0, sizeof(desc));
        desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_FENCE |
                        QI_IWD_TYPE;
        desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
                        QI_EIOTLB_DID(did) |
                        QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
                        QI_EIOTLB_TYPE;
        desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
                        QI_DEV_EIOTLB_SID(sid) |
                        QI_DEV_EIOTLB_QDEP(qdep) |
                        QI_DEIOTLB_TYPE |
                        QI_DEV_IOTLB_PFSID(info->pfsid);
qi_retry:
        reinit_completion(&iommu->prq_complete);
        qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
                wait_for_completion(&iommu->prq_complete);
                goto qi_retry;
        }
}
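/*
 * Translate the read/write/execute/privileged bits of a page request
 * descriptor into the generic IOMMU_FAULT_PERM_* flags.
 */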
static int prq_to_iommu_prot(struct page_req_dsc *req)
{
        int prot = 0;

        if (req->rd_req)
                prot |= IOMMU_FAULT_PERM_READ;
        if (req->wr_req)
                prot |= IOMMU_FAULT_PERM_WRITE;
        if (req->exe_req)
                prot |= IOMMU_FAULT_PERM_EXEC;
        if (req->pm_req)
                prot |= IOMMU_FAULT_PERM_PRIV;

        return prot;
}
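/*
 * Convert a hardware page request descriptor into an iommu_fault_event
 * and report it through the generic fault path so the shared I/O page
 * fault machinery can service it and post a response.
 */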
static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
                                struct page_req_dsc *desc)
{
        struct iommu_fault_event event;

        if (!dev || !dev_is_pci(dev))
                return -ENODEV;

        /* Fill in event data for device specific processing */
        memset(&event, 0, sizeof(struct iommu_fault_event));
        event.fault.type = IOMMU_FAULT_PAGE_REQ;
        event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
        event.fault.prm.pasid = desc->pasid;
        event.fault.prm.grpid = desc->prg_index;
        event.fault.prm.perm = prq_to_iommu_prot(desc);

        if (desc->lpig)
                event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
        if (desc->pasid_present) {
                event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
                event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
        }
        if (desc->priv_data_present) {
                /*
                 * Set last page in group bit if private data is present,
                 * page response is required as it does for LPIG.
                 * iommu_report_device_fault() doesn't understand this vendor
                 * specific requirement thus we set last_page as a workaround.
                 */
                event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
                event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
                event.fault.prm.private_data[0] = desc->priv_data[0];
                event.fault.prm.private_data[1] = desc->priv_data[1];
        } else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) {
                /*
                 * If the private data fields are not used by hardware, use it
                 * to monitor the prq handle latency.
                 */
                event.fault.prm.private_data[0] = ktime_to_ns(ktime_get());
        }

        return iommu_report_device_fault(dev, &event);
}
static void handle_bad_prq_event(struct intel_iommu *iommu,
                                 struct page_req_dsc *req, int result)
{
        struct qi_desc desc;

        pr_err("%s: Invalid page request: %08llx %08llx\n",
               iommu->name, ((unsigned long long *)req)[0],
               ((unsigned long long *)req)[1]);

        /*
         * Per VT-d spec. v3.0 ch7.7, system software must
         * respond with page group response if private data
         * is present (PDP) or last page in group (LPIG) bit
         * is set. This is an additional VT-d feature beyond
         * PCI ATS spec.
         */
        if (!req->lpig && !req->priv_data_present)
                return;

        desc.qw0 = QI_PGRP_PASID(req->pasid) |
                        QI_PGRP_DID(req->rid) |
                        QI_PGRP_PASID_P(req->pasid_present) |
                        QI_PGRP_PDP(req->priv_data_present) |
                        QI_PGRP_RESP_CODE(result) |
                        QI_PGRP_RESP_TYPE;
        desc.qw1 = QI_PGRP_IDX(req->prg_index) |
                        QI_PGRP_LPIG(req->lpig);

        if (req->priv_data_present) {
                desc.qw2 = req->priv_data[0];
                desc.qw3 = req->priv_data[1];
        } else {
                desc.qw2 = 0;
                desc.qw3 = 0;
        }

        qi_submit_sync(iommu, &desc, 1, 0);
}
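/*
 * Threaded handler for the page request interrupt: walk the descriptors
 * between the head and tail registers, sanity-check each request, report
 * the valid ones, respond to the bad ones, and recover from queue
 * overflow before signalling anyone waiting on prq_complete.
 */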
static irqreturn_t prq_event_thread(int irq, void *d)
{
        struct intel_iommu *iommu = d;
        struct page_req_dsc *req;
        int head, tail, handled;
        struct pci_dev *pdev;
        u64 address;

        /*
         * Clear PPR bit before reading head/tail registers, to ensure that
         * we get a new interrupt if needed.
         */
        writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        handled = (head != tail);
        while (head != tail) {
                req = &iommu->prq[head / sizeof(*req)];
                address = (u64)req->addr << VTD_PAGE_SHIFT;

                if (unlikely(!req->pasid_present)) {
                        pr_err("IOMMU: %s: Page request without PASID\n",
                               iommu->name);
bad_req:
                        handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
                        goto prq_advance;
                }

                if (unlikely(!is_canonical_address(address))) {
                        pr_err("IOMMU: %s: Address is not canonical\n",
                               iommu->name);
                        goto bad_req;
                }

                if (unlikely(req->pm_req && (req->rd_req | req->wr_req))) {
                        pr_err("IOMMU: %s: Page request in Privilege Mode\n",
                               iommu->name);
                        goto bad_req;
                }

                if (unlikely(req->exe_req && req->rd_req)) {
                        pr_err("IOMMU: %s: Execution request not supported\n",
                               iommu->name);
                        goto bad_req;
                }

                /* Drop Stop Marker message. No need for a response. */
                if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
                        goto prq_advance;

                pdev = pci_get_domain_bus_and_slot(iommu->segment,
                                                   PCI_BUS_NUM(req->rid),
                                                   req->rid & 0xff);
                /*
                 * If prq is to be handled outside iommu driver via receiver of
                 * the fault notifiers, we skip the page response here.
                 */
                if (!pdev)
                        goto bad_req;

                if (intel_svm_prq_report(iommu, &pdev->dev, req))
                        handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
                else
                        trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
                                         req->priv_data[0], req->priv_data[1],
                                         iommu->prq_seq_number++);
                pci_dev_put(pdev);
prq_advance:
                head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }

        dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

        /*
         * Clear the page request overflow bit and wake up all threads that
         * are waiting for the completion of this handling.
         */
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
                pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
                                    iommu->name);
                head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
                tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
                if (head == tail) {
                        iopf_queue_discard_partial(iommu->iopf_queue);
                        writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
                        pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared",
                                            iommu->name);
                }
        }

        if (!completion_done(&iommu->prq_complete))
                complete(&iommu->prq_complete);

        return IRQ_RETVAL(handled);
}
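/*
 * Send the page group response for a previously reported page request.
 * Per VT-d a response is required whenever the request carried private
 * data or had the last-page-in-group bit set.
 */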
int intel_svm_page_response(struct device *dev,
                            struct iommu_fault_event *evt,
                            struct iommu_page_response *msg)
{
        struct iommu_fault_page_request *prm;
        struct intel_iommu *iommu;
        bool private_present;
        bool pasid_present;
        bool last_page;
        u8 bus, devfn;
        int ret = 0;
        u16 sid;

        if (!dev || !dev_is_pci(dev))
                return -ENODEV;

        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;

        if (!msg || !evt)
                return -EINVAL;

        prm = &evt->fault.prm;
        sid = PCI_DEVID(bus, devfn);
        pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
        private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
        last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;

        if (!pasid_present) {
                ret = -EINVAL;
                goto out;
        }

        if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * Per VT-d spec. v3.0 ch7.7, system software must respond
         * with page group response if private data is present (PDP)
         * or last page in group (LPIG) bit is set. This is an
         * additional VT-d requirement beyond PCI ATS spec.
         */
        if (last_page || private_present) {
                struct qi_desc desc;

                desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
                                QI_PGRP_PASID_P(pasid_present) |
                                QI_PGRP_PDP(private_present) |
                                QI_PGRP_RESP_CODE(msg->code) |
                                QI_PGRP_RESP_TYPE;
                desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
                desc.qw2 = 0;
                desc.qw3 = 0;

                if (private_present) {
                        desc.qw2 = prm->private_data[0];
                        desc.qw3 = prm->private_data[1];
                } else if (prm->private_data[0]) {
                        dmar_latency_update(iommu, DMAR_LATENCY_PRQ,
                                ktime_to_ns(ktime_get()) - prm->private_data[0]);
                }

                qi_submit_sync(iommu, &desc, 1, 0);
        }
out:
        return ret;
}
static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
                                   struct device *dev, ioasid_t pasid)
{
        struct device_domain_info *info = dev_iommu_priv_get(dev);
        struct intel_iommu *iommu = info->iommu;
        struct mm_struct *mm = domain->mm;

        return intel_svm_bind_mm(iommu, dev, mm);
}
static void intel_svm_domain_free(struct iommu_domain *domain)
{
        kfree(to_dmar_domain(domain));
}
static const struct iommu_domain_ops intel_svm_domain_ops = {
        .set_dev_pasid          = intel_svm_set_dev_pasid,
        .free                   = intel_svm_domain_free
};
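/*
 * Allocate an SVA domain. The caller attaches it to a device and PASID
 * through the set_dev_pasid op, which binds the domain's mm.
 */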
struct iommu_domain *intel_svm_domain_alloc(void)
{
        struct dmar_domain *domain;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;
        domain->domain.ops = &intel_svm_domain_ops;

        return &domain->domain;
}