1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
98bcef56 17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
5b6985ce 21 * Author: Fenghua Yu <fenghua.yu@intel.com>
22 */
23
24#include <linux/init.h>
25#include <linux/bitmap.h>
5e0d2a6f 26#include <linux/debugfs.h>
27#include <linux/slab.h>
28#include <linux/irq.h>
29#include <linux/interrupt.h>
30#include <linux/spinlock.h>
31#include <linux/pci.h>
32#include <linux/dmar.h>
33#include <linux/dma-mapping.h>
34#include <linux/mempool.h>
5e0d2a6f 35#include <linux/timer.h>
38717946 36#include <linux/iova.h>
5d450806 37#include <linux/iommu.h>
38717946 38#include <linux/intel-iommu.h>
ba395927 39#include <asm/cacheflush.h>
46a7fa27 40#include <asm/iommu.h>
41#include "pci.h"
42
43#define ROOT_SIZE VTD_PAGE_SIZE
44#define CONTEXT_SIZE VTD_PAGE_SIZE
45
46#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
47#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
48
49#define IOAPIC_RANGE_START (0xfee00000)
50#define IOAPIC_RANGE_END (0xfeefffff)
51#define IOVA_START_ADDR (0x1000)
52
53#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
54
55#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
56
57#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
58#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
59#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
5e0d2a6f 60
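/*
 * Illustrative note (editorial addition, not in the original source):
 * IOVA_PFN() converts a DMA address to a page frame number, so with
 * 4KiB pages IOVA_PFN(DMA_32BIT_MASK) = 0xffffffff >> 12 = 0xfffff,
 * i.e. DMA_32BIT_PFN is the highest page frame reachable with a
 * 32-bit DMA mask.
 */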
61/* global iommu list, set NULL for ignored DMAR units */
62static struct intel_iommu **g_iommus;
63
64/*
65 * 0: Present
66 * 1-11: Reserved
67 * 12-63: Context Ptr (12 - (haw-1))
68 * 64-127: Reserved
69 */
70struct root_entry {
71 u64 val;
72 u64 rsvd1;
73};
74#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
75static inline bool root_present(struct root_entry *root)
76{
77 return (root->val & 1);
78}
79static inline void set_root_present(struct root_entry *root)
80{
81 root->val |= 1;
82}
83static inline void set_root_value(struct root_entry *root, unsigned long value)
84{
85 root->val |= value & VTD_PAGE_MASK;
86}
87
88static inline struct context_entry *
89get_context_addr_from_root(struct root_entry *root)
90{
91 return (struct context_entry *)
92 (root_present(root)?phys_to_virt(
93 root->val & VTD_PAGE_MASK) :
94 NULL);
95}
96
97/*
98 * low 64 bits:
99 * 0: present
100 * 1: fault processing disable
101 * 2-3: translation type
102 * 12-63: address space root
103 * high 64 bits:
104 * 0-2: address width
105 * 3-6: aval
106 * 8-23: domain id
107 */
108struct context_entry {
109 u64 lo;
110 u64 hi;
111};
112
113static inline bool context_present(struct context_entry *context)
114{
115 return (context->lo & 1);
116}
117static inline void context_set_present(struct context_entry *context)
118{
119 context->lo |= 1;
120}
121
122static inline void context_set_fault_enable(struct context_entry *context)
123{
124 context->lo &= (((u64)-1) << 2) | 1;
125}
126
7a8fc25e 127#define CONTEXT_TT_MULTI_LEVEL 0
128
129static inline void context_set_translation_type(struct context_entry *context,
130 unsigned long value)
131{
132 context->lo &= (((u64)-1) << 4) | 3;
133 context->lo |= (value & 3) << 2;
134}
135
136static inline void context_set_address_root(struct context_entry *context,
137 unsigned long value)
138{
139 context->lo |= value & VTD_PAGE_MASK;
140}
141
142static inline void context_set_address_width(struct context_entry *context,
143 unsigned long value)
144{
145 context->hi |= value & 7;
146}
147
148static inline void context_set_domain_id(struct context_entry *context,
149 unsigned long value)
150{
151 context->hi |= (value & ((1 << 16) - 1)) << 8;
152}
153
154static inline void context_clear_entry(struct context_entry *context)
155{
156 context->lo = 0;
157 context->hi = 0;
158}
7a8fc25e 159
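/*
 * Worked example (editorial addition, not in the original source):
 * after context_set_domain_id(c, 5) the value 5 sits in bits 8-23 of
 * c->hi, and context_set_address_width(c, 2) stores AGAW 2 (a 4-level,
 * 48-bit page table) in bits 0-2, matching the layout comment above.
 */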
160/*
161 * 0: readable
162 * 1: writable
163 * 2-6: reserved
164 * 7: super page
165 * 8-11: available
166 * 12-63: Host physical address
167 */
168struct dma_pte {
169 u64 val;
170};
622ba12a 171
172static inline void dma_clear_pte(struct dma_pte *pte)
173{
174 pte->val = 0;
175}
176
177static inline void dma_set_pte_readable(struct dma_pte *pte)
178{
179 pte->val |= DMA_PTE_READ;
180}
181
182static inline void dma_set_pte_writable(struct dma_pte *pte)
183{
184 pte->val |= DMA_PTE_WRITE;
185}
186
187static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
188{
189 pte->val = (pte->val & ~3) | (prot & 3);
190}
191
192static inline u64 dma_pte_addr(struct dma_pte *pte)
193{
194 return (pte->val & VTD_PAGE_MASK);
195}
196
197static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
198{
199 pte->val |= (addr & VTD_PAGE_MASK);
200}
201
202static inline bool dma_pte_present(struct dma_pte *pte)
203{
204 return (pte->val & 3) != 0;
205}
622ba12a 206
3b5410e7 207/* devices under the same p2p bridge are owned in one domain */
cdc7b837 208#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
3b5410e7 209
210/* domain represents a virtual machine; more than one device
211 * across iommus may be owned by one domain, e.g. a kvm guest.
212 */
213#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
214
215struct dmar_domain {
216 int id; /* domain id */
8c11e798 217 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
218
219 struct list_head devices; /* all devices' list */
220 struct iova_domain iovad; /* iova's that belong to this domain */
221
222 struct dma_pte *pgd; /* virtual address */
223 spinlock_t mapping_lock; /* page table lock */
224 int gaw; /* max guest address width */
225
226 /* adjusted guest address width, 0 is level 2 30-bit */
227 int agaw;
228
3b5410e7 229 int flags; /* flags to find out type of domain */
230
231 int iommu_coherency;/* indicate coherency of iommu access */
232 int iommu_count; /* reference count of iommu */
233 spinlock_t iommu_lock; /* protect iommu set in domain */
fe40f1e0 234 u64 max_addr; /* maximum mapped address */
235};
236
237/* PCI domain-device relationship */
238struct device_domain_info {
239 struct list_head link; /* link to domain siblings */
240 struct list_head global; /* link to global list */
241 u8 bus; /* PCI bus number */
242 u8 devfn; /* PCI devfn number */
243 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
244 struct dmar_domain *domain; /* pointer to domain */
245};
246
5e0d2a6f 247static void flush_unmaps_timeout(unsigned long data);
248
249DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
250
80b20dd8 251#define HIGH_WATER_MARK 250
252struct deferred_flush_tables {
253 int next;
254 struct iova *iova[HIGH_WATER_MARK];
255 struct dmar_domain *domain[HIGH_WATER_MARK];
256};
257
258static struct deferred_flush_tables *deferred_flush;
259
5e0d2a6f 260/* bitmap for indexing intel_iommus */
5e0d2a6f 261static int g_num_of_iommus;
262
263static DEFINE_SPINLOCK(async_umap_flush_lock);
264static LIST_HEAD(unmaps_to_do);
265
266static int timer_on;
267static long list_size;
5e0d2a6f 268
269static void domain_remove_dev_info(struct dmar_domain *domain);
270
2ae21010 271int dmar_disabled;
ba395927 272static int __initdata dmar_map_gfx = 1;
7d3b03ce 273static int dmar_forcedac;
5e0d2a6f 274static int intel_iommu_strict;
275
276#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
277static DEFINE_SPINLOCK(device_domain_lock);
278static LIST_HEAD(device_domain_list);
279
280static struct iommu_ops intel_iommu_ops;
281
282static int __init intel_iommu_setup(char *str)
283{
284 if (!str)
285 return -EINVAL;
286 while (*str) {
287 if (!strncmp(str, "off", 3)) {
288 dmar_disabled = 1;
289 printk(KERN_INFO"Intel-IOMMU: disabled\n");
290 } else if (!strncmp(str, "igfx_off", 8)) {
291 dmar_map_gfx = 0;
292 printk(KERN_INFO
293 "Intel-IOMMU: disable GFX device mapping\n");
7d3b03ce 294 } else if (!strncmp(str, "forcedac", 8)) {
5e0d2a6f 295 printk(KERN_INFO
296 "Intel-IOMMU: Forcing DAC for PCI devices\n");
297 dmar_forcedac = 1;
5e0d2a6f 298 } else if (!strncmp(str, "strict", 6)) {
299 printk(KERN_INFO
300 "Intel-IOMMU: disable batched IOTLB flush\n");
301 intel_iommu_strict = 1;
302 }
303
304 str += strcspn(str, ",");
305 while (*str == ',')
306 str++;
307 }
308 return 0;
309}
310__setup("intel_iommu=", intel_iommu_setup);
311
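/*
 * Usage example (editorial addition): the options above are parsed
 * from the kernel command line, so booting with
 *     intel_iommu=igfx_off,strict
 * disables the graphics 1:1 mapping and batched IOTLB flushing, while
 * intel_iommu=off disables the driver entirely.
 */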
312static struct kmem_cache *iommu_domain_cache;
313static struct kmem_cache *iommu_devinfo_cache;
314static struct kmem_cache *iommu_iova_cache;
315
316static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
317{
318 unsigned int flags;
319 void *vaddr;
320
321 /* trying to avoid low memory issues */
322 flags = current->flags & PF_MEMALLOC;
323 current->flags |= PF_MEMALLOC;
324 vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
325 current->flags &= (~PF_MEMALLOC | flags);
326 return vaddr;
327}
328
329
330static inline void *alloc_pgtable_page(void)
331{
332 unsigned int flags;
333 void *vaddr;
334
335 /* trying to avoid low memory issues */
336 flags = current->flags & PF_MEMALLOC;
337 current->flags |= PF_MEMALLOC;
338 vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
339 current->flags &= (~PF_MEMALLOC | flags);
340 return vaddr;
341}
342
343static inline void free_pgtable_page(void *vaddr)
344{
345 free_page((unsigned long)vaddr);
346}
347
348static inline void *alloc_domain_mem(void)
349{
eb3fa7cb 350 return iommu_kmem_cache_alloc(iommu_domain_cache);
351}
352
38717946 353static void free_domain_mem(void *vaddr)
354{
355 kmem_cache_free(iommu_domain_cache, vaddr);
356}
357
358static inline void * alloc_devinfo_mem(void)
359{
eb3fa7cb 360 return iommu_kmem_cache_alloc(iommu_devinfo_cache);
361}
362
363static inline void free_devinfo_mem(void *vaddr)
364{
365 kmem_cache_free(iommu_devinfo_cache, vaddr);
366}
367
368struct iova *alloc_iova_mem(void)
369{
eb3fa7cb 370 return iommu_kmem_cache_alloc(iommu_iova_cache);
371}
372
373void free_iova_mem(struct iova *iova)
374{
375 kmem_cache_free(iommu_iova_cache, iova);
376}
377
378
379static inline int width_to_agaw(int width);
380
381/* calculate agaw for each iommu.
382 * "SAGAW" may be different across iommus, use a default agaw, and
383 * get a supported less agaw for iommus that don't support the default agaw.
384 */
385int iommu_calculate_agaw(struct intel_iommu *iommu)
386{
387 unsigned long sagaw;
388 int agaw = -1;
389
390 sagaw = cap_sagaw(iommu->cap);
391 for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
392 agaw >= 0; agaw--) {
393 if (test_bit(agaw, &sagaw))
394 break;
395 }
396
397 return agaw;
398}
399
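/*
 * Worked example (editorial addition): DEFAULT_DOMAIN_ADDRESS_WIDTH is
 * 48, i.e. agaw 2. If an iommu's SAGAW field only advertises agaw 1
 * (39-bit, 3-level tables), the loop above walks down from 2 and
 * returns 1; if no supported agaw is found at all, -1 is returned.
 */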
400/* in native case, each domain is related to only one iommu */
401static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
402{
403 int iommu_id;
404
405 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
406
407 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
408 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
409 return NULL;
410
411 return g_iommus[iommu_id];
412}
413
414/* "Coherency" capability may be different across iommus */
415static void domain_update_iommu_coherency(struct dmar_domain *domain)
416{
417 int i;
418
419 domain->iommu_coherency = 1;
420
421 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
422 for (; i < g_num_of_iommus; ) {
423 if (!ecap_coherent(g_iommus[i]->ecap)) {
424 domain->iommu_coherency = 0;
425 break;
426 }
427 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
428 }
429}
430
431static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
432{
433 struct dmar_drhd_unit *drhd = NULL;
434 int i;
435
436 for_each_drhd_unit(drhd) {
437 if (drhd->ignored)
438 continue;
439
440 for (i = 0; i < drhd->devices_cnt; i++)
441 if (drhd->devices[i] &&
442 drhd->devices[i]->bus->number == bus &&
443 drhd->devices[i]->devfn == devfn)
444 return drhd->iommu;
445
446 if (drhd->include_all)
447 return drhd->iommu;
448 }
449
450 return NULL;
451}
452
453static void domain_flush_cache(struct dmar_domain *domain,
454 void *addr, int size)
455{
456 if (!domain->iommu_coherency)
457 clflush_cache_range(addr, size);
458}
459
460/* Gets context entry for a given bus and devfn */
461static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
462 u8 bus, u8 devfn)
463{
464 struct root_entry *root;
465 struct context_entry *context;
466 unsigned long phy_addr;
467 unsigned long flags;
468
469 spin_lock_irqsave(&iommu->lock, flags);
470 root = &iommu->root_entry[bus];
471 context = get_context_addr_from_root(root);
472 if (!context) {
473 context = (struct context_entry *)alloc_pgtable_page();
474 if (!context) {
475 spin_unlock_irqrestore(&iommu->lock, flags);
476 return NULL;
477 }
5b6985ce 478 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
479 phy_addr = virt_to_phys((void *)context);
480 set_root_value(root, phy_addr);
481 set_root_present(root);
482 __iommu_flush_cache(iommu, root, sizeof(*root));
483 }
484 spin_unlock_irqrestore(&iommu->lock, flags);
485 return &context[devfn];
486}
487
488static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
489{
490 struct root_entry *root;
491 struct context_entry *context;
492 int ret;
493 unsigned long flags;
494
495 spin_lock_irqsave(&iommu->lock, flags);
496 root = &iommu->root_entry[bus];
497 context = get_context_addr_from_root(root);
498 if (!context) {
499 ret = 0;
500 goto out;
501 }
c07e7d21 502 ret = context_present(&context[devfn]);
503out:
504 spin_unlock_irqrestore(&iommu->lock, flags);
505 return ret;
506}
507
508static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
509{
510 struct root_entry *root;
511 struct context_entry *context;
512 unsigned long flags;
513
514 spin_lock_irqsave(&iommu->lock, flags);
515 root = &iommu->root_entry[bus];
516 context = get_context_addr_from_root(root);
517 if (context) {
c07e7d21 518 context_clear_entry(&context[devfn]);
519 __iommu_flush_cache(iommu, &context[devfn], \
520 sizeof(*context));
521 }
522 spin_unlock_irqrestore(&iommu->lock, flags);
523}
524
525static void free_context_table(struct intel_iommu *iommu)
526{
527 struct root_entry *root;
528 int i;
529 unsigned long flags;
530 struct context_entry *context;
531
532 spin_lock_irqsave(&iommu->lock, flags);
533 if (!iommu->root_entry) {
534 goto out;
535 }
536 for (i = 0; i < ROOT_ENTRY_NR; i++) {
537 root = &iommu->root_entry[i];
538 context = get_context_addr_from_root(root);
539 if (context)
540 free_pgtable_page(context);
541 }
542 free_pgtable_page(iommu->root_entry);
543 iommu->root_entry = NULL;
544out:
545 spin_unlock_irqrestore(&iommu->lock, flags);
546}
547
548/* page table handling */
549#define LEVEL_STRIDE (9)
550#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
551
552static inline int agaw_to_level(int agaw)
553{
554 return agaw + 2;
555}
556
557static inline int agaw_to_width(int agaw)
558{
559 return 30 + agaw * LEVEL_STRIDE;
560
561}
562
563static inline int width_to_agaw(int width)
564{
565 return (width - 30) / LEVEL_STRIDE;
566}
567
568static inline unsigned int level_to_offset_bits(int level)
569{
570 return (12 + (level - 1) * LEVEL_STRIDE);
571}
572
573static inline int address_level_offset(u64 addr, int level)
574{
575 return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
576}
577
578static inline u64 level_mask(int level)
579{
580 return ((u64)-1 << level_to_offset_bits(level));
581}
582
583static inline u64 level_size(int level)
584{
585 return ((u64)1 << level_to_offset_bits(level));
586}
587
588static inline u64 align_to_level(u64 addr, int level)
589{
590 return ((addr + level_size(level) - 1) & level_mask(level));
591}
592
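/*
 * Worked example (editorial addition): for the default 48-bit width,
 * width_to_agaw(48) = (48 - 30) / 9 = 2 and agaw_to_level(2) = 4, i.e.
 * a 4-level page table. Each level indexes LEVEL_STRIDE = 9 address
 * bits: level 1 covers bits 12-20, level 2 bits 21-29, level 3 bits
 * 30-38 and level 4 bits 39-47, as computed by level_to_offset_bits().
 */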
593static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
594{
595 int addr_width = agaw_to_width(domain->agaw);
596 struct dma_pte *parent, *pte = NULL;
597 int level = agaw_to_level(domain->agaw);
598 int offset;
599 unsigned long flags;
600
601 BUG_ON(!domain->pgd);
602
603 addr &= (((u64)1) << addr_width) - 1;
604 parent = domain->pgd;
605
606 spin_lock_irqsave(&domain->mapping_lock, flags);
607 while (level > 0) {
608 void *tmp_page;
609
610 offset = address_level_offset(addr, level);
611 pte = &parent[offset];
612 if (level == 1)
613 break;
614
19c239ce 615 if (!dma_pte_present(pte)) {
616 tmp_page = alloc_pgtable_page();
617
618 if (!tmp_page) {
619 spin_unlock_irqrestore(&domain->mapping_lock,
620 flags);
621 return NULL;
622 }
5331fe6f 623 domain_flush_cache(domain, tmp_page, PAGE_SIZE);
19c239ce 624 dma_set_pte_addr(pte, virt_to_phys(tmp_page));
625 /*
626 * higher level tables always set r/w; the last level page
627 * table controls read/write
628 */
629 dma_set_pte_readable(pte);
630 dma_set_pte_writable(pte);
5331fe6f 631 domain_flush_cache(domain, pte, sizeof(*pte));
ba395927 632 }
19c239ce 633 parent = phys_to_virt(dma_pte_addr(pte));
634 level--;
635 }
636
637 spin_unlock_irqrestore(&domain->mapping_lock, flags);
638 return pte;
639}
640
641/* return address's pte at specific level */
642static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
643 int level)
644{
645 struct dma_pte *parent, *pte = NULL;
646 int total = agaw_to_level(domain->agaw);
647 int offset;
648
649 parent = domain->pgd;
650 while (level <= total) {
651 offset = address_level_offset(addr, total);
652 pte = &parent[offset];
653 if (level == total)
654 return pte;
655
19c239ce 656 if (!dma_pte_present(pte))
ba395927 657 break;
19c239ce 658 parent = phys_to_virt(dma_pte_addr(pte));
659 total--;
660 }
661 return NULL;
662}
663
664/* clear one page's page table */
665static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
666{
667 struct dma_pte *pte = NULL;
668
669 /* get last level pte */
670 pte = dma_addr_level_pte(domain, addr, 1);
671
672 if (pte) {
19c239ce 673 dma_clear_pte(pte);
5331fe6f 674 domain_flush_cache(domain, pte, sizeof(*pte));
675 }
676}
677
678/* clear last level pte, a tlb flush should be followed */
679static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
680{
681 int addr_width = agaw_to_width(domain->agaw);
682
683 start &= (((u64)1) << addr_width) - 1;
684 end &= (((u64)1) << addr_width) - 1;
685 /* in case it's partial page */
686 start = PAGE_ALIGN(start);
687 end &= PAGE_MASK;
688
689 /* we don't need lock here, nobody else touches the iova range */
690 while (start < end) {
691 dma_pte_clear_one(domain, start);
5b6985ce 692 start += VTD_PAGE_SIZE;
693 }
694}
695
696/* free page table pages. last level pte should already be cleared */
697static void dma_pte_free_pagetable(struct dmar_domain *domain,
698 u64 start, u64 end)
699{
700 int addr_width = agaw_to_width(domain->agaw);
701 struct dma_pte *pte;
702 int total = agaw_to_level(domain->agaw);
703 int level;
704 u64 tmp;
705
706 start &= (((u64)1) << addr_width) - 1;
707 end &= (((u64)1) << addr_width) - 1;
708
709 /* we don't need lock here, nobody else touches the iova range */
710 level = 2;
711 while (level <= total) {
712 tmp = align_to_level(start, level);
713 if (tmp >= end || (tmp + level_size(level) > end))
714 return;
715
716 while (tmp < end) {
717 pte = dma_addr_level_pte(domain, tmp, level);
718 if (pte) {
719 free_pgtable_page(
720 phys_to_virt(dma_pte_addr(pte)));
721 dma_clear_pte(pte);
5331fe6f 722 domain_flush_cache(domain, pte, sizeof(*pte));
723 }
724 tmp += level_size(level);
725 }
726 level++;
727 }
728 /* free pgd */
729 if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
730 free_pgtable_page(domain->pgd);
731 domain->pgd = NULL;
732 }
733}
734
735/* iommu handling */
736static int iommu_alloc_root_entry(struct intel_iommu *iommu)
737{
738 struct root_entry *root;
739 unsigned long flags;
740
741 root = (struct root_entry *)alloc_pgtable_page();
742 if (!root)
743 return -ENOMEM;
744
5b6985ce 745 __iommu_flush_cache(iommu, root, ROOT_SIZE);
746
747 spin_lock_irqsave(&iommu->lock, flags);
748 iommu->root_entry = root;
749 spin_unlock_irqrestore(&iommu->lock, flags);
750
751 return 0;
752}
753
754static void iommu_set_root_entry(struct intel_iommu *iommu)
755{
756 void *addr;
757 u32 cmd, sts;
758 unsigned long flag;
759
760 addr = iommu->root_entry;
761
762 spin_lock_irqsave(&iommu->register_lock, flag);
763 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
764
765 cmd = iommu->gcmd | DMA_GCMD_SRTP;
766 writel(cmd, iommu->reg + DMAR_GCMD_REG);
767
768 /* Make sure hardware completes it */
769 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
770 readl, (sts & DMA_GSTS_RTPS), sts);
771
772 spin_unlock_irqrestore(&iommu->register_lock, flag);
773}
774
775static void iommu_flush_write_buffer(struct intel_iommu *iommu)
776{
777 u32 val;
778 unsigned long flag;
779
780 if (!cap_rwbf(iommu->cap))
781 return;
782 val = iommu->gcmd | DMA_GCMD_WBF;
783
784 spin_lock_irqsave(&iommu->register_lock, flag);
785 writel(val, iommu->reg + DMAR_GCMD_REG);
786
787 /* Make sure hardware completes it */
788 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
789 readl, (!(val & DMA_GSTS_WBFS)), val);
790
791 spin_unlock_irqrestore(&iommu->register_lock, flag);
792}
793
794/* return value determines whether we need a write buffer flush */
795static int __iommu_flush_context(struct intel_iommu *iommu,
796 u16 did, u16 source_id, u8 function_mask, u64 type,
797 int non_present_entry_flush)
798{
799 u64 val = 0;
800 unsigned long flag;
801
802 /*
803 * In the non-present entry flush case, if hardware doesn't cache
804 * non-present entries we do nothing; if hardware does cache non-present
805 * entries, we flush entries of domain 0 (the domain id is used to cache
806 * any non-present entries)
807 */
808 if (non_present_entry_flush) {
809 if (!cap_caching_mode(iommu->cap))
810 return 1;
811 else
812 did = 0;
813 }
814
815 switch (type) {
816 case DMA_CCMD_GLOBAL_INVL:
817 val = DMA_CCMD_GLOBAL_INVL;
818 break;
819 case DMA_CCMD_DOMAIN_INVL:
820 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
821 break;
822 case DMA_CCMD_DEVICE_INVL:
823 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
824 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
825 break;
826 default:
827 BUG();
828 }
829 val |= DMA_CCMD_ICC;
830
831 spin_lock_irqsave(&iommu->register_lock, flag);
832 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
833
834 /* Make sure hardware completes it */
835 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
836 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
837
838 spin_unlock_irqrestore(&iommu->register_lock, flag);
839
4d235ba6 840 /* flush context entry will implicitly flush write buffer */
841 return 0;
842}
843
844/* return value determines whether we need a write buffer flush */
845static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
846 u64 addr, unsigned int size_order, u64 type,
847 int non_present_entry_flush)
848{
849 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
850 u64 val = 0, val_iva = 0;
851 unsigned long flag;
852
853 /*
854 * In the non-present entry flush case, if hardware doesn't cache
855 * non-present entries we do nothing; if hardware does cache non-present
856 * entries, we flush entries of domain 0 (the domain id is used to cache
857 * any non-present entries)
858 */
859 if (non_present_entry_flush) {
860 if (!cap_caching_mode(iommu->cap))
861 return 1;
862 else
863 did = 0;
864 }
865
866 switch (type) {
867 case DMA_TLB_GLOBAL_FLUSH:
868 /* global flush doesn't need set IVA_REG */
869 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
870 break;
871 case DMA_TLB_DSI_FLUSH:
872 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
873 break;
874 case DMA_TLB_PSI_FLUSH:
875 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
876 /* Note: always flush non-leaf currently */
877 val_iva = size_order | addr;
878 break;
879 default:
880 BUG();
881 }
882 /* Note: set drain read/write */
883#if 0
884 /*
885 * This is probably just to be extra safe. Looks like we can
886 * ignore it without any impact.
887 */
888 if (cap_read_drain(iommu->cap))
889 val |= DMA_TLB_READ_DRAIN;
890#endif
891 if (cap_write_drain(iommu->cap))
892 val |= DMA_TLB_WRITE_DRAIN;
893
894 spin_lock_irqsave(&iommu->register_lock, flag);
895 /* Note: Only uses first TLB reg currently */
896 if (val_iva)
897 dmar_writeq(iommu->reg + tlb_offset, val_iva);
898 dmar_writeq(iommu->reg + tlb_offset + 8, val);
899
900 /* Make sure hardware completes it */
901 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
902 dmar_readq, (!(val & DMA_TLB_IVT)), val);
903
904 spin_unlock_irqrestore(&iommu->register_lock, flag);
905
906 /* check IOTLB invalidation granularity */
907 if (DMA_TLB_IAIG(val) == 0)
908 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
909 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
910 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
911 (unsigned long long)DMA_TLB_IIRG(type),
912 (unsigned long long)DMA_TLB_IAIG(val));
4d235ba6 913 /* flush iotlb entry will implicitly flush write buffer */
914 return 0;
915}
916
917static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
918 u64 addr, unsigned int pages, int non_present_entry_flush)
919{
f76aec76 920 unsigned int mask;
ba395927 921
5b6985ce 922 BUG_ON(addr & (~VTD_PAGE_MASK));
923 BUG_ON(pages == 0);
924
925 /* Fallback to domain selective flush if no PSI support */
926 if (!cap_pgsel_inv(iommu->cap))
927 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
928 DMA_TLB_DSI_FLUSH,
929 non_present_entry_flush);
930
931 /*
932 * PSI requires page size to be 2 ^ x, and the base address is naturally
933 * aligned to the size
934 */
f76aec76 935 mask = ilog2(__roundup_pow_of_two(pages));
ba395927 936 /* Fallback to domain selective flush if size is too big */
f76aec76 937 if (mask > cap_max_amask_val(iommu->cap))
938 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
939 DMA_TLB_DSI_FLUSH, non_present_entry_flush);
ba395927 940
941 return iommu->flush.flush_iotlb(iommu, did, addr, mask,
942 DMA_TLB_PSI_FLUSH,
943 non_present_entry_flush);
944}
945
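/*
 * Example of the PSI mask computation above (editorial addition):
 * flushing 5 pages gives mask = ilog2(__roundup_pow_of_two(5)) =
 * ilog2(8) = 3, so hardware invalidates an 8-page naturally aligned
 * region containing the request; if the mask exceeded
 * cap_max_amask_val() we would fall back to a domain-selective flush.
 */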
f8bab735 946static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
947{
948 u32 pmen;
949 unsigned long flags;
950
951 spin_lock_irqsave(&iommu->register_lock, flags);
952 pmen = readl(iommu->reg + DMAR_PMEN_REG);
953 pmen &= ~DMA_PMEN_EPM;
954 writel(pmen, iommu->reg + DMAR_PMEN_REG);
955
956 /* wait for the protected region status bit to clear */
957 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
958 readl, !(pmen & DMA_PMEN_PRS), pmen);
959
960 spin_unlock_irqrestore(&iommu->register_lock, flags);
961}
962
963static int iommu_enable_translation(struct intel_iommu *iommu)
964{
965 u32 sts;
966 unsigned long flags;
967
968 spin_lock_irqsave(&iommu->register_lock, flags);
969 writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
970
971 /* Make sure hardware completes it */
972 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
973 readl, (sts & DMA_GSTS_TES), sts);
974
975 iommu->gcmd |= DMA_GCMD_TE;
976 spin_unlock_irqrestore(&iommu->register_lock, flags);
977 return 0;
978}
979
980static int iommu_disable_translation(struct intel_iommu *iommu)
981{
982 u32 sts;
983 unsigned long flag;
984
985 spin_lock_irqsave(&iommu->register_lock, flag);
986 iommu->gcmd &= ~DMA_GCMD_TE;
987 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
988
989 /* Make sure hardware completes it */
990 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
991 readl, (!(sts & DMA_GSTS_TES)), sts);
992
993 spin_unlock_irqrestore(&iommu->register_lock, flag);
994 return 0;
995}
996
997/* iommu interrupt handling. Most of it is MSI-like. */
998
d94afc6c 999static const char *fault_reason_strings[] =
1000{
1001 "Software",
1002 "Present bit in root entry is clear",
1003 "Present bit in context entry is clear",
1004 "Invalid context entry",
1005 "Access beyond MGAW",
1006 "PTE Write access is not set",
1007 "PTE Read access is not set",
1008 "Next page table ptr is invalid",
1009 "Root table address invalid",
1010 "Context table ptr is invalid",
1011 "non-zero reserved fields in RTP",
1012 "non-zero reserved fields in CTP",
1013 "non-zero reserved fields in PTE",
3460a6d9 1014};
f8bab735 1015#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
3460a6d9 1016
d94afc6c 1017const char *dmar_get_fault_reason(u8 fault_reason)
3460a6d9 1018{
d94afc6c 1019 if (fault_reason > MAX_FAULT_REASON_IDX)
1020 return "Unknown";
1021 else
1022 return fault_reason_strings[fault_reason];
1023}
1024
1025void dmar_msi_unmask(unsigned int irq)
1026{
1027 struct intel_iommu *iommu = get_irq_data(irq);
1028 unsigned long flag;
1029
1030 /* unmask it */
1031 spin_lock_irqsave(&iommu->register_lock, flag);
1032 writel(0, iommu->reg + DMAR_FECTL_REG);
1033 /* Read a reg to force flush the post write */
1034 readl(iommu->reg + DMAR_FECTL_REG);
1035 spin_unlock_irqrestore(&iommu->register_lock, flag);
1036}
1037
1038void dmar_msi_mask(unsigned int irq)
1039{
1040 unsigned long flag;
1041 struct intel_iommu *iommu = get_irq_data(irq);
1042
1043 /* mask it */
1044 spin_lock_irqsave(&iommu->register_lock, flag);
1045 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1046 /* Read a reg to force flush the post write */
1047 readl(iommu->reg + DMAR_FECTL_REG);
1048 spin_unlock_irqrestore(&iommu->register_lock, flag);
1049}
1050
1051void dmar_msi_write(int irq, struct msi_msg *msg)
1052{
1053 struct intel_iommu *iommu = get_irq_data(irq);
1054 unsigned long flag;
1055
1056 spin_lock_irqsave(&iommu->register_lock, flag);
1057 writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1058 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1059 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1060 spin_unlock_irqrestore(&iommu->register_lock, flag);
1061}
1062
1063void dmar_msi_read(int irq, struct msi_msg *msg)
1064{
1065 struct intel_iommu *iommu = get_irq_data(irq);
1066 unsigned long flag;
1067
1068 spin_lock_irqsave(&iommu->register_lock, flag);
1069 msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1070 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1071 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1072 spin_unlock_irqrestore(&iommu->register_lock, flag);
1073}
1074
1075static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
5b6985ce 1076 u8 fault_reason, u16 source_id, unsigned long long addr)
3460a6d9 1077{
d94afc6c 1078 const char *reason;
1079
1080 reason = dmar_get_fault_reason(fault_reason);
1081
1082 printk(KERN_ERR
1083 "DMAR:[%s] Request device [%02x:%02x.%d] "
1084 "fault addr %llx \n"
1085 "DMAR:[fault reason %02d] %s\n",
1086 (type ? "DMA Read" : "DMA Write"),
1087 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1088 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1089 return 0;
1090}
1091
1092#define PRIMARY_FAULT_REG_LEN (16)
1093static irqreturn_t iommu_page_fault(int irq, void *dev_id)
1094{
1095 struct intel_iommu *iommu = dev_id;
1096 int reg, fault_index;
1097 u32 fault_status;
1098 unsigned long flag;
1099
1100 spin_lock_irqsave(&iommu->register_lock, flag);
1101 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1102
1103 /* TBD: ignore advanced fault log currently */
1104 if (!(fault_status & DMA_FSTS_PPF))
1105 goto clear_overflow;
1106
1107 fault_index = dma_fsts_fault_record_index(fault_status);
1108 reg = cap_fault_reg_offset(iommu->cap);
1109 while (1) {
1110 u8 fault_reason;
1111 u16 source_id;
1112 u64 guest_addr;
1113 int type;
1114 u32 data;
1115
1116 /* highest 32 bits */
1117 data = readl(iommu->reg + reg +
1118 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1119 if (!(data & DMA_FRCD_F))
1120 break;
1121
1122 fault_reason = dma_frcd_fault_reason(data);
1123 type = dma_frcd_type(data);
1124
1125 data = readl(iommu->reg + reg +
1126 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1127 source_id = dma_frcd_source_id(data);
1128
1129 guest_addr = dmar_readq(iommu->reg + reg +
1130 fault_index * PRIMARY_FAULT_REG_LEN);
1131 guest_addr = dma_frcd_page_addr(guest_addr);
1132 /* clear the fault */
1133 writel(DMA_FRCD_F, iommu->reg + reg +
1134 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1135
1136 spin_unlock_irqrestore(&iommu->register_lock, flag);
1137
1138 iommu_page_fault_do_one(iommu, type, fault_reason,
1139 source_id, guest_addr);
1140
1141 fault_index++;
1142 if (fault_index > cap_num_fault_regs(iommu->cap))
1143 fault_index = 0;
1144 spin_lock_irqsave(&iommu->register_lock, flag);
1145 }
1146clear_overflow:
1147 /* clear primary fault overflow */
1148 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1149 if (fault_status & DMA_FSTS_PFO)
1150 writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
1151
1152 spin_unlock_irqrestore(&iommu->register_lock, flag);
1153 return IRQ_HANDLED;
1154}
1155
1156int dmar_set_interrupt(struct intel_iommu *iommu)
1157{
1158 int irq, ret;
1159
1160 irq = create_irq();
1161 if (!irq) {
1162 printk(KERN_ERR "IOMMU: no free vectors\n");
1163 return -EINVAL;
1164 }
1165
1166 set_irq_data(irq, iommu);
1167 iommu->irq = irq;
1168
1169 ret = arch_setup_dmar_msi(irq);
1170 if (ret) {
1171 set_irq_data(irq, NULL);
1172 iommu->irq = 0;
1173 destroy_irq(irq);
1174 return 0;
1175 }
1176
1177 /* Make sure the fault register is cleared */
1178 iommu_page_fault(irq, iommu);
1179
1180 ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu);
1181 if (ret)
1182 printk(KERN_ERR "IOMMU: can't request irq\n");
1183 return ret;
1184}
1185
1186static int iommu_init_domains(struct intel_iommu *iommu)
1187{
1188 unsigned long ndomains;
1189 unsigned long nlongs;
1190
1191 ndomains = cap_ndoms(iommu->cap);
1192 pr_debug("Number of Domains supported <%ld>\n", ndomains);
1193 nlongs = BITS_TO_LONGS(ndomains);
1194
1195 /* TBD: there might be 64K domains,
1196 * consider other allocation for future chip
1197 */
1198 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1199 if (!iommu->domain_ids) {
1200 printk(KERN_ERR "Allocating domain id array failed\n");
1201 return -ENOMEM;
1202 }
1203 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1204 GFP_KERNEL);
1205 if (!iommu->domains) {
1206 printk(KERN_ERR "Allocating domain array failed\n");
1207 kfree(iommu->domain_ids);
1208 return -ENOMEM;
1209 }
1210
1211 spin_lock_init(&iommu->lock);
1212
1213 /*
1214 * if Caching mode is set, then invalid translations are tagged
1215 * with domainid 0. Hence we need to pre-allocate it.
1216 */
1217 if (cap_caching_mode(iommu->cap))
1218 set_bit(0, iommu->domain_ids);
1219 return 0;
1220}
ba395927 1221
1222
1223static void domain_exit(struct dmar_domain *domain);
5e98c4b1 1224static void vm_domain_exit(struct dmar_domain *domain);
1225
1226void free_dmar_iommu(struct intel_iommu *iommu)
1227{
1228 struct dmar_domain *domain;
1229 int i;
c7151a8d 1230 unsigned long flags;
ba395927 1231
1232 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1233 for (; i < cap_ndoms(iommu->cap); ) {
1234 domain = iommu->domains[i];
1235 clear_bit(i, iommu->domain_ids);
1236
1237 spin_lock_irqsave(&domain->iommu_lock, flags);
1238 if (--domain->iommu_count == 0) {
1239 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1240 vm_domain_exit(domain);
1241 else
1242 domain_exit(domain);
1243 }
1244 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1245
1246 i = find_next_bit(iommu->domain_ids,
1247 cap_ndoms(iommu->cap), i+1);
1248 }
1249
1250 if (iommu->gcmd & DMA_GCMD_TE)
1251 iommu_disable_translation(iommu);
1252
1253 if (iommu->irq) {
1254 set_irq_data(iommu->irq, NULL);
1255 /* This will mask the irq */
1256 free_irq(iommu->irq, iommu);
1257 destroy_irq(iommu->irq);
1258 }
1259
1260 kfree(iommu->domains);
1261 kfree(iommu->domain_ids);
1262
1263 g_iommus[iommu->seq_id] = NULL;
1264
1265 /* if all iommus are freed, free g_iommus */
1266 for (i = 0; i < g_num_of_iommus; i++) {
1267 if (g_iommus[i])
1268 break;
1269 }
1270
1271 if (i == g_num_of_iommus)
1272 kfree(g_iommus);
1273
1274 /* free context mapping */
1275 free_context_table(iommu);
1276}
1277
1278static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
1279{
1280 unsigned long num;
1281 unsigned long ndomains;
1282 struct dmar_domain *domain;
1283 unsigned long flags;
1284
1285 domain = alloc_domain_mem();
1286 if (!domain)
1287 return NULL;
1288
1289 ndomains = cap_ndoms(iommu->cap);
1290
1291 spin_lock_irqsave(&iommu->lock, flags);
1292 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1293 if (num >= ndomains) {
1294 spin_unlock_irqrestore(&iommu->lock, flags);
1295 free_domain_mem(domain);
1296 printk(KERN_ERR "IOMMU: no free domain ids\n");
1297 return NULL;
1298 }
1299
1300 set_bit(num, iommu->domain_ids);
1301 domain->id = num;
1302 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1303 set_bit(iommu->seq_id, &domain->iommu_bmp);
d71a2f33 1304 domain->flags = 0;
1305 iommu->domains[num] = domain;
1306 spin_unlock_irqrestore(&iommu->lock, flags);
1307
1308 return domain;
1309}
1310
1311static void iommu_free_domain(struct dmar_domain *domain)
1312{
1313 unsigned long flags;
1314 struct intel_iommu *iommu;
1315
1316 iommu = domain_get_iommu(domain);
ba395927 1317
1318 spin_lock_irqsave(&iommu->lock, flags);
1319 clear_bit(domain->id, iommu->domain_ids);
1320 spin_unlock_irqrestore(&iommu->lock, flags);
1321}
1322
1323static struct iova_domain reserved_iova_list;
1324static struct lock_class_key reserved_alloc_key;
1325static struct lock_class_key reserved_rbtree_key;
1326
1327static void dmar_init_reserved_ranges(void)
1328{
1329 struct pci_dev *pdev = NULL;
1330 struct iova *iova;
1331 int i;
1332 u64 addr, size;
1333
f661197e 1334 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
ba395927 1335
1336 lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
1337 &reserved_alloc_key);
1338 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1339 &reserved_rbtree_key);
1340
1341 /* IOAPIC ranges shouldn't be accessed by DMA */
1342 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1343 IOVA_PFN(IOAPIC_RANGE_END));
1344 if (!iova)
1345 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1346
1347 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1348 for_each_pci_dev(pdev) {
1349 struct resource *r;
1350
1351 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1352 r = &pdev->resource[i];
1353 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1354 continue;
1355 addr = r->start;
5b6985ce 1356 addr &= PAGE_MASK;
ba395927 1357 size = r->end - addr;
5b6985ce 1358 size = PAGE_ALIGN(size);
1359 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
1360 IOVA_PFN(size + addr) - 1);
1361 if (!iova)
1362 printk(KERN_ERR "Reserve iova failed\n");
1363 }
1364 }
1365
1366}
1367
1368static void domain_reserve_special_ranges(struct dmar_domain *domain)
1369{
1370 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1371}
1372
1373static inline int guestwidth_to_adjustwidth(int gaw)
1374{
1375 int agaw;
1376 int r = (gaw - 12) % 9;
1377
1378 if (r == 0)
1379 agaw = gaw;
1380 else
1381 agaw = gaw + 9 - r;
1382 if (agaw > 64)
1383 agaw = 64;
1384 return agaw;
1385}
1386
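/*
 * Worked example (editorial addition): guestwidth_to_adjustwidth(36)
 * computes r = (36 - 12) % 9 = 6 and returns 36 + 9 - 6 = 39, the next
 * width expressible with whole 9-bit page-table levels; results above
 * 64 are clamped to 64.
 */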
1387static int domain_init(struct dmar_domain *domain, int guest_width)
1388{
1389 struct intel_iommu *iommu;
1390 int adjust_width, agaw;
1391 unsigned long sagaw;
1392
f661197e 1393 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
ba395927 1394 spin_lock_init(&domain->mapping_lock);
c7151a8d 1395 spin_lock_init(&domain->iommu_lock);
1396
1397 domain_reserve_special_ranges(domain);
1398
1399 /* calculate AGAW */
8c11e798 1400 iommu = domain_get_iommu(domain);
1401 if (guest_width > cap_mgaw(iommu->cap))
1402 guest_width = cap_mgaw(iommu->cap);
1403 domain->gaw = guest_width;
1404 adjust_width = guestwidth_to_adjustwidth(guest_width);
1405 agaw = width_to_agaw(adjust_width);
1406 sagaw = cap_sagaw(iommu->cap);
1407 if (!test_bit(agaw, &sagaw)) {
1408 /* hardware doesn't support it, choose a bigger one */
1409 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1410 agaw = find_next_bit(&sagaw, 5, agaw);
1411 if (agaw >= 5)
1412 return -ENODEV;
1413 }
1414 domain->agaw = agaw;
1415 INIT_LIST_HEAD(&domain->devices);
1416
1417 if (ecap_coherent(iommu->ecap))
1418 domain->iommu_coherency = 1;
1419 else
1420 domain->iommu_coherency = 0;
1421
1422 domain->iommu_count = 1;
1423
1424 /* always allocate the top pgd */
1425 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1426 if (!domain->pgd)
1427 return -ENOMEM;
5b6985ce 1428 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1429 return 0;
1430}
1431
1432static void domain_exit(struct dmar_domain *domain)
1433{
1434 u64 end;
1435
1436 /* Domain 0 is reserved, so don't process it */
1437 if (!domain)
1438 return;
1439
1440 domain_remove_dev_info(domain);
1441 /* destroy iovas */
1442 put_iova_domain(&domain->iovad);
1443 end = DOMAIN_MAX_ADDR(domain->gaw);
5b6985ce 1444 end = end & (~PAGE_MASK);
1445
1446 /* clear ptes */
1447 dma_pte_clear_range(domain, 0, end);
1448
1449 /* free page tables */
1450 dma_pte_free_pagetable(domain, 0, end);
1451
1452 iommu_free_domain(domain);
1453 free_domain_mem(domain);
1454}
1455
1456static int domain_context_mapping_one(struct dmar_domain *domain,
1457 u8 bus, u8 devfn)
1458{
1459 struct context_entry *context;
ba395927 1460 unsigned long flags;
5331fe6f 1461 struct intel_iommu *iommu;
1462 struct dma_pte *pgd;
1463 unsigned long num;
1464 unsigned long ndomains;
1465 int id;
1466 int agaw;
1467
1468 pr_debug("Set context mapping for %02x:%02x.%d\n",
1469 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1470 BUG_ON(!domain->pgd);
1471
1472 iommu = device_to_iommu(bus, devfn);
1473 if (!iommu)
1474 return -ENODEV;
1475
1476 context = device_to_context_entry(iommu, bus, devfn);
1477 if (!context)
1478 return -ENOMEM;
1479 spin_lock_irqsave(&iommu->lock, flags);
c07e7d21 1480 if (context_present(context)) {
1481 spin_unlock_irqrestore(&iommu->lock, flags);
1482 return 0;
1483 }
1484
1485 id = domain->id;
1486 pgd = domain->pgd;
1487
1488 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
1489 int found = 0;
1490
1491 /* find an available domain id for this device in iommu */
1492 ndomains = cap_ndoms(iommu->cap);
1493 num = find_first_bit(iommu->domain_ids, ndomains);
1494 for (; num < ndomains; ) {
1495 if (iommu->domains[num] == domain) {
1496 id = num;
1497 found = 1;
1498 break;
1499 }
1500 num = find_next_bit(iommu->domain_ids,
1501 cap_ndoms(iommu->cap), num+1);
1502 }
1503
1504 if (found == 0) {
1505 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1506 if (num >= ndomains) {
1507 spin_unlock_irqrestore(&iommu->lock, flags);
1508 printk(KERN_ERR "IOMMU: no free domain ids\n");
1509 return -EFAULT;
1510 }
1511
1512 set_bit(num, iommu->domain_ids);
1513 iommu->domains[num] = domain;
1514 id = num;
1515 }
1516
1517 /* Skip top levels of page tables for
1518 * iommu which has less agaw than default.
1519 */
1520 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1521 pgd = phys_to_virt(dma_pte_addr(pgd));
1522 if (!dma_pte_present(pgd)) {
1523 spin_unlock_irqrestore(&iommu->lock, flags);
1524 return -ENOMEM;
1525 }
1526 }
1527 }
1528
1529 context_set_domain_id(context, id);
1530 context_set_address_width(context, iommu->agaw);
1531 context_set_address_root(context, virt_to_phys(pgd));
1532 context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
1533 context_set_fault_enable(context);
1534 context_set_present(context);
5331fe6f 1535 domain_flush_cache(domain, context, sizeof(*context));
1536
1537 /* it's a non-present to present mapping */
1538 if (iommu->flush.flush_context(iommu, domain->id,
1539 (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
1540 DMA_CCMD_DEVICE_INVL, 1))
1541 iommu_flush_write_buffer(iommu);
1542 else
1543 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
1544
ba395927 1545 spin_unlock_irqrestore(&iommu->lock, flags);
1546
1547 spin_lock_irqsave(&domain->iommu_lock, flags);
1548 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1549 domain->iommu_count++;
1550 domain_update_iommu_coherency(domain);
1551 }
1552 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1553 return 0;
1554}
1555
1556static int
1557domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
1558{
1559 int ret;
1560 struct pci_dev *tmp, *parent;
1561
1562 ret = domain_context_mapping_one(domain, pdev->bus->number,
1563 pdev->devfn);
1564 if (ret)
1565 return ret;
1566
1567 /* dependent device mapping */
1568 tmp = pci_find_upstream_pcie_bridge(pdev);
1569 if (!tmp)
1570 return 0;
1571 /* Secondary interface's bus number and devfn 0 */
1572 parent = pdev->bus->self;
1573 while (parent != tmp) {
1574 ret = domain_context_mapping_one(domain, parent->bus->number,
1575 parent->devfn);
1576 if (ret)
1577 return ret;
1578 parent = parent->bus->self;
1579 }
1580 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1581 return domain_context_mapping_one(domain,
1582 tmp->subordinate->number, 0);
1583 else /* this is a legacy PCI bridge */
1584 return domain_context_mapping_one(domain,
1585 tmp->bus->number, tmp->devfn);
1586}
1587
5331fe6f 1588static int domain_context_mapped(struct pci_dev *pdev)
1589{
1590 int ret;
1591 struct pci_dev *tmp, *parent;
1592 struct intel_iommu *iommu;
1593
1594 iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
1595 if (!iommu)
1596 return -ENODEV;
ba395927 1597
8c11e798 1598 ret = device_context_mapped(iommu,
1599 pdev->bus->number, pdev->devfn);
1600 if (!ret)
1601 return ret;
1602 /* dependent device mapping */
1603 tmp = pci_find_upstream_pcie_bridge(pdev);
1604 if (!tmp)
1605 return ret;
1606 /* Secondary interface's bus number and devfn 0 */
1607 parent = pdev->bus->self;
1608 while (parent != tmp) {
8c11e798 1609 ret = device_context_mapped(iommu, parent->bus->number,
ba395927
KA
1610 parent->devfn);
1611 if (!ret)
1612 return ret;
1613 parent = parent->bus->self;
1614 }
1615 if (tmp->is_pcie)
8c11e798 1616 return device_context_mapped(iommu,
1617 tmp->subordinate->number, 0);
1618 else
8c11e798 1619 return device_context_mapped(iommu,
1620 tmp->bus->number, tmp->devfn);
1621}
1622
1623static int
1624domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1625 u64 hpa, size_t size, int prot)
1626{
1627 u64 start_pfn, end_pfn;
1628 struct dma_pte *pte;
1629 int index;
1630 int addr_width = agaw_to_width(domain->agaw);
1631
1632 hpa &= (((u64)1) << addr_width) - 1;
1633
1634 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1635 return -EINVAL;
1636 iova &= PAGE_MASK;
1637 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
1638 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
1639 index = 0;
1640 while (start_pfn < end_pfn) {
5b6985ce 1641 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
1642 if (!pte)
1643 return -ENOMEM;
1644 /* We don't need lock here, nobody else
1645 * touches the iova range
1646 */
1647 BUG_ON(dma_pte_addr(pte));
1648 dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
1649 dma_set_pte_prot(pte, prot);
5331fe6f 1650 domain_flush_cache(domain, pte, sizeof(*pte));
1651 start_pfn++;
1652 index++;
1653 }
1654 return 0;
1655}
1656
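/*
 * Example of the mapping loop above (editorial addition): mapping
 * hpa 0x10000800 with size 0x1000 yields start_pfn 0x10000 and
 * end_pfn 0x10002, so two 4KiB PTEs are written even though only
 * 0x1000 bytes were requested, because the range is not page aligned.
 */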
c7151a8d 1657static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
ba395927 1658{
1659 if (!iommu)
1660 return;
1661
1662 clear_context_table(iommu, bus, devfn);
1663 iommu->flush.flush_context(iommu, 0, 0, 0,
a77b67d4 1664 DMA_CCMD_GLOBAL_INVL, 0);
8c11e798 1665 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
a77b67d4 1666 DMA_TLB_GLOBAL_FLUSH, 0);
1667}
1668
1669static void domain_remove_dev_info(struct dmar_domain *domain)
1670{
1671 struct device_domain_info *info;
1672 unsigned long flags;
c7151a8d 1673 struct intel_iommu *iommu;
1674
1675 spin_lock_irqsave(&device_domain_lock, flags);
1676 while (!list_empty(&domain->devices)) {
1677 info = list_entry(domain->devices.next,
1678 struct device_domain_info, link);
1679 list_del(&info->link);
1680 list_del(&info->global);
1681 if (info->dev)
358dd8ac 1682 info->dev->dev.archdata.iommu = NULL;
1683 spin_unlock_irqrestore(&device_domain_lock, flags);
1684
1685 iommu = device_to_iommu(info->bus, info->devfn);
1686 iommu_detach_dev(iommu, info->bus, info->devfn);
1687 free_devinfo_mem(info);
1688
1689 spin_lock_irqsave(&device_domain_lock, flags);
1690 }
1691 spin_unlock_irqrestore(&device_domain_lock, flags);
1692}
1693
1694/*
1695 * find_domain
358dd8ac 1696 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
ba395927 1697 */
38717946 1698static struct dmar_domain *
1699find_domain(struct pci_dev *pdev)
1700{
1701 struct device_domain_info *info;
1702
1703 /* No lock here, assumes no domain exit in normal case */
358dd8ac 1704 info = pdev->dev.archdata.iommu;
1705 if (info)
1706 return info->domain;
1707 return NULL;
1708}
1709
1710/* domain is initialized */
1711static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1712{
1713 struct dmar_domain *domain, *found = NULL;
1714 struct intel_iommu *iommu;
1715 struct dmar_drhd_unit *drhd;
1716 struct device_domain_info *info, *tmp;
1717 struct pci_dev *dev_tmp;
1718 unsigned long flags;
1719 int bus = 0, devfn = 0;
1720
1721 domain = find_domain(pdev);
1722 if (domain)
1723 return domain;
1724
1725 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1726 if (dev_tmp) {
1727 if (dev_tmp->is_pcie) {
1728 bus = dev_tmp->subordinate->number;
1729 devfn = 0;
1730 } else {
1731 bus = dev_tmp->bus->number;
1732 devfn = dev_tmp->devfn;
1733 }
1734 spin_lock_irqsave(&device_domain_lock, flags);
1735 list_for_each_entry(info, &device_domain_list, global) {
1736 if (info->bus == bus && info->devfn == devfn) {
1737 found = info->domain;
1738 break;
1739 }
1740 }
1741 spin_unlock_irqrestore(&device_domain_lock, flags);
1742 /* pcie-pci bridge already has a domain, use it */
1743 if (found) {
1744 domain = found;
1745 goto found_domain;
1746 }
1747 }
1748
1749 /* Allocate new domain for the device */
1750 drhd = dmar_find_matched_drhd_unit(pdev);
1751 if (!drhd) {
1752 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1753 pci_name(pdev));
1754 return NULL;
1755 }
1756 iommu = drhd->iommu;
1757
1758 domain = iommu_alloc_domain(iommu);
1759 if (!domain)
1760 goto error;
1761
1762 if (domain_init(domain, gaw)) {
1763 domain_exit(domain);
1764 goto error;
1765 }
1766
1767 /* register pcie-to-pci device */
1768 if (dev_tmp) {
1769 info = alloc_devinfo_mem();
1770 if (!info) {
1771 domain_exit(domain);
1772 goto error;
1773 }
1774 info->bus = bus;
1775 info->devfn = devfn;
1776 info->dev = NULL;
1777 info->domain = domain;
1778 /* This domain is shared by devices under p2p bridge */
3b5410e7 1779 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
1780
1781 /* pcie-to-pci bridge already has a domain, use it */
1782 found = NULL;
1783 spin_lock_irqsave(&device_domain_lock, flags);
1784 list_for_each_entry(tmp, &device_domain_list, global) {
1785 if (tmp->bus == bus && tmp->devfn == devfn) {
1786 found = tmp->domain;
1787 break;
1788 }
1789 }
1790 if (found) {
1791 free_devinfo_mem(info);
1792 domain_exit(domain);
1793 domain = found;
1794 } else {
1795 list_add(&info->link, &domain->devices);
1796 list_add(&info->global, &device_domain_list);
1797 }
1798 spin_unlock_irqrestore(&device_domain_lock, flags);
1799 }
1800
1801found_domain:
1802 info = alloc_devinfo_mem();
1803 if (!info)
1804 goto error;
1805 info->bus = pdev->bus->number;
1806 info->devfn = pdev->devfn;
1807 info->dev = pdev;
1808 info->domain = domain;
1809 spin_lock_irqsave(&device_domain_lock, flags);
1810 /* somebody else beat us to it */
1811 found = find_domain(pdev);
1812 if (found != NULL) {
1813 spin_unlock_irqrestore(&device_domain_lock, flags);
1814 if (found != domain) {
1815 domain_exit(domain);
1816 domain = found;
1817 }
1818 free_devinfo_mem(info);
1819 return domain;
1820 }
1821 list_add(&info->link, &domain->devices);
1822 list_add(&info->global, &device_domain_list);
358dd8ac 1823 pdev->dev.archdata.iommu = info;
1824 spin_unlock_irqrestore(&device_domain_lock, flags);
1825 return domain;
1826error:
1827 /* recheck it here, maybe others set it */
1828 return find_domain(pdev);
1829}
1830
1831static int iommu_prepare_identity_map(struct pci_dev *pdev,
1832 unsigned long long start,
1833 unsigned long long end)
1834{
1835 struct dmar_domain *domain;
1836 unsigned long size;
5b6985ce 1837 unsigned long long base;
1838 int ret;
1839
1840 printk(KERN_INFO
1841 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1842 pci_name(pdev), start, end);
1843 /* page table init */
1844 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1845 if (!domain)
1846 return -ENOMEM;
1847
1848 /* The address might not be aligned */
5b6985ce 1849 base = start & PAGE_MASK;
ba395927 1850 size = end - base;
5b6985ce 1851 size = PAGE_ALIGN(size);
1852 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1853 IOVA_PFN(base + size) - 1)) {
1854 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1855 ret = -ENOMEM;
1856 goto error;
1857 }
1858
1859 pr_debug("Mapping reserved region %lx@%llx for %s\n",
1860 size, base, pci_name(pdev));
1861 /*
1862 * RMRR range might have overlap with physical memory range,
1863 * clear it first
1864 */
1865 dma_pte_clear_range(domain, base, base + size);
1866
1867 ret = domain_page_mapping(domain, base, base, size,
1868 DMA_PTE_READ|DMA_PTE_WRITE);
1869 if (ret)
1870 goto error;
1871
1872 /* context entry init */
1873 ret = domain_context_mapping(domain, pdev);
1874 if (!ret)
1875 return 0;
1876error:
1877 domain_exit(domain);
1878 return ret;
1879
1880}
1881
1882static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
1883 struct pci_dev *pdev)
1884{
358dd8ac 1885 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
1886 return 0;
1887 return iommu_prepare_identity_map(pdev, rmrr->base_address,
1888 rmrr->end_address + 1);
1889}
1890
e820482c 1891#ifdef CONFIG_DMAR_GFX_WA
1892struct iommu_prepare_data {
1893 struct pci_dev *pdev;
1894 int ret;
1895};
1896
1897static int __init iommu_prepare_work_fn(unsigned long start_pfn,
1898 unsigned long end_pfn, void *datax)
1899{
1900 struct iommu_prepare_data *data;
1901
1902 data = (struct iommu_prepare_data *)datax;
1903
1904 data->ret = iommu_prepare_identity_map(data->pdev,
1905 start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
1906 return data->ret;
1907
1908}
1909
1910static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
1911{
1912 int nid;
1913 struct iommu_prepare_data data;
1914
1915 data.pdev = pdev;
1916 data.ret = 0;
1917
1918 for_each_online_node(nid) {
1919 work_with_active_regions(nid, iommu_prepare_work_fn, &data);
1920 if (data.ret)
1921 return data.ret;
1922 }
1923 return data.ret;
1924}
1925
1926static void __init iommu_prepare_gfx_mapping(void)
1927{
1928 struct pci_dev *pdev = NULL;
e820482c
KA
1929 int ret;
1930
1931 for_each_pci_dev(pdev) {
358dd8ac 1932 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
e820482c
KA
1933 !IS_GFX_DEVICE(pdev))
1934 continue;
1935 printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
1936 pci_name(pdev));
d52d53b8
YL
1937 ret = iommu_prepare_with_active_regions(pdev);
1938 if (ret)
1939 printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
e820482c
KA
1940 }
1941}
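/*
 * The CONFIG_DMAR_GFX_WA path above walks every online node's active
 * memory regions and gives each graphics device a 1:1 mapping of them:
 * a workaround so that graphics drivers which program physical addresses
 * directly (bypassing the DMA API) keep working once translation is on.
 */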
2abd7e16
MM
1942#else /* !CONFIG_DMAR_GFX_WA */
1943static inline void iommu_prepare_gfx_mapping(void)
1944{
1945 return;
1946}
e820482c
KA
1947#endif
1948
49a0429e
KA
1949#ifdef CONFIG_DMAR_FLOPPY_WA
1950static inline void iommu_prepare_isa(void)
1951{
1952 struct pci_dev *pdev;
1953 int ret;
1954
1955 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
1956 if (!pdev)
1957 return;
1958
1959 printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
1960 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
1961
1962 if (ret)
1963	printk(KERN_ERR "IOMMU: Failed to create 0-16M identity map, "
1964	       "floppy might not work\n");
1965
1966}
1967#else
1968static inline void iommu_prepare_isa(void)
1969{
1970 return;
1971}
1972#endif /* !CONFIG_DMAR_FLOPPY_WA */
1973
519a0549 1974static int __init init_dmars(void)
ba395927
KA
1975{
1976 struct dmar_drhd_unit *drhd;
1977 struct dmar_rmrr_unit *rmrr;
1978 struct pci_dev *pdev;
1979 struct intel_iommu *iommu;
80b20dd8 1980 int i, ret, unit = 0;
ba395927
KA
1981
1982 /*
1983 * for each drhd
1984 * allocate root
1985 * initialize and program root entry to not present
1986 * endfor
1987 */
1988 for_each_drhd_unit(drhd) {
5e0d2a6f 1989 g_num_of_iommus++;
1990 /*
1991	 * lock not needed as this is only incremented in the single-
1992	 * threaded kernel __init code path; all other accesses are
1993	 * read only
1994 */
1995 }
1996
d9630fe9
WH
1997 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
1998 GFP_KERNEL);
1999 if (!g_iommus) {
2000 printk(KERN_ERR "Allocating global iommu array failed\n");
2001 ret = -ENOMEM;
2002 goto error;
2003 }
2004
80b20dd8 2005 deferred_flush = kzalloc(g_num_of_iommus *
2006 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2007 if (!deferred_flush) {
d9630fe9 2008 kfree(g_iommus);
5e0d2a6f 2009 ret = -ENOMEM;
2010 goto error;
2011 }
2012
5e0d2a6f 2013 for_each_drhd_unit(drhd) {
2014 if (drhd->ignored)
2015 continue;
1886e8a9
SS
2016
2017 iommu = drhd->iommu;
d9630fe9 2018 g_iommus[iommu->seq_id] = iommu;
ba395927 2019
e61d98d8
SS
2020 ret = iommu_init_domains(iommu);
2021 if (ret)
2022 goto error;
2023
ba395927
KA
2024 /*
2025 * TBD:
2026 * we could share the same root & context tables
2027	 * among all IOMMUs. Needs to be split out later.
2028 */
2029 ret = iommu_alloc_root_entry(iommu);
2030 if (ret) {
2031 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2032 goto error;
2033 }
2034 }
2035
a77b67d4
YS
2036 for_each_drhd_unit(drhd) {
2037 if (drhd->ignored)
2038 continue;
2039
2040 iommu = drhd->iommu;
2041 if (dmar_enable_qi(iommu)) {
2042 /*
2043 * Queued Invalidate not enabled, use Register Based
2044 * Invalidate
2045 */
2046 iommu->flush.flush_context = __iommu_flush_context;
2047 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2048 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
b4e0f9eb
FT
2049 "invalidation\n",
2050 (unsigned long long)drhd->reg_base_addr);
a77b67d4
YS
2051 } else {
2052 iommu->flush.flush_context = qi_flush_context;
2053 iommu->flush.flush_iotlb = qi_flush_iotlb;
2054 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
b4e0f9eb
FT
2055 "invalidation\n",
2056 (unsigned long long)drhd->reg_base_addr);
a77b67d4
YS
2057 }
2058 }
2059
ba395927
KA
2060 /*
2061 * For each rmrr
2062 * for each dev attached to rmrr
2063 * do
2064 * locate drhd for dev, alloc domain for dev
2065 * allocate free domain
2066 * allocate page table entries for rmrr
2067 * if context not allocated for bus
2068 * allocate and init context
2069 * set present in root table for this bus
2070 * init context with domain, translation etc
2071 * endfor
2072 * endfor
2073 */
2074 for_each_rmrr_units(rmrr) {
ba395927
KA
2075 for (i = 0; i < rmrr->devices_cnt; i++) {
2076 pdev = rmrr->devices[i];
2077	/* some BIOSes list non-existent devices in the DMAR table */
2078 if (!pdev)
2079 continue;
2080 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2081 if (ret)
2082 printk(KERN_ERR
2083 "IOMMU: mapping reserved region failed\n");
2084 }
2085 }
2086
e820482c
KA
2087 iommu_prepare_gfx_mapping();
2088
49a0429e
KA
2089 iommu_prepare_isa();
2090
ba395927
KA
2091 /*
2092 * for each drhd
2093 * enable fault log
2094 * global invalidate context cache
2095 * global invalidate iotlb
2096 * enable translation
2097 */
2098 for_each_drhd_unit(drhd) {
2099 if (drhd->ignored)
2100 continue;
2101 iommu = drhd->iommu;
2102 sprintf (iommu->name, "dmar%d", unit++);
2103
2104 iommu_flush_write_buffer(iommu);
2105
3460a6d9
KA
2106 ret = dmar_set_interrupt(iommu);
2107 if (ret)
2108 goto error;
2109
ba395927
KA
2110 iommu_set_root_entry(iommu);
2111
a77b67d4
YS
2112 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
2113 0);
2114 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
2115 0);
f8bab735 2116 iommu_disable_protect_mem_regions(iommu);
2117
ba395927
KA
2118 ret = iommu_enable_translation(iommu);
2119 if (ret)
2120 goto error;
2121 }
2122
2123 return 0;
2124error:
2125 for_each_drhd_unit(drhd) {
2126 if (drhd->ignored)
2127 continue;
2128 iommu = drhd->iommu;
2129 free_iommu(iommu);
2130 }
d9630fe9 2131 kfree(g_iommus);
ba395927
KA
2132 return ret;
2133}
2134
2135static inline u64 aligned_size(u64 host_addr, size_t size)
2136{
2137 u64 addr;
5b6985ce
FY
2138 addr = (host_addr & (~PAGE_MASK)) + size;
2139 return PAGE_ALIGN(addr);
ba395927
KA
2140}
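/*
 * Worked example (assuming 4KiB pages): aligned_size() returns how many
 * bytes of page-granular mapping are needed to cover
 * [host_addr, host_addr + size).  A 0x100 byte buffer at offset 0x234
 * into a page fits in one page:
 *
 *	aligned_size(0x1234, 0x100) == PAGE_ALIGN(0x234 + 0x100) == 0x1000
 *
 * while the same buffer straddling a page boundary needs two:
 *
 *	aligned_size(0x1fe0, 0x100) == PAGE_ALIGN(0xfe0 + 0x100) == 0x2000
 */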
2141
2142struct iova *
f76aec76 2143iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
ba395927 2144{
ba395927
KA
2145 struct iova *piova;
2146
2147 /* Make sure it's in range */
ba395927 2148 end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
f76aec76 2149 if (!size || (IOVA_START_ADDR + size > end))
ba395927
KA
2150 return NULL;
2151
2152 piova = alloc_iova(&domain->iovad,
5b6985ce 2153 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
ba395927
KA
2154 return piova;
2155}
2156
f76aec76
KA
2157static struct iova *
2158__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
bb9e6d65 2159 size_t size, u64 dma_mask)
ba395927 2160{
ba395927 2161 struct pci_dev *pdev = to_pci_dev(dev);
ba395927 2162 struct iova *iova = NULL;
ba395927 2163
bb9e6d65
FT
2164 if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
2165 iova = iommu_alloc_iova(domain, size, dma_mask);
2166 else {
ba395927
KA
2167 /*
2168 * First try to allocate an io virtual address in
2169 * DMA_32BIT_MASK and if that fails then try allocating
3609801e 2170	 * from a higher range
ba395927 2171 */
f76aec76 2172 iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
ba395927 2173 if (!iova)
bb9e6d65 2174 iova = iommu_alloc_iova(domain, size, dma_mask);
ba395927
KA
2175 }
2176
2177 if (!iova) {
2178	printk(KERN_ERR "Allocating iova for %s failed\n", pci_name(pdev));
f76aec76
KA
2179 return NULL;
2180 }
2181
2182 return iova;
2183}
2184
2185static struct dmar_domain *
2186get_valid_domain_for_dev(struct pci_dev *pdev)
2187{
2188 struct dmar_domain *domain;
2189 int ret;
2190
2191 domain = get_domain_for_dev(pdev,
2192 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2193 if (!domain) {
2194 printk(KERN_ERR
2195 "Allocating domain for %s failed", pci_name(pdev));
4fe05bbc 2196 return NULL;
ba395927
KA
2197 }
2198
2199 /* make sure context mapping is ok */
5331fe6f 2200 if (unlikely(!domain_context_mapped(pdev))) {
ba395927 2201 ret = domain_context_mapping(domain, pdev);
f76aec76
KA
2202 if (ret) {
2203 printk(KERN_ERR
2204 "Domain context map for %s failed",
2205 pci_name(pdev));
4fe05bbc 2206 return NULL;
f76aec76 2207 }
ba395927
KA
2208 }
2209
f76aec76
KA
2210 return domain;
2211}
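/*
 * Domains are set up lazily: the first DMA mapping for a device lands in
 * get_valid_domain_for_dev(), which finds or allocates a dmar_domain and
 * installs the device's context entry if that has not been done yet;
 * later mappings reuse the cached domain.
 */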
2212
bb9e6d65
FT
2213static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2214 size_t size, int dir, u64 dma_mask)
f76aec76
KA
2215{
2216 struct pci_dev *pdev = to_pci_dev(hwdev);
f76aec76 2217 struct dmar_domain *domain;
5b6985ce 2218 phys_addr_t start_paddr;
f76aec76
KA
2219 struct iova *iova;
2220 int prot = 0;
6865f0d1 2221 int ret;
8c11e798 2222 struct intel_iommu *iommu;
f76aec76
KA
2223
2224 BUG_ON(dir == DMA_NONE);
358dd8ac 2225 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
6865f0d1 2226 return paddr;
f76aec76
KA
2227
2228 domain = get_valid_domain_for_dev(pdev);
2229 if (!domain)
2230 return 0;
2231
8c11e798 2232 iommu = domain_get_iommu(domain);
6865f0d1 2233 size = aligned_size((u64)paddr, size);
f76aec76 2234
bb9e6d65 2235 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
f76aec76
KA
2236 if (!iova)
2237 goto error;
2238
5b6985ce 2239 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
f76aec76 2240
ba395927
KA
2241 /*
2242 * Check if DMAR supports zero-length reads on write only
2243 * mappings..
2244 */
2245 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
8c11e798 2246 !cap_zlr(iommu->cap))
ba395927
KA
2247 prot |= DMA_PTE_READ;
2248 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2249 prot |= DMA_PTE_WRITE;
2250 /*
6865f0d1 2251	 * paddr to (paddr + size) might span a partial page, so we map the whole
ba395927 2252	 * page. Note: if two parts of one page are mapped separately, we
6865f0d1 2253	 * might have two guest addresses mapping to the same host paddr, but this
ba395927
KA
2254 * is not a big problem
2255 */
6865f0d1 2256 ret = domain_page_mapping(domain, start_paddr,
5b6985ce 2257 ((u64)paddr) & PAGE_MASK, size, prot);
ba395927
KA
2258 if (ret)
2259 goto error;
2260
f76aec76 2261 /* it's a non-present to present mapping */
8c11e798 2262 ret = iommu_flush_iotlb_psi(iommu, domain->id,
5b6985ce 2263 start_paddr, size >> VTD_PAGE_SHIFT, 1);
f76aec76 2264 if (ret)
8c11e798 2265 iommu_flush_write_buffer(iommu);
f76aec76 2266
5b6985ce 2267 return start_paddr + ((u64)paddr & (~PAGE_MASK));
ba395927 2268
ba395927 2269error:
f76aec76
KA
2270 if (iova)
2271 __free_iova(&domain->iovad, iova);
ba395927 2272 printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
5b6985ce 2273 pci_name(pdev), size, (unsigned long long)paddr, dir);
ba395927
KA
2274 return 0;
2275}
2276
ffbbef5c
FT
2277static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2278 unsigned long offset, size_t size,
2279 enum dma_data_direction dir,
2280 struct dma_attrs *attrs)
2281{
2282 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2283 dir, to_pci_dev(dev)->dma_mask);
2284}
2285
bb9e6d65
FT
2286dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
2287 size_t size, int dir)
2288{
2289 return __intel_map_single(hwdev, paddr, size, dir,
2290 to_pci_dev(hwdev)->dma_mask);
2291}
2292
5e0d2a6f 2293static void flush_unmaps(void)
2294{
80b20dd8 2295 int i, j;
5e0d2a6f 2296
5e0d2a6f 2297 timer_on = 0;
2298
2299 /* just flush them all */
2300 for (i = 0; i < g_num_of_iommus; i++) {
a2bb8459
WH
2301 struct intel_iommu *iommu = g_iommus[i];
2302 if (!iommu)
2303 continue;
c42d9f32 2304
a2bb8459 2305 if (deferred_flush[i].next) {
a77b67d4
YS
2306 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2307 DMA_TLB_GLOBAL_FLUSH, 0);
80b20dd8 2308 for (j = 0; j < deferred_flush[i].next; j++) {
2309 __free_iova(&deferred_flush[i].domain[j]->iovad,
2310 deferred_flush[i].iova[j]);
2311 }
2312 deferred_flush[i].next = 0;
2313 }
5e0d2a6f 2314 }
2315
5e0d2a6f 2316 list_size = 0;
5e0d2a6f 2317}
2318
2319static void flush_unmaps_timeout(unsigned long data)
2320{
80b20dd8 2321 unsigned long flags;
2322
2323 spin_lock_irqsave(&async_umap_flush_lock, flags);
5e0d2a6f 2324 flush_unmaps();
80b20dd8 2325 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
5e0d2a6f 2326}
2327
2328static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2329{
2330 unsigned long flags;
80b20dd8 2331 int next, iommu_id;
8c11e798 2332 struct intel_iommu *iommu;
5e0d2a6f 2333
2334 spin_lock_irqsave(&async_umap_flush_lock, flags);
80b20dd8 2335 if (list_size == HIGH_WATER_MARK)
2336 flush_unmaps();
2337
8c11e798
WH
2338 iommu = domain_get_iommu(dom);
2339 iommu_id = iommu->seq_id;
c42d9f32 2340
80b20dd8 2341 next = deferred_flush[iommu_id].next;
2342 deferred_flush[iommu_id].domain[next] = dom;
2343 deferred_flush[iommu_id].iova[next] = iova;
2344 deferred_flush[iommu_id].next++;
5e0d2a6f 2345
2346 if (!timer_on) {
2347 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2348 timer_on = 1;
2349 }
2350 list_size++;
2351 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2352}
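/*
 * add_unmap()/flush_unmaps() implement the deferred (non-strict) unmap
 * path: freed iovas are queued per iommu and released in one batch with
 * a single global iotlb flush per iommu, either when HIGH_WATER_MARK
 * entries have piled up or when the 10ms unmap_timer fires, instead of
 * flushing the iotlb on every individual unmap.
 */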
2353
ffbbef5c
FT
2354static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2355 size_t size, enum dma_data_direction dir,
2356 struct dma_attrs *attrs)
ba395927 2357{
ba395927 2358 struct pci_dev *pdev = to_pci_dev(dev);
f76aec76
KA
2359 struct dmar_domain *domain;
2360 unsigned long start_addr;
ba395927 2361 struct iova *iova;
8c11e798 2362 struct intel_iommu *iommu;
ba395927 2363
358dd8ac 2364 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
f76aec76 2365 return;
ba395927
KA
2366 domain = find_domain(pdev);
2367 BUG_ON(!domain);
2368
8c11e798
WH
2369 iommu = domain_get_iommu(domain);
2370
ba395927 2371 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
f76aec76 2372 if (!iova)
ba395927 2373 return;
ba395927 2374
5b6985ce 2375 start_addr = iova->pfn_lo << PAGE_SHIFT;
f76aec76 2376 size = aligned_size((u64)dev_addr, size);
ba395927 2377
f76aec76 2378 pr_debug("Device %s unmapping: %lx@%llx\n",
5b6985ce 2379 pci_name(pdev), size, (unsigned long long)start_addr);
ba395927 2380
f76aec76
KA
2381 /* clear the whole page */
2382 dma_pte_clear_range(domain, start_addr, start_addr + size);
2383 /* free page tables */
2384 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
5e0d2a6f 2385 if (intel_iommu_strict) {
8c11e798 2386 if (iommu_flush_iotlb_psi(iommu,
5b6985ce 2387 domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
8c11e798 2388 iommu_flush_write_buffer(iommu);
5e0d2a6f 2389 /* free iova */
2390 __free_iova(&domain->iovad, iova);
2391 } else {
2392 add_unmap(domain, iova);
2393 /*
2394	 * queue up the release of the unmap so the iotlb flush can be batched,
2395	 * saving the ~1/6th of a cpu otherwise used up by per-unmap flushes...
2396 */
5e0d2a6f 2397 }
ba395927
KA
2398}
2399
ffbbef5c
FT
2400void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
2401 int dir)
2402{
2403 intel_unmap_page(dev, dev_addr, size, dir, NULL);
2404}
2405
5b6985ce
FY
2406void *intel_alloc_coherent(struct device *hwdev, size_t size,
2407 dma_addr_t *dma_handle, gfp_t flags)
ba395927
KA
2408{
2409 void *vaddr;
2410 int order;
2411
5b6985ce 2412 size = PAGE_ALIGN(size);
ba395927
KA
2413 order = get_order(size);
2414 flags &= ~(GFP_DMA | GFP_DMA32);
2415
2416 vaddr = (void *)__get_free_pages(flags, order);
2417 if (!vaddr)
2418 return NULL;
2419 memset(vaddr, 0, size);
2420
bb9e6d65
FT
2421 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2422 DMA_BIDIRECTIONAL,
2423 hwdev->coherent_dma_mask);
ba395927
KA
2424 if (*dma_handle)
2425 return vaddr;
2426 free_pages((unsigned long)vaddr, order);
2427 return NULL;
2428}
2429
5b6985ce
FY
2430void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2431 dma_addr_t dma_handle)
ba395927
KA
2432{
2433 int order;
2434
5b6985ce 2435 size = PAGE_ALIGN(size);
ba395927
KA
2436 order = get_order(size);
2437
2438 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
2439 free_pages((unsigned long)vaddr, order);
2440}
2441
12d4d40e 2442#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
5b6985ce
FY
2443
2444void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
160c1d8e
FT
2445 int nelems, enum dma_data_direction dir,
2446 struct dma_attrs *attrs)
ba395927
KA
2447{
2448 int i;
2449 struct pci_dev *pdev = to_pci_dev(hwdev);
2450 struct dmar_domain *domain;
f76aec76
KA
2451 unsigned long start_addr;
2452 struct iova *iova;
2453 size_t size = 0;
2454 void *addr;
c03ab37c 2455 struct scatterlist *sg;
8c11e798 2456 struct intel_iommu *iommu;
ba395927 2457
358dd8ac 2458 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
ba395927
KA
2459 return;
2460
2461 domain = find_domain(pdev);
8c11e798
WH
2462 BUG_ON(!domain);
2463
2464 iommu = domain_get_iommu(domain);
ba395927 2465
c03ab37c 2466 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
f76aec76
KA
2467 if (!iova)
2468 return;
c03ab37c 2469 for_each_sg(sglist, sg, nelems, i) {
f76aec76
KA
2470 addr = SG_ENT_VIRT_ADDRESS(sg);
2471 size += aligned_size((u64)addr, sg->length);
2472 }
2473
5b6985ce 2474 start_addr = iova->pfn_lo << PAGE_SHIFT;
f76aec76
KA
2475
2476 /* clear the whole page */
2477 dma_pte_clear_range(domain, start_addr, start_addr + size);
2478 /* free page tables */
2479 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2480
8c11e798 2481 if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
5b6985ce 2482 size >> VTD_PAGE_SHIFT, 0))
8c11e798 2483 iommu_flush_write_buffer(iommu);
f76aec76
KA
2484
2485 /* free iova */
2486 __free_iova(&domain->iovad, iova);
ba395927
KA
2487}
2488
ba395927 2489static int intel_nontranslate_map_sg(struct device *hddev,
c03ab37c 2490 struct scatterlist *sglist, int nelems, int dir)
ba395927
KA
2491{
2492 int i;
c03ab37c 2493 struct scatterlist *sg;
ba395927 2494
c03ab37c 2495 for_each_sg(sglist, sg, nelems, i) {
12d4d40e 2496 BUG_ON(!sg_page(sg));
c03ab37c
FT
2497 sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
2498 sg->dma_length = sg->length;
ba395927
KA
2499 }
2500 return nelems;
2501}
2502
5b6985ce 2503int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
160c1d8e 2504 enum dma_data_direction dir, struct dma_attrs *attrs)
ba395927
KA
2505{
2506 void *addr;
2507 int i;
ba395927
KA
2508 struct pci_dev *pdev = to_pci_dev(hwdev);
2509 struct dmar_domain *domain;
f76aec76
KA
2510 size_t size = 0;
2511 int prot = 0;
2512 size_t offset = 0;
2513 struct iova *iova = NULL;
2514 int ret;
c03ab37c 2515 struct scatterlist *sg;
f76aec76 2516 unsigned long start_addr;
8c11e798 2517 struct intel_iommu *iommu;
ba395927
KA
2518
2519 BUG_ON(dir == DMA_NONE);
358dd8ac 2520 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
c03ab37c 2521 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
ba395927 2522
f76aec76
KA
2523 domain = get_valid_domain_for_dev(pdev);
2524 if (!domain)
2525 return 0;
2526
8c11e798
WH
2527 iommu = domain_get_iommu(domain);
2528
c03ab37c 2529 for_each_sg(sglist, sg, nelems, i) {
ba395927 2530 addr = SG_ENT_VIRT_ADDRESS(sg);
f76aec76
KA
2531 addr = (void *)virt_to_phys(addr);
2532 size += aligned_size((u64)addr, sg->length);
2533 }
2534
bb9e6d65 2535 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
f76aec76 2536 if (!iova) {
c03ab37c 2537 sglist->dma_length = 0;
f76aec76
KA
2538 return 0;
2539 }
2540
2541 /*
2542 * Check if DMAR supports zero-length reads on write only
2543 * mappings..
2544 */
2545 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
8c11e798 2546 !cap_zlr(iommu->cap))
f76aec76
KA
2547 prot |= DMA_PTE_READ;
2548 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2549 prot |= DMA_PTE_WRITE;
2550
5b6985ce 2551 start_addr = iova->pfn_lo << PAGE_SHIFT;
f76aec76 2552 offset = 0;
c03ab37c 2553 for_each_sg(sglist, sg, nelems, i) {
f76aec76
KA
2554 addr = SG_ENT_VIRT_ADDRESS(sg);
2555 addr = (void *)virt_to_phys(addr);
2556 size = aligned_size((u64)addr, sg->length);
2557 ret = domain_page_mapping(domain, start_addr + offset,
5b6985ce 2558 ((u64)addr) & PAGE_MASK,
f76aec76
KA
2559 size, prot);
2560 if (ret) {
2561 /* clear the page */
2562 dma_pte_clear_range(domain, start_addr,
2563 start_addr + offset);
2564 /* free page tables */
2565 dma_pte_free_pagetable(domain, start_addr,
2566 start_addr + offset);
2567 /* free iova */
2568 __free_iova(&domain->iovad, iova);
ba395927
KA
2569 return 0;
2570 }
f76aec76 2571 sg->dma_address = start_addr + offset +
5b6985ce 2572 ((u64)addr & (~PAGE_MASK));
ba395927 2573 sg->dma_length = sg->length;
f76aec76 2574 offset += size;
ba395927
KA
2575 }
2576
ba395927 2577 /* it's a non-present to present mapping */
8c11e798 2578 if (iommu_flush_iotlb_psi(iommu, domain->id,
5b6985ce 2579 start_addr, offset >> VTD_PAGE_SHIFT, 1))
8c11e798 2580 iommu_flush_write_buffer(iommu);
ba395927
KA
2581 return nelems;
2582}
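/*
 * Note that intel_map_sg() allocates a single iova region sized for the
 * whole scatterlist (the sum of the aligned segment sizes) and maps each
 * segment at a consecutive page-aligned offset within it, so one iova
 * allocation and one psi iotlb flush cover the entire list.
 */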
2583
160c1d8e 2584struct dma_map_ops intel_dma_ops = {
ba395927
KA
2585 .alloc_coherent = intel_alloc_coherent,
2586 .free_coherent = intel_free_coherent,
ba395927
KA
2587 .map_sg = intel_map_sg,
2588 .unmap_sg = intel_unmap_sg,
ffbbef5c
FT
2589 .map_page = intel_map_page,
2590 .unmap_page = intel_unmap_page,
ba395927
KA
2591};
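/*
 * Illustrative sketch (not taken from this file): once intel_iommu_init()
 * has installed intel_dma_ops as the global dma_ops, an ordinary driver
 * call sequence such as
 *
 *	dma_addr_t handle;
 *	void *buf = kmalloc(4096, GFP_KERNEL);
 *
 *	handle = dma_map_single(&pdev->dev, buf, 4096, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(&pdev->dev, handle, 4096, DMA_TO_DEVICE);
 *
 * (pdev being the driver's struct pci_dev) is dispatched to
 * intel_map_page()/intel_unmap_page() above, and dma_alloc_coherent()/
 * dma_free_coherent() end up in intel_alloc_coherent()/
 * intel_free_coherent().
 */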
2592
2593static inline int iommu_domain_cache_init(void)
2594{
2595 int ret = 0;
2596
2597 iommu_domain_cache = kmem_cache_create("iommu_domain",
2598 sizeof(struct dmar_domain),
2599 0,
2600 SLAB_HWCACHE_ALIGN,
2601
2602 NULL);
2603 if (!iommu_domain_cache) {
2604 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2605 ret = -ENOMEM;
2606 }
2607
2608 return ret;
2609}
2610
2611static inline int iommu_devinfo_cache_init(void)
2612{
2613 int ret = 0;
2614
2615 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2616 sizeof(struct device_domain_info),
2617 0,
2618 SLAB_HWCACHE_ALIGN,
ba395927
KA
2619 NULL);
2620 if (!iommu_devinfo_cache) {
2621 printk(KERN_ERR "Couldn't create devinfo cache\n");
2622 ret = -ENOMEM;
2623 }
2624
2625 return ret;
2626}
2627
2628static inline int iommu_iova_cache_init(void)
2629{
2630 int ret = 0;
2631
2632 iommu_iova_cache = kmem_cache_create("iommu_iova",
2633 sizeof(struct iova),
2634 0,
2635 SLAB_HWCACHE_ALIGN,
ba395927
KA
2636 NULL);
2637 if (!iommu_iova_cache) {
2638 printk(KERN_ERR "Couldn't create iova cache\n");
2639 ret = -ENOMEM;
2640 }
2641
2642 return ret;
2643}
2644
2645static int __init iommu_init_mempool(void)
2646{
2647 int ret;
2648 ret = iommu_iova_cache_init();
2649 if (ret)
2650 return ret;
2651
2652 ret = iommu_domain_cache_init();
2653 if (ret)
2654 goto domain_error;
2655
2656 ret = iommu_devinfo_cache_init();
2657 if (!ret)
2658 return ret;
2659
2660 kmem_cache_destroy(iommu_domain_cache);
2661domain_error:
2662 kmem_cache_destroy(iommu_iova_cache);
2663
2664 return -ENOMEM;
2665}
2666
2667static void __init iommu_exit_mempool(void)
2668{
2669 kmem_cache_destroy(iommu_devinfo_cache);
2670 kmem_cache_destroy(iommu_domain_cache);
2671 kmem_cache_destroy(iommu_iova_cache);
2672
2673}
2674
ba395927
KA
2675static void __init init_no_remapping_devices(void)
2676{
2677 struct dmar_drhd_unit *drhd;
2678
2679 for_each_drhd_unit(drhd) {
2680 if (!drhd->include_all) {
2681 int i;
2682 for (i = 0; i < drhd->devices_cnt; i++)
2683 if (drhd->devices[i] != NULL)
2684 break;
2685 /* ignore DMAR unit if no pci devices exist */
2686 if (i == drhd->devices_cnt)
2687 drhd->ignored = 1;
2688 }
2689 }
2690
2691 if (dmar_map_gfx)
2692 return;
2693
2694 for_each_drhd_unit(drhd) {
2695 int i;
2696 if (drhd->ignored || drhd->include_all)
2697 continue;
2698
2699 for (i = 0; i < drhd->devices_cnt; i++)
2700 if (drhd->devices[i] &&
2701 !IS_GFX_DEVICE(drhd->devices[i]))
2702 break;
2703
2704 if (i < drhd->devices_cnt)
2705 continue;
2706
2707 /* bypass IOMMU if it is just for gfx devices */
2708 drhd->ignored = 1;
2709 for (i = 0; i < drhd->devices_cnt; i++) {
2710 if (!drhd->devices[i])
2711 continue;
358dd8ac 2712 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
ba395927
KA
2713 }
2714 }
2715}
2716
2717int __init intel_iommu_init(void)
2718{
2719 int ret = 0;
2720
ba395927
KA
2721 if (dmar_table_init())
2722 return -ENODEV;
2723
1886e8a9
SS
2724 if (dmar_dev_scope_init())
2725 return -ENODEV;
2726
2ae21010
SS
2727 /*
2728 * Check the need for DMA-remapping initialization now.
2729 * Above initialization will also be used by Interrupt-remapping.
2730 */
2731 if (no_iommu || swiotlb || dmar_disabled)
2732 return -ENODEV;
2733
ba395927
KA
2734 iommu_init_mempool();
2735 dmar_init_reserved_ranges();
2736
2737 init_no_remapping_devices();
2738
2739 ret = init_dmars();
2740 if (ret) {
2741 printk(KERN_ERR "IOMMU: dmar init failed\n");
2742 put_iova_domain(&reserved_iova_list);
2743 iommu_exit_mempool();
2744 return ret;
2745 }
2746 printk(KERN_INFO
2747 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
2748
5e0d2a6f 2749 init_timer(&unmap_timer);
ba395927
KA
2750 force_iommu = 1;
2751 dma_ops = &intel_dma_ops;
a8bcbb0d
JR
2752
2753 register_iommu(&intel_iommu_ops);
2754
ba395927
KA
2755 return 0;
2756}
e820482c 2757
c7151a8d
WH
2758static int vm_domain_add_dev_info(struct dmar_domain *domain,
2759 struct pci_dev *pdev)
2760{
2761 struct device_domain_info *info;
2762 unsigned long flags;
2763
2764 info = alloc_devinfo_mem();
2765 if (!info)
2766 return -ENOMEM;
2767
2768 info->bus = pdev->bus->number;
2769 info->devfn = pdev->devfn;
2770 info->dev = pdev;
2771 info->domain = domain;
2772
2773 spin_lock_irqsave(&device_domain_lock, flags);
2774 list_add(&info->link, &domain->devices);
2775 list_add(&info->global, &device_domain_list);
2776 pdev->dev.archdata.iommu = info;
2777 spin_unlock_irqrestore(&device_domain_lock, flags);
2778
2779 return 0;
2780}
2781
2782static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
2783 struct pci_dev *pdev)
2784{
2785 struct device_domain_info *info;
2786 struct intel_iommu *iommu;
2787 unsigned long flags;
2788 int found = 0;
2789 struct list_head *entry, *tmp;
2790
2791 iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
2792 if (!iommu)
2793 return;
2794
2795 spin_lock_irqsave(&device_domain_lock, flags);
2796 list_for_each_safe(entry, tmp, &domain->devices) {
2797 info = list_entry(entry, struct device_domain_info, link);
2798 if (info->bus == pdev->bus->number &&
2799 info->devfn == pdev->devfn) {
2800 list_del(&info->link);
2801 list_del(&info->global);
2802 if (info->dev)
2803 info->dev->dev.archdata.iommu = NULL;
2804 spin_unlock_irqrestore(&device_domain_lock, flags);
2805
2806 iommu_detach_dev(iommu, info->bus, info->devfn);
2807 free_devinfo_mem(info);
2808
2809 spin_lock_irqsave(&device_domain_lock, flags);
2810
2811 if (found)
2812 break;
2813 else
2814 continue;
2815 }
2816
2817		/* if there are no other devices under the same iommu
2818		 * owned by this domain, clear this iommu from iommu_bmp
2819		 * and update the iommu count and coherency
2820 */
2821 if (device_to_iommu(info->bus, info->devfn) == iommu)
2822 found = 1;
2823 }
2824
2825 if (found == 0) {
2826 unsigned long tmp_flags;
2827 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
2828 clear_bit(iommu->seq_id, &domain->iommu_bmp);
2829 domain->iommu_count--;
2830 domain_update_iommu_coherency(domain);
2831 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
2832 }
2833
2834 spin_unlock_irqrestore(&device_domain_lock, flags);
2835}
2836
2837static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
2838{
2839 struct device_domain_info *info;
2840 struct intel_iommu *iommu;
2841 unsigned long flags1, flags2;
2842
2843 spin_lock_irqsave(&device_domain_lock, flags1);
2844 while (!list_empty(&domain->devices)) {
2845 info = list_entry(domain->devices.next,
2846 struct device_domain_info, link);
2847 list_del(&info->link);
2848 list_del(&info->global);
2849 if (info->dev)
2850 info->dev->dev.archdata.iommu = NULL;
2851
2852 spin_unlock_irqrestore(&device_domain_lock, flags1);
2853
2854 iommu = device_to_iommu(info->bus, info->devfn);
2855 iommu_detach_dev(iommu, info->bus, info->devfn);
2856
2857 /* clear this iommu in iommu_bmp, update iommu count
2858 * and coherency
2859 */
2860 spin_lock_irqsave(&domain->iommu_lock, flags2);
2861 if (test_and_clear_bit(iommu->seq_id,
2862 &domain->iommu_bmp)) {
2863 domain->iommu_count--;
2864 domain_update_iommu_coherency(domain);
2865 }
2866 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
2867
2868 free_devinfo_mem(info);
2869 spin_lock_irqsave(&device_domain_lock, flags1);
2870 }
2871 spin_unlock_irqrestore(&device_domain_lock, flags1);
2872}
2873
5e98c4b1
WH
2874/* domain id for a virtual machine; it is never set in a context entry */
2875static unsigned long vm_domid;
2876
fe40f1e0
WH
2877static int vm_domain_min_agaw(struct dmar_domain *domain)
2878{
2879 int i;
2880 int min_agaw = domain->agaw;
2881
2882 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
2883 for (; i < g_num_of_iommus; ) {
2884 if (min_agaw > g_iommus[i]->agaw)
2885 min_agaw = g_iommus[i]->agaw;
2886
2887 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
2888 }
2889
2890 return min_agaw;
2891}
2892
5e98c4b1
WH
2893static struct dmar_domain *iommu_alloc_vm_domain(void)
2894{
2895 struct dmar_domain *domain;
2896
2897 domain = alloc_domain_mem();
2898 if (!domain)
2899 return NULL;
2900
2901 domain->id = vm_domid++;
2902 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
2903 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
2904
2905 return domain;
2906}
2907
2908static int vm_domain_init(struct dmar_domain *domain, int guest_width)
2909{
2910 int adjust_width;
2911
2912 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
2913 spin_lock_init(&domain->mapping_lock);
2914 spin_lock_init(&domain->iommu_lock);
2915
2916 domain_reserve_special_ranges(domain);
2917
2918 /* calculate AGAW */
2919 domain->gaw = guest_width;
2920 adjust_width = guestwidth_to_adjustwidth(guest_width);
2921 domain->agaw = width_to_agaw(adjust_width);
2922
2923 INIT_LIST_HEAD(&domain->devices);
2924
2925 domain->iommu_count = 0;
2926 domain->iommu_coherency = 0;
fe40f1e0 2927 domain->max_addr = 0;
5e98c4b1
WH
2928
2929 /* always allocate the top pgd */
2930 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
2931 if (!domain->pgd)
2932 return -ENOMEM;
2933 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
2934 return 0;
2935}
2936
2937static void iommu_free_vm_domain(struct dmar_domain *domain)
2938{
2939 unsigned long flags;
2940 struct dmar_drhd_unit *drhd;
2941 struct intel_iommu *iommu;
2942 unsigned long i;
2943 unsigned long ndomains;
2944
2945 for_each_drhd_unit(drhd) {
2946 if (drhd->ignored)
2947 continue;
2948 iommu = drhd->iommu;
2949
2950 ndomains = cap_ndoms(iommu->cap);
2951 i = find_first_bit(iommu->domain_ids, ndomains);
2952 for (; i < ndomains; ) {
2953 if (iommu->domains[i] == domain) {
2954 spin_lock_irqsave(&iommu->lock, flags);
2955 clear_bit(i, iommu->domain_ids);
2956 iommu->domains[i] = NULL;
2957 spin_unlock_irqrestore(&iommu->lock, flags);
2958 break;
2959 }
2960 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
2961 }
2962 }
2963}
2964
2965static void vm_domain_exit(struct dmar_domain *domain)
2966{
2967 u64 end;
2968
2969	/* Domain 0 is reserved, so don't process it */
2970 if (!domain)
2971 return;
2972
2973 vm_domain_remove_all_dev_info(domain);
2974 /* destroy iovas */
2975 put_iova_domain(&domain->iovad);
2976 end = DOMAIN_MAX_ADDR(domain->gaw);
2977 end = end & (~VTD_PAGE_MASK);
2978
2979 /* clear ptes */
2980 dma_pte_clear_range(domain, 0, end);
2981
2982 /* free page tables */
2983 dma_pte_free_pagetable(domain, 0, end);
2984
2985 iommu_free_vm_domain(domain);
2986 free_domain_mem(domain);
2987}
2988
5d450806 2989static int intel_iommu_domain_init(struct iommu_domain *domain)
38717946 2990{
5d450806 2991 struct dmar_domain *dmar_domain;
38717946 2992
5d450806
JR
2993 dmar_domain = iommu_alloc_vm_domain();
2994 if (!dmar_domain) {
38717946 2995 printk(KERN_ERR
5d450806
JR
2996 "intel_iommu_domain_init: dmar_domain == NULL\n");
2997 return -ENOMEM;
38717946 2998 }
5d450806 2999 if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
38717946 3000 printk(KERN_ERR
5d450806
JR
3001 "intel_iommu_domain_init() failed\n");
3002 vm_domain_exit(dmar_domain);
3003 return -ENOMEM;
38717946 3004 }
5d450806 3005 domain->priv = dmar_domain;
faa3d6f5 3006
5d450806 3007 return 0;
38717946 3008}
38717946 3009
5d450806 3010static void intel_iommu_domain_destroy(struct iommu_domain *domain)
38717946 3011{
5d450806
JR
3012 struct dmar_domain *dmar_domain = domain->priv;
3013
3014 domain->priv = NULL;
3015 vm_domain_exit(dmar_domain);
38717946 3016}
38717946 3017
4c5478c9
JR
3018static int intel_iommu_attach_device(struct iommu_domain *domain,
3019 struct device *dev)
38717946 3020{
4c5478c9
JR
3021 struct dmar_domain *dmar_domain = domain->priv;
3022 struct pci_dev *pdev = to_pci_dev(dev);
fe40f1e0
WH
3023 struct intel_iommu *iommu;
3024 int addr_width;
3025 u64 end;
faa3d6f5
WH
3026 int ret;
3027
3028 /* normally pdev is not mapped */
3029 if (unlikely(domain_context_mapped(pdev))) {
3030 struct dmar_domain *old_domain;
3031
3032 old_domain = find_domain(pdev);
3033 if (old_domain) {
4c5478c9 3034 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
faa3d6f5
WH
3035 vm_domain_remove_one_dev_info(old_domain, pdev);
3036 else
3037 domain_remove_dev_info(old_domain);
3038 }
3039 }
3040
fe40f1e0
WH
3041 iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
3042 if (!iommu)
3043 return -ENODEV;
3044
3045 /* check if this iommu agaw is sufficient for max mapped address */
3046 addr_width = agaw_to_width(iommu->agaw);
3047 end = DOMAIN_MAX_ADDR(addr_width);
3048 end = end & VTD_PAGE_MASK;
4c5478c9 3049 if (end < dmar_domain->max_addr) {
fe40f1e0
WH
3050 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3051 "sufficient for the mapped address (%llx)\n",
4c5478c9 3052 __func__, iommu->agaw, dmar_domain->max_addr);
fe40f1e0
WH
3053 return -EFAULT;
3054 }
3055
4c5478c9 3056 ret = domain_context_mapping(dmar_domain, pdev);
faa3d6f5
WH
3057 if (ret)
3058 return ret;
3059
4c5478c9 3060 ret = vm_domain_add_dev_info(dmar_domain, pdev);
faa3d6f5 3061 return ret;
38717946 3062}
38717946 3063
4c5478c9
JR
3064static void intel_iommu_detach_device(struct iommu_domain *domain,
3065 struct device *dev)
38717946 3066{
4c5478c9
JR
3067 struct dmar_domain *dmar_domain = domain->priv;
3068 struct pci_dev *pdev = to_pci_dev(dev);
3069
3070 vm_domain_remove_one_dev_info(dmar_domain, pdev);
faa3d6f5 3071}
c7151a8d 3072
dde57a21
JR
3073static int intel_iommu_map_range(struct iommu_domain *domain,
3074 unsigned long iova, phys_addr_t hpa,
3075 size_t size, int iommu_prot)
faa3d6f5 3076{
dde57a21 3077 struct dmar_domain *dmar_domain = domain->priv;
fe40f1e0
WH
3078 u64 max_addr;
3079 int addr_width;
dde57a21 3080 int prot = 0;
faa3d6f5 3081 int ret;
fe40f1e0 3082
dde57a21
JR
3083 if (iommu_prot & IOMMU_READ)
3084 prot |= DMA_PTE_READ;
3085 if (iommu_prot & IOMMU_WRITE)
3086 prot |= DMA_PTE_WRITE;
3087
fe40f1e0 3088 max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
dde57a21 3089 if (dmar_domain->max_addr < max_addr) {
fe40f1e0
WH
3090 int min_agaw;
3091 u64 end;
3092
3093 /* check if minimum agaw is sufficient for mapped address */
dde57a21 3094 min_agaw = vm_domain_min_agaw(dmar_domain);
fe40f1e0
WH
3095 addr_width = agaw_to_width(min_agaw);
3096 end = DOMAIN_MAX_ADDR(addr_width);
3097 end = end & VTD_PAGE_MASK;
3098 if (end < max_addr) {
3099 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3100 "sufficient for the mapped address (%llx)\n",
3101 __func__, min_agaw, max_addr);
3102 return -EFAULT;
3103 }
dde57a21 3104 dmar_domain->max_addr = max_addr;
fe40f1e0
WH
3105 }
3106
dde57a21 3107 ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
faa3d6f5 3108 return ret;
38717946 3109}
38717946 3110
dde57a21
JR
3111static void intel_iommu_unmap_range(struct iommu_domain *domain,
3112 unsigned long iova, size_t size)
38717946 3113{
dde57a21 3114 struct dmar_domain *dmar_domain = domain->priv;
faa3d6f5
WH
3115 dma_addr_t base;
3116
3117 /* The address might not be aligned */
3118 base = iova & VTD_PAGE_MASK;
3119 size = VTD_PAGE_ALIGN(size);
dde57a21 3120 dma_pte_clear_range(dmar_domain, base, base + size);
fe40f1e0 3121
dde57a21
JR
3122 if (dmar_domain->max_addr == base + size)
3123 dmar_domain->max_addr = base;
38717946 3124}
38717946 3125
d14d6577
JR
3126static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3127 unsigned long iova)
38717946 3128{
d14d6577 3129 struct dmar_domain *dmar_domain = domain->priv;
38717946 3130 struct dma_pte *pte;
faa3d6f5 3131 u64 phys = 0;
38717946 3132
d14d6577 3133 pte = addr_to_dma_pte(dmar_domain, iova);
38717946 3134 if (pte)
faa3d6f5 3135 phys = dma_pte_addr(pte);
38717946 3136
faa3d6f5 3137 return phys;
38717946 3138}
a8bcbb0d
JR
3139
3140static struct iommu_ops intel_iommu_ops = {
3141 .domain_init = intel_iommu_domain_init,
3142 .domain_destroy = intel_iommu_domain_destroy,
3143 .attach_dev = intel_iommu_attach_device,
3144 .detach_dev = intel_iommu_detach_device,
3145 .map = intel_iommu_map_range,
3146 .unmap = intel_iommu_unmap_range,
3147 .iova_to_phys = intel_iommu_iova_to_phys,
3148};
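/*
 * Illustrative sketch (not taken from this file): after
 * register_iommu(&intel_iommu_ops) in intel_iommu_init(), a user of the
 * generic IOMMU API (e.g. KVM device assignment) exercises these
 * callbacks roughly as follows, where pdev, iova, hpa and size stand for
 * the caller's own values:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	iommu_attach_device(dom, &pdev->dev);
 *	iommu_map_range(dom, iova, hpa, size, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap_range(dom, iova, size);
 *	iommu_detach_device(dom, &pdev->dev);
 *	iommu_domain_free(dom);
 */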