git.ipfire.org Git - thirdparty/linux.git/blobdiff - drivers/iommu/amd/iommu.c
Merge tag 'iommu-updates-v6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro...
[thirdparty/linux.git] / drivers / iommu / amd / iommu.c
index b399c57413784688e69beaa54f2fed5b23d0462c..fcc987f5d4edc3ae87335fceed57eb26d7ba9b00 100644 (file)
@@ -45,8 +45,6 @@
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
-#define LOOP_TIMEOUT   100000
-
 /* IO virtual address start page frame number */
 #define IOVA_START_PFN         (1)
 #define IOVA_PFN(addr)         ((addr) >> PAGE_SHIFT)
@@ -68,7 +66,6 @@ LIST_HEAD(acpihid_map);
 const struct iommu_ops amd_iommu_ops;
 const struct iommu_dirty_ops amd_dirty_ops;
 
-static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
 int amd_iommu_max_glx_val = -1;
 
 /*
@@ -81,7 +78,6 @@ struct iommu_cmd {
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void detach_device(struct device *dev);
-static int domain_enable_v2(struct protection_domain *domain, int pasids);
 
 /****************************************************************************
  *
@@ -324,24 +320,141 @@ static struct iommu_group *acpihid_device_group(struct device *dev)
        return entry->group;
 }
 
-static bool pci_iommuv2_capable(struct pci_dev *pdev)
+static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data)
 {
-       static const int caps[] = {
-               PCI_EXT_CAP_ID_PRI,
-               PCI_EXT_CAP_ID_PASID,
-       };
-       int i, pos;
+       return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP);
+}
 
-       if (!pci_ats_supported(pdev))
-               return false;
+static u32 pdev_get_caps(struct pci_dev *pdev)
+{
+       int features;
+       u32 flags = 0;
+
+       if (pci_ats_supported(pdev))
+               flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+
+       if (pci_pri_supported(pdev))
+               flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
+
+       features = pci_pasid_features(pdev);
+       if (features >= 0) {
+               flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
+
+               if (features & PCI_PASID_CAP_EXEC)
+                       flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
 
-       for (i = 0; i < 2; ++i) {
-               pos = pci_find_ext_capability(pdev, caps[i]);
-               if (pos == 0)
-                       return false;
+               if (features & PCI_PASID_CAP_PRIV)
+                       flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
        }
 
-       return true;
+       return flags;
+}
+
+static inline int pdev_enable_cap_ats(struct pci_dev *pdev)
+{
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+       int ret = -EINVAL;
+
+       if (dev_data->ats_enabled)
+               return 0;
+
+       if (amd_iommu_iotlb_sup &&
+           (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) {
+               ret = pci_enable_ats(pdev, PAGE_SHIFT);
+               if (!ret) {
+                       dev_data->ats_enabled = 1;
+                       dev_data->ats_qdep    = pci_ats_queue_depth(pdev);
+               }
+       }
+
+       return ret;
+}
+
+static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
+{
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+
+       if (dev_data->ats_enabled) {
+               pci_disable_ats(pdev);
+               dev_data->ats_enabled = 0;
+       }
+}
+
+int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
+{
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+       int ret = -EINVAL;
+
+       if (dev_data->pri_enabled)
+               return 0;
+
+       if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
+               /*
+                * First reset the PRI state of the device.
+                * FIXME: Hardcode number of outstanding requests for now
+                */
+               if (!pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) {
+                       dev_data->pri_enabled = 1;
+                       dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);
+
+                       ret = 0;
+               }
+       }
+
+       return ret;
+}
+
+void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev)
+{
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+
+       if (dev_data->pri_enabled) {
+               pci_disable_pri(pdev);
+               dev_data->pri_enabled = 0;
+       }
+}
+
+static inline int pdev_enable_cap_pasid(struct pci_dev *pdev)
+{
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+       int ret = -EINVAL;
+
+       if (dev_data->pasid_enabled)
+               return 0;
+
+       if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) {
+               /* Only allow access to user-accessible pages */
+               ret = pci_enable_pasid(pdev, 0);
+               if (!ret)
+                       dev_data->pasid_enabled = 1;
+       }
+
+       return ret;
+}
+
+static inline void pdev_disable_cap_pasid(struct pci_dev *pdev)
+{
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+
+       if (dev_data->pasid_enabled) {
+               pci_disable_pasid(pdev);
+               dev_data->pasid_enabled = 0;
+       }
+}
+
+static void pdev_enable_caps(struct pci_dev *pdev)
+{
+       pdev_enable_cap_ats(pdev);
+       pdev_enable_cap_pasid(pdev);
+       amd_iommu_pdev_enable_cap_pri(pdev);
+
+}
+
+static void pdev_disable_caps(struct pci_dev *pdev)
+{
+       pdev_disable_cap_ats(pdev);
+       pdev_disable_cap_pasid(pdev);
+       amd_iommu_pdev_disable_cap_pri(pdev);
 }
 
 /*
@@ -401,8 +514,8 @@ static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
         * it'll be forced to go into translation mode.
         */
        if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
-           dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
-               dev_data->iommu_v2 = iommu->is_iommu_v2;
+           dev_is_pci(dev) && amd_iommu_gt_ppr_supported()) {
+               dev_data->flags = pdev_get_caps(to_pci_dev(dev));
        }
 
        dev_iommu_priv_set(dev, dev_data);
@@ -703,24 +816,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
        writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 }
 
-static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
-{
-       struct amd_iommu_fault fault;
-
-       if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
-               pr_err_ratelimited("Unknown PPR request received\n");
-               return;
-       }
-
-       fault.address   = raw[1];
-       fault.pasid     = PPR_PASID(raw[0]);
-       fault.sbdf      = PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, PPR_DEVID(raw[0]));
-       fault.tag       = PPR_TAG(raw[0]);
-       fault.flags     = PPR_FLAGS(raw[0]);
-
-       atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
-}
-
 static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 {
        u32 head, tail;
@@ -766,8 +861,7 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
                head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
                writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 
-               /* Handle PPR entry */
-               iommu_handle_ppr_entry(iommu, entry);
+               /* TODO: PPR Handler will be added when we add IOPF support */
 
                /* Refresh ring-buffer information */
                head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
@@ -1096,7 +1190,7 @@ static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
 }
 
 static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
-                              int status, int tag, bool gn)
+                              int status, int tag, u8 gn)
 {
        memset(cmd, 0, sizeof(*cmd));
 
@@ -1300,7 +1394,7 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
 
 void iommu_flush_all_caches(struct amd_iommu *iommu)
 {
-       if (iommu_feature(iommu, FEATURE_IA)) {
+       if (check_feature(FEATURE_IA)) {
                amd_iommu_flush_all(iommu);
        } else {
                amd_iommu_flush_dte_all(iommu);
@@ -1319,7 +1413,7 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
        struct iommu_cmd cmd;
        int qdep;
 
-       qdep     = dev_data->ats.qdep;
+       qdep     = dev_data->ats_qdep;
        iommu    = rlookup_amd_iommu(dev_data->dev);
        if (!iommu)
                return -EINVAL;
@@ -1370,7 +1464,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
                        return ret;
        }
 
-       if (dev_data->ats.enabled)
+       if (dev_data->ats_enabled)
                ret = device_flush_iotlb(dev_data, 0, ~0UL);
 
        return ret;
@@ -1403,7 +1497,7 @@ static void __domain_flush_pages(struct protection_domain *domain,
 
        list_for_each_entry(dev_data, &domain->dev_list, list) {
 
-               if (!dev_data->ats.enabled)
+               if (!dev_data->ats_enabled)
                        continue;
 
                ret |= device_flush_iotlb(dev_data, address, size);
@@ -1579,6 +1673,42 @@ static void free_gcr3_table(struct protection_domain *domain)
        free_page((unsigned long)domain->gcr3_tbl);
 }
 
+/*
+ * Number of GCR3 table levels required. Each level is a 4-Kbyte
+ * page and can contain up to 512 entries.
+ */
+static int get_gcr3_levels(int pasids)
+{
+       int levels;
+
+       if (pasids == -1)
+               return amd_iommu_max_glx_val;
+
+       levels = get_count_order(pasids);
+
+       return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
+}
+
+/* Note: This function expects iommu_domain->lock to be held prior to calling the function. */
+static int setup_gcr3_table(struct protection_domain *domain, int pasids)
+{
+       int levels = get_gcr3_levels(pasids);
+
+       if (levels > amd_iommu_max_glx_val)
+               return -EINVAL;
+
+       domain->gcr3_tbl = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
+       if (domain->gcr3_tbl == NULL)
+               return -ENOMEM;
+
+       domain->glx      = levels;
+       domain->flags   |= PD_IOMMUV2_MASK;
+
+       amd_iommu_domain_update(domain);
+
+       return 0;
+}
+
 static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
                          struct protection_domain *domain, bool ats, bool ppr)
 {
@@ -1607,10 +1737,8 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
        if (ats)
                flags |= DTE_FLAG_IOTLB;
 
-       if (ppr) {
-               if (iommu_feature(iommu, FEATURE_EPHSUP))
-                       pte_root |= 1ULL << DEV_ENTRY_PPR;
-       }
+       if (ppr)
+               pte_root |= 1ULL << DEV_ENTRY_PPR;
 
        if (domain->dirty_tracking)
                pte_root |= DTE_FLAG_HAD;
@@ -1690,7 +1818,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
        iommu = rlookup_amd_iommu(dev_data->dev);
        if (!iommu)
                return;
-       ats   = dev_data->ats.enabled;
+       ats   = dev_data->ats_enabled;
 
        /* Update data structures */
        dev_data->domain = domain;
@@ -1706,7 +1834,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
 
        /* Update device table */
        set_dte_entry(iommu, dev_data->devid, domain,
-                     ats, dev_data->iommu_v2);
+                     ats, dev_data->ppr);
        clone_aliases(iommu, dev_data->dev);
 
        device_flush_dte(dev_data);
@@ -1741,48 +1869,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
        domain->dev_cnt                 -= 1;
 }
 
-static void pdev_iommuv2_disable(struct pci_dev *pdev)
-{
-       pci_disable_ats(pdev);
-       pci_disable_pri(pdev);
-       pci_disable_pasid(pdev);
-}
-
-static int pdev_pri_ats_enable(struct pci_dev *pdev)
-{
-       int ret;
-
-       /* Only allow access to user-accessible pages */
-       ret = pci_enable_pasid(pdev, 0);
-       if (ret)
-               return ret;
-
-       /* First reset the PRI state of the device */
-       ret = pci_reset_pri(pdev);
-       if (ret)
-               goto out_err_pasid;
-
-       /* Enable PRI */
-       /* FIXME: Hardcode number of outstanding requests for now */
-       ret = pci_enable_pri(pdev, 32);
-       if (ret)
-               goto out_err_pasid;
-
-       ret = pci_enable_ats(pdev, PAGE_SHIFT);
-       if (ret)
-               goto out_err_pri;
-
-       return 0;
-
-out_err_pri:
-       pci_disable_pri(pdev);
-
-out_err_pasid:
-       pci_disable_pasid(pdev);
-
-       return ret;
-}
-
 /*
  * If a device is not yet associated with a domain, this function makes the
  * device visible in the domain
@@ -1791,9 +1877,8 @@ static int attach_device(struct device *dev,
                         struct protection_domain *domain)
 {
        struct iommu_dev_data *dev_data;
-       struct pci_dev *pdev;
        unsigned long flags;
-       int ret;
+       int ret = 0;
 
        spin_lock_irqsave(&domain->lock, flags);
 
@@ -1801,45 +1886,13 @@ static int attach_device(struct device *dev,
 
        spin_lock(&dev_data->lock);
 
-       ret = -EBUSY;
-       if (dev_data->domain != NULL)
+       if (dev_data->domain != NULL) {
+               ret = -EBUSY;
                goto out;
-
-       if (!dev_is_pci(dev))
-               goto skip_ats_check;
-
-       pdev = to_pci_dev(dev);
-       if (domain->flags & PD_IOMMUV2_MASK) {
-               struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
-
-               ret = -EINVAL;
-
-               /*
-                * In case of using AMD_IOMMU_V1 page table mode and the device
-                * is enabling for PPR/ATS support (using v2 table),
-                * we need to make sure that the domain type is identity map.
-                */
-               if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
-                   def_domain->type != IOMMU_DOMAIN_IDENTITY) {
-                       goto out;
-               }
-
-               if (dev_data->iommu_v2) {
-                       if (pdev_pri_ats_enable(pdev) != 0)
-                               goto out;
-
-                       dev_data->ats.enabled = true;
-                       dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
-                       dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);
-               }
-       } else if (amd_iommu_iotlb_sup &&
-                  pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
-               dev_data->ats.enabled = true;
-               dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
        }
 
-skip_ats_check:
-       ret = 0;
+       if (dev_is_pci(dev))
+               pdev_enable_caps(to_pci_dev(dev));
 
        do_attach(dev_data, domain);
 
@@ -1887,15 +1940,8 @@ static void detach_device(struct device *dev)
 
        do_detach(dev_data);
 
-       if (!dev_is_pci(dev))
-               goto out;
-
-       if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
-               pdev_iommuv2_disable(to_pci_dev(dev));
-       else if (dev_data->ats.enabled)
-               pci_disable_ats(to_pci_dev(dev));
-
-       dev_data->ats.enabled = false;
+       if (dev_is_pci(dev))
+               pdev_disable_caps(to_pci_dev(dev));
 
 out:
        spin_unlock(&dev_data->lock);
@@ -1985,7 +2031,7 @@ static void update_device_table(struct protection_domain *domain)
                if (!iommu)
                        continue;
                set_dte_entry(iommu, dev_data->devid, domain,
-                             dev_data->ats.enabled, dev_data->iommu_v2);
+                             dev_data->ats_enabled, dev_data->ppr);
                clone_aliases(iommu, dev_data->dev);
        }
 }
@@ -2019,9 +2065,11 @@ void amd_iommu_domain_update(struct protection_domain *domain)
 static void cleanup_domain(struct protection_domain *domain)
 {
        struct iommu_dev_data *entry;
-       unsigned long flags;
 
-       spin_lock_irqsave(&domain->lock, flags);
+       lockdep_assert_held(&domain->lock);
+
+       if (!domain->dev_cnt)
+               return;
 
        while (!list_empty(&domain->dev_list)) {
                entry = list_first_entry(&domain->dev_list,
@@ -2029,8 +2077,7 @@ static void cleanup_domain(struct protection_domain *domain)
                BUG_ON(!entry->domain);
                do_detach(entry);
        }
-
-       spin_unlock_irqrestore(&domain->lock, flags);
+       WARN_ON(domain->dev_cnt != 0);
 }
 
 static void protection_domain_free(struct protection_domain *domain)
@@ -2041,6 +2088,12 @@ static void protection_domain_free(struct protection_domain *domain)
        if (domain->iop.pgtbl_cfg.tlb)
                free_io_pgtable_ops(&domain->iop.iop.ops);
 
+       if (domain->flags & PD_IOMMUV2_MASK)
+               free_gcr3_table(domain);
+
+       if (domain->iop.root)
+               free_page((unsigned long)domain->iop.root);
+
        if (domain->id)
                domain_id_free(domain->id);
 
@@ -2053,18 +2106,10 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
 
        BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
 
-       spin_lock_init(&domain->lock);
-       domain->id = domain_id_alloc();
-       if (!domain->id)
-               return -ENOMEM;
-       INIT_LIST_HEAD(&domain->dev_list);
-
        if (mode != PAGE_MODE_NONE) {
                pt_root = (void *)get_zeroed_page(GFP_KERNEL);
-               if (!pt_root) {
-                       domain_id_free(domain->id);
+               if (!pt_root)
                        return -ENOMEM;
-               }
        }
 
        amd_iommu_domain_set_pgtable(domain, pt_root, mode);
@@ -2074,20 +2119,12 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
 
 static int protection_domain_init_v2(struct protection_domain *domain)
 {
-       spin_lock_init(&domain->lock);
-       domain->id = domain_id_alloc();
-       if (!domain->id)
-               return -ENOMEM;
-       INIT_LIST_HEAD(&domain->dev_list);
-
        domain->flags |= PD_GIOV_MASK;
 
        domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
 
-       if (domain_enable_v2(domain, 1)) {
-               domain_id_free(domain->id);
+       if (setup_gcr3_table(domain, 1))
                return -ENOMEM;
-       }
 
        return 0;
 }
@@ -2097,57 +2134,60 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
        struct io_pgtable_ops *pgtbl_ops;
        struct protection_domain *domain;
        int pgtable;
-       int mode = DEFAULT_PGTABLE_LEVEL;
        int ret;
 
+       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+       if (!domain)
+               return NULL;
+
+       domain->id = domain_id_alloc();
+       if (!domain->id)
+               goto out_err;
+
+       spin_lock_init(&domain->lock);
+       INIT_LIST_HEAD(&domain->dev_list);
+       domain->nid = NUMA_NO_NODE;
+
+       switch (type) {
+       /* No need to allocate io pgtable ops in passthrough mode */
+       case IOMMU_DOMAIN_IDENTITY:
+               return domain;
+       case IOMMU_DOMAIN_DMA:
+               pgtable = amd_iommu_pgtable;
+               break;
        /*
-        * Force IOMMU v1 page table when iommu=pt and
-        * when allocating domain for pass-through devices.
+        * Force IOMMU v1 page table when allocating
+        * domain for pass-through devices.
         */
-       if (type == IOMMU_DOMAIN_IDENTITY) {
-               pgtable = AMD_IOMMU_V1;
-               mode = PAGE_MODE_NONE;
-       } else if (type == IOMMU_DOMAIN_UNMANAGED) {
+       case IOMMU_DOMAIN_UNMANAGED:
                pgtable = AMD_IOMMU_V1;
-       } else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) {
-               pgtable = amd_iommu_pgtable;
-       } else {
-               return NULL;
+               break;
+       default:
+               goto out_err;
        }
 
-       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
-       if (!domain)
-               return NULL;
-
        switch (pgtable) {
        case AMD_IOMMU_V1:
-               ret = protection_domain_init_v1(domain, mode);
+               ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL);
                break;
        case AMD_IOMMU_V2:
                ret = protection_domain_init_v2(domain);
                break;
        default:
                ret = -EINVAL;
+               break;
        }
 
        if (ret)
                goto out_err;
 
-       /* No need to allocate io pgtable ops in passthrough mode */
-       if (type == IOMMU_DOMAIN_IDENTITY)
-               return domain;
-
-       domain->nid = NUMA_NO_NODE;
-
        pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
-       if (!pgtbl_ops) {
-               domain_id_free(domain->id);
+       if (!pgtbl_ops)
                goto out_err;
-       }
 
        return domain;
 out_err:
-       kfree(domain);
+       protection_domain_free(domain);
        return NULL;
 }
 
@@ -2236,19 +2276,18 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
 static void amd_iommu_domain_free(struct iommu_domain *dom)
 {
        struct protection_domain *domain;
+       unsigned long flags;
 
-       domain = to_pdomain(dom);
+       if (!dom)
+               return;
 
-       if (domain->dev_cnt > 0)
-               cleanup_domain(domain);
+       domain = to_pdomain(dom);
 
-       BUG_ON(domain->dev_cnt != 0);
+       spin_lock_irqsave(&domain->lock, flags);
 
-       if (!dom)
-               return;
+       cleanup_domain(domain);
 
-       if (domain->flags & PD_IOMMUV2_MASK)
-               free_gcr3_table(domain);
+       spin_unlock_irqrestore(&domain->lock, flags);
 
        protection_domain_free(domain);
 }
@@ -2296,14 +2335,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
        return ret;
 }
 
-static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
-                                    unsigned long iova, size_t size)
+static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
+                                   unsigned long iova, size_t size)
 {
        struct protection_domain *domain = to_pdomain(dom);
        struct io_pgtable_ops *ops = &domain->iop.iop.ops;
 
        if (ops->map_pages)
                domain_flush_np_cache(domain, iova, size);
+       return 0;
 }
 
 static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
@@ -2541,7 +2581,6 @@ bool amd_iommu_is_attach_deferred(struct device *dev)
 
        return dev_data->defer_attach;
 }
-EXPORT_SYMBOL_GPL(amd_iommu_is_attach_deferred);
 
 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
@@ -2581,7 +2620,7 @@ static int amd_iommu_def_domain_type(struct device *dev)
         *    and require remapping.
         *  - SNP is enabled, because it prohibits DTE[Mode]=0.
         */
-       if (dev_data->iommu_v2 &&
+       if (pdev_pasid_supported(dev_data) &&
            !cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
            !amd_iommu_snp_en) {
                return IOMMU_DOMAIN_IDENTITY;
@@ -2626,93 +2665,6 @@ const struct iommu_ops amd_iommu_ops = {
        }
 };
 
-/*****************************************************************************
- *
- * The next functions do a basic initialization of IOMMU for pass through
- * mode
- *
- * In passthrough mode the IOMMU is initialized and enabled but not used for
- * DMA-API translation.
- *
- *****************************************************************************/
-
-/* IOMMUv2 specific functions */
-int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_register(&ppr_notifier, nb);
-}
-EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
-
-int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_unregister(&ppr_notifier, nb);
-}
-EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
-
-void amd_iommu_domain_direct_map(struct iommu_domain *dom)
-{
-       struct protection_domain *domain = to_pdomain(dom);
-       unsigned long flags;
-
-       spin_lock_irqsave(&domain->lock, flags);
-
-       if (domain->iop.pgtbl_cfg.tlb)
-               free_io_pgtable_ops(&domain->iop.iop.ops);
-
-       spin_unlock_irqrestore(&domain->lock, flags);
-}
-EXPORT_SYMBOL(amd_iommu_domain_direct_map);
-
-/* Note: This function expects iommu_domain->lock to be held prior calling the function. */
-static int domain_enable_v2(struct protection_domain *domain, int pasids)
-{
-       int levels;
-
-       /* Number of GCR3 table levels required */
-       for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
-               levels += 1;
-
-       if (levels > amd_iommu_max_glx_val)
-               return -EINVAL;
-
-       domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
-       if (domain->gcr3_tbl == NULL)
-               return -ENOMEM;
-
-       domain->glx      = levels;
-       domain->flags   |= PD_IOMMUV2_MASK;
-
-       amd_iommu_domain_update(domain);
-
-       return 0;
-}
-
-int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
-{
-       struct protection_domain *pdom = to_pdomain(dom);
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&pdom->lock, flags);
-
-       /*
-        * Save us all sanity checks whether devices already in the
-        * domain support IOMMUv2. Just force that the domain has no
-        * devices attached when it is switched into IOMMUv2 mode.
-        */
-       ret = -EBUSY;
-       if (pdom->dev_cnt > 0 || pdom->flags & PD_IOMMUV2_MASK)
-               goto out;
-
-       if (!pdom->gcr3_tbl)
-               ret = domain_enable_v2(pdom, pasids);
-
-out:
-       spin_unlock_irqrestore(&pdom->lock, flags);
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
-
 static int __flush_pasid(struct protection_domain *domain, u32 pasid,
                         u64 address, bool size)
 {
@@ -2750,10 +2702,10 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
                   There might be non-IOMMUv2 capable devices in an IOMMUv2
                 * domain.
                 */
-               if (!dev_data->ats.enabled)
+               if (!dev_data->ats_enabled)
                        continue;
 
-               qdep  = dev_data->ats.qdep;
+               qdep  = dev_data->ats_qdep;
                iommu = rlookup_amd_iommu(dev_data->dev);
                if (!iommu)
                        continue;
@@ -2794,7 +2746,6 @@ int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
 
        return ret;
 }
-EXPORT_SYMBOL(amd_iommu_flush_page);
 
 static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
 {
@@ -2814,7 +2765,6 @@ int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
 
        return ret;
 }
-EXPORT_SYMBOL(amd_iommu_flush_tlb);
 
 static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
 {
@@ -2894,7 +2844,6 @@ int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
 
        return ret;
 }
-EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
 
 int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
 {
@@ -2908,7 +2857,6 @@ int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
 
        return ret;
 }
-EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
 
 int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
                           int status, int tag)
@@ -2927,49 +2875,6 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
 
        return iommu_queue_command(iommu, &cmd);
 }
-EXPORT_SYMBOL(amd_iommu_complete_ppr);
-
-int amd_iommu_device_info(struct pci_dev *pdev,
-                          struct amd_iommu_device_info *info)
-{
-       int max_pasids;
-       int pos;
-
-       if (pdev == NULL || info == NULL)
-               return -EINVAL;
-
-       if (!amd_iommu_v2_supported())
-               return -EINVAL;
-
-       memset(info, 0, sizeof(*info));
-
-       if (pci_ats_supported(pdev))
-               info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
-
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
-       if (pos)
-               info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
-
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
-       if (pos) {
-               int features;
-
-               max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
-               max_pasids = min(max_pasids, (1 << 20));
-
-               info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
-               info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
-
-               features = pci_pasid_features(pdev);
-               if (features & PCI_PASID_CAP_EXEC)
-                       info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
-               if (features & PCI_PASID_CAP_PRIV)
-                       info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(amd_iommu_device_info);
 
 #ifdef CONFIG_IRQ_REMAP