void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
void amd_iommu_domain_update(struct protection_domain *domain);
void amd_iommu_domain_flush_complete(struct protection_domain *domain);
-void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain);
+void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size);
int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
unsigned long cr3);
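The exported flush primitive changes from an implicit whole-domain invalidation to an explicit range. A minimal sketch of the difference at a call site (example_flush_one_page() is a hypothetical helper, not part of the patch; per the hunks below, callers hold the domain lock around the flush):

/* Hypothetical caller, for illustration only. */
static void example_flush_one_page(struct protection_domain *domain, u64 iova)
{
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);
	/* Old API flushed everything: amd_iommu_domain_flush_tlb_pde(domain); */
	/* New API names the range, here a single 4KiB page: */
	amd_iommu_domain_flush_pages(domain, iova, SZ_4K);
	spin_unlock_irqrestore(&domain->lock, flags);
}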
bool updated = false;
u64 __pte, *pte;
int ret, i, count;
+ size_t size = pgcount << __ffs(pgsize);
+ unsigned long o_iova = iova;
BUG_ON(!IS_ALIGNED(iova, pgsize));
BUG_ON(!IS_ALIGNED(paddr, pgsize));
* Updates and flushing already happened in
* increase_address_space().
*/
- amd_iommu_domain_flush_tlb_pde(dom);
+ amd_iommu_domain_flush_pages(dom, o_iova, size);
spin_unlock_irqrestore(&dom->lock, flags);
}
WARN_ON(ret);
}
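The two new locals exist because the mapping loop consumes iova as it walks; o_iova and size preserve the original range so the post-map flush above can target exactly what was mapped. Since IOMMU page sizes are powers of two, __ffs(pgsize) is log2(pgsize), and the shift equals pgcount * pgsize. A standalone sketch of that identity (example_ffs() is a userspace stand-in for the kernel's __ffs()):

#include <assert.h>
#include <stddef.h>

/* Stand-in for the kernel's __ffs(): index of the lowest set bit */
static unsigned int example_ffs(unsigned long x)
{
	return (unsigned int)__builtin_ctzl(x);
}

int main(void)
{
	size_t pgsize = 4096;	/* IOMMU page sizes are powers of two */
	size_t pgcount = 8;

	/* pgcount << log2(pgsize) is the same as pgcount * pgsize */
	assert((pgcount << example_ffs(pgsize)) == pgcount * pgsize);
	return 0;
}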
-static void domain_flush_pages(struct protection_domain *domain,
- u64 address, size_t size)
+void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size)
{
if (likely(!amd_iommu_np_cache)) {
__domain_flush_pages(domain, address, size);
}
/* Flush the whole IO/TLB for a given protection domain - including PDE */
-void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
+static void amd_iommu_domain_flush_all(struct protection_domain *domain)
{
- domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
+ amd_iommu_domain_flush_pages(domain, 0,
+ CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}
void amd_iommu_domain_flush_complete(struct protection_domain *domain)
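The whole-domain flush now goes through the same range interface, passing a sentinel size starting at address 0, which the command builder turns into an "invalidate all pages" command. For reference, the constant as defined in drivers/iommu/amd/amd_iommu_types.h:

/* From drivers/iommu/amd/amd_iommu_types.h */
#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	((u64)0x7fffffffffffffffULL)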
unsigned long flags;
spin_lock_irqsave(&domain->lock, flags);
- domain_flush_pages(domain, iova, size);
+ amd_iommu_domain_flush_pages(domain, iova, size);
spin_unlock_irqrestore(&domain->lock, flags);
}
}
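For context, this locked body most likely sits in domain_flush_np_cache(), the helper the map paths use when the IOMMU also caches non-present translations (amd_iommu_np_cache, typical of emulated or para-virtualized IOMMUs), where newly created mappings must be invalidated too. A hedged reconstruction of the enclosing helper:

static void domain_flush_np_cache(struct protection_domain *domain,
				  unsigned long iova, size_t size)
{
	if (unlikely(amd_iommu_np_cache)) {
		unsigned long flags;

		spin_lock_irqsave(&domain->lock, flags);
		amd_iommu_domain_flush_pages(domain, iova, size);
		spin_unlock_irqrestore(&domain->lock, flags);
	}
}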
device_flush_dte(dev_data);
/* Flush IOTLB and wait for the flushes to finish */
- amd_iommu_domain_flush_tlb_pde(domain);
+ amd_iommu_domain_flush_all(domain);
/* decrease reference counters - needs to happen after the flushes */
domain->dev_iommu[iommu->index] -= 1;
amd_iommu_update_and_flush_device_table(domain);
/* Flush domain TLB(s) and wait for completion */
- amd_iommu_domain_flush_tlb_pde(domain);
+ amd_iommu_domain_flush_all(domain);
}
/*****************************************************************************
/* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
if (domain_flush)
- amd_iommu_domain_flush_tlb_pde(pdomain);
+ amd_iommu_domain_flush_all(pdomain);
pdomain->dirty_tracking = enable;
spin_unlock_irqrestore(&pdomain->lock, flags);
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
- amd_iommu_domain_flush_tlb_pde(dom);
+ amd_iommu_domain_flush_all(dom);
spin_unlock_irqrestore(&dom->lock, flags);
}
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
- domain_flush_pages(dom, gather->start, gather->end - gather->start + 1);
+ amd_iommu_domain_flush_pages(dom, gather->start,
+ gather->end - gather->start + 1);
spin_unlock_irqrestore(&dom->lock, flags);
}
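The `+ 1` is there because iommu_iotlb_gather records an inclusive end: the core layer accumulates [start, last byte] across unmaps, and the driver converts back to a byte count at sync time. A simplified sketch of the accumulation convention (modeled on iommu_iotlb_gather_add_range() in include/linux/iommu.h):

/* Modeled on iommu_iotlb_gather_add_range(); simplified for illustration */
static inline void example_gather_add_range(unsigned long *start,
					    unsigned long *end,
					    unsigned long iova, size_t size)
{
	unsigned long last = iova + size - 1;	/* inclusive last byte */

	if (iova < *start)
		*start = iova;
	if (last > *end)
		*end = last;
	/* bytes to flush at sync time: *end - *start + 1 */
}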