}
static void qi_batch_add_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid,
- u64 addr, unsigned long npages, bool ih,
+ u64 addr, unsigned int size_order, bool ih,
struct qi_batch *batch)
{
- if (!npages)
- return;
-
- qi_desc_piotlb(did, pasid, addr, npages, ih, &batch->descs[batch->index]);
+ qi_desc_piotlb(did, pasid, addr, size_order, ih,
+ &batch->descs[batch->index]);
qi_batch_increment_index(iommu, batch);
}
tag->pasid, domain->qi_batch);
else
qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid,
- addr, pages, ih, domain->qi_batch);
+ addr, mask, ih, domain->qi_batch);
return;
}
/* Page-selective-within-PASID IOTLB invalidation */
static inline void qi_desc_piotlb(u16 did, u32 pasid, u64 addr,
- unsigned long npages, bool ih,
+ unsigned int size_order, bool ih,
struct qi_desc *desc)
{
- int mask = ilog2(__roundup_pow_of_two(npages));
- unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
-
- if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
- addr = ALIGN_DOWN(addr, align);
-
+	/*
+	 * Callers must compute addr and size_order with
+	 * calculate_psi_aligned_address(), which guarantees that addr is
+	 * aligned to the invalidation range implied by size_order.
+	 */
desc->qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
desc->qw1 = QI_EIOTLB_ADDR(addr) | QI_EIOTLB_IH(ih) |
- QI_EIOTLB_AM(mask);
+ QI_EIOTLB_AM(size_order);
}
static inline void qi_desc_dev_iotlb_pasid(u16 sid, u16 pfsid, u32 pasid,