* Note:
* The old values for the GCR3 table and GPT have been cleared by the caller.
*/
-static void set_dte_gcr3_table(struct amd_iommu *iommu,
- struct iommu_dev_data *dev_data,
- struct dev_table_entry *target)
+static void set_dte_gcr3_table(struct iommu_dev_data *dev_data,
+ struct dev_table_entry *new)
{
struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
- u64 gcr3;
+ u64 gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
- if (!gcr3_info->gcr3_tbl)
- return;
-
- pr_debug("%s: devid=%#x, glx=%#x, gcr3_tbl=%#llx\n",
- __func__, dev_data->devid, gcr3_info->glx,
- (unsigned long long)gcr3_info->gcr3_tbl);
-
- gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
+ new->data[0] |= DTE_FLAG_TV |
+ (dev_data->ppr ? DTE_FLAG_PPR : 0) |
+ (pdom_is_v2_pgtbl_mode(dev_data->domain) ? DTE_FLAG_GIOV : 0) |
+ DTE_FLAG_GV |
+ FIELD_PREP(DTE_GLX, gcr3_info->glx) |
+ FIELD_PREP(DTE_GCR3_14_12, gcr3 >> 12) |
+ DTE_FLAG_IR | DTE_FLAG_IW;
- target->data[0] |= DTE_FLAG_GV |
- FIELD_PREP(DTE_GLX, gcr3_info->glx) |
- FIELD_PREP(DTE_GCR3_14_12, gcr3 >> 12);
- if (pdom_is_v2_pgtbl_mode(dev_data->domain))
- target->data[0] |= DTE_FLAG_GIOV;
-
- target->data[1] |= FIELD_PREP(DTE_GCR3_30_15, gcr3 >> 15) |
- FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31);
+	new->data[1] |= FIELD_PREP(DTE_DOMID_MASK, gcr3_info->domid) |
+ FIELD_PREP(DTE_GCR3_30_15, gcr3 >> 15) |
+ (dev_data->ats_enabled ? DTE_FLAG_IOTLB : 0) |
+ FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31);
/* Guest page table can only support 4 and 5 levels */
if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL)
- target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_5_LEVEL);
+ new->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_5_LEVEL);
else
- target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_4_LEVEL);
+ new->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_4_LEVEL);
+}
+
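+/*
+ * Program the v1 (host) page-table fields of the DTE from @pt_info,
+ * along with the domain ID and ATS/IOTLB enable. Non-static so it can
+ * be shared with callers that already hold the pagetable info.
+ */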
+void amd_iommu_set_dte_v1(struct iommu_dev_data *dev_data,
+ struct protection_domain *domain, u16 domid,
+ struct pt_iommu_amdv1_hw_info *pt_info,
+ struct dev_table_entry *new)
+{
+ u64 host_pt_root = __sme_set(pt_info->host_pt_root);
+
+	/* Note: Dirty tracking is currently supported only with the v1 page table */
+ new->data[0] |= DTE_FLAG_TV |
+ FIELD_PREP(DTE_MODE_MASK, pt_info->mode) |
+ (domain->dirty_tracking ? DTE_FLAG_HAD : 0) |
+ FIELD_PREP(DTE_HOST_TRP, host_pt_root >> 12) |
+ DTE_FLAG_IR | DTE_FLAG_IW;
+
+ new->data[1] |= FIELD_PREP(DTE_DOMID_MASK, domid) |
+ (dev_data->ats_enabled ? DTE_FLAG_IOTLB : 0);
+}
+
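+/*
+ * Wrapper used by set_dte_entry(): resolve the page-table root and
+ * level, either from the caller or from the live pagetable, then
+ * program the DTE.
+ */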
+static void set_dte_v1(struct iommu_dev_data *dev_data,
+ struct protection_domain *domain, u16 domid,
+ phys_addr_t top_paddr, unsigned int top_level,
+ struct dev_table_entry *new)
+{
+ struct pt_iommu_amdv1_hw_info pt_info;
+
+	/*
+	 * When updating the IO pagetable, the new top and level are
+	 * provided as parameters. For other operations, i.e. device
+	 * attach, retrieve the current pagetable info via the IOMMU
+	 * PT API.
+	 */
+ if (top_paddr) {
+ pt_info.host_pt_root = top_paddr;
+ pt_info.mode = top_level + 1;
+ } else {
+ WARN_ON(top_paddr || top_level);
+ pt_iommu_amdv1_hw_info(&domain->amdv1, &pt_info);
+ }
+
+ amd_iommu_set_dte_v1(dev_data, domain, domid, &pt_info, new);
+}
+
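+/*
+ * Identity domains leave the page-table mode at zero; with TV set the
+ * device's DMA is passed through untranslated.
+ */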
+static void set_dte_passthrough(struct iommu_dev_data *dev_data,
+ struct protection_domain *domain,
+ struct dev_table_entry *new)
+{
+ new->data[0] |= DTE_FLAG_TV | DTE_FLAG_IR | DTE_FLAG_IW;
+
+ new->data[1] |= FIELD_PREP(DTE_DOMID_MASK, domain->id) |
+			(dev_data->ats_enabled ? DTE_FLAG_IOTLB : 0);
}
static void set_dte_entry(struct amd_iommu *iommu,
struct iommu_dev_data *dev_data,
phys_addr_t top_paddr, unsigned int top_level)
{
- u16 domid;
u32 old_domid;
struct dev_table_entry new = {};
struct protection_domain *domain = dev_data->domain;
struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];
- struct pt_iommu_amdv1_hw_info pt_info;
amd_iommu_make_clear_dte(dev_data, &new);
- if (gcr3_info && gcr3_info->gcr3_tbl)
- domid = dev_data->gcr3_info.domid;
- else {
- domid = domain->id;
-
- if (domain->domain.type & __IOMMU_DOMAIN_PAGING) {
- /*
- * When updating the IO pagetable, the new top and level
- * are provided as parameters. For other operations i.e.
- * device attach, retrieve the current pagetable info
- * via the IOMMU PT API.
- */
- if (top_paddr) {
- pt_info.host_pt_root = top_paddr;
- pt_info.mode = top_level + 1;
- } else {
- WARN_ON(top_paddr || top_level);
- pt_iommu_amdv1_hw_info(&domain->amdv1,
- &pt_info);
- }
-
- new.data[0] |= __sme_set(pt_info.host_pt_root) |
- (pt_info.mode & DEV_ENTRY_MODE_MASK)
- << DEV_ENTRY_MODE_SHIFT;
- }
- }
-
- new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW;
-
- /*
- * When SNP is enabled, we can only support TV=1 with non-zero domain ID.
- * This is prevented by the SNP-enable and IOMMU_DOMAIN_IDENTITY check in
- * do_iommu_domain_alloc().
- */
- WARN_ON(amd_iommu_snp_en && (domid == 0));
- new.data[0] |= DTE_FLAG_TV;
-
- if (dev_data->ppr)
- new.data[0] |= 1ULL << DEV_ENTRY_PPR;
-
- if (domain->dirty_tracking)
- new.data[0] |= DTE_FLAG_HAD;
-
- if (dev_data->ats_enabled)
- new.data[1] |= DTE_FLAG_IOTLB;
-
old_domid = READ_ONCE(dte->data[1]) & DTE_DOMID_MASK;
- new.data[1] |= domid;
-
- set_dte_gcr3_table(iommu, dev_data, &new);
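+	/*
+	 * Select the DTE layout: GCR3-based when a guest CR3 table is
+	 * installed, passthrough for identity domains, and a v1 host
+	 * page table otherwise.
+	 */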
+ if (gcr3_info->gcr3_tbl)
+ set_dte_gcr3_table(dev_data, &new);
+ else if (domain->domain.type == IOMMU_DOMAIN_IDENTITY)
+ set_dte_passthrough(dev_data, domain, &new);
+ else if ((domain->domain.type & __IOMMU_DOMAIN_PAGING) &&
+ domain->pd_mode == PD_MODE_V1)
+ set_dte_v1(dev_data, domain, domain->id, top_paddr, top_level, &new);
+ else
+		WARN_ON(1);
amd_iommu_update_dte(iommu, dev_data, &new);