QLIST_ENTRY(AMDVIAddressSpace) next;
/* Record DMA translation ranges */
IOVATree *iova_tree;
+ /* DMA address translation active */
+ bool addr_translation;
};
/* AMDVI cache entry */
}
}
+/*
+ * Toggle between address translation and passthrough modes by enabling the
+ * corresponding memory regions.
+ *
+ * The two regions are mutually exclusive: based on amdvi_as->addr_translation
+ * exactly one of the translating IOMMU region (iommu) or the passthrough
+ * region (iommu_nodma) is left enabled. The region being switched away from
+ * is disabled first so both are never enabled simultaneously.
+ */
+static void amdvi_switch_address_space(AMDVIAddressSpace *amdvi_as)
+{
+ if (amdvi_as->addr_translation) {
+ /* Enabling DMA region: disable passthrough, enable translation */
+ memory_region_set_enabled(&amdvi_as->iommu_nodma, false);
+ memory_region_set_enabled(MEMORY_REGION(&amdvi_as->iommu), true);
+ } else {
+ /* Disabling DMA region, using passthrough */
+ memory_region_set_enabled(MEMORY_REGION(&amdvi_as->iommu), false);
+ memory_region_set_enabled(&amdvi_as->iommu_nodma, true);
+ }
+}
+
/* log error without aborting since linux seems to be using reserved bits */
static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd)
{
iommu_as[devfn]->iommu_state = s;
iommu_as[devfn]->notifier_flags = IOMMU_NOTIFIER_NONE;
iommu_as[devfn]->iova_tree = iova_tree_new();
+ iommu_as[devfn]->addr_translation = false;
amdvi_dev_as = iommu_as[devfn];
AMDVI_INT_ADDR_FIRST,
&amdvi_dev_as->iommu_ir, 1);
- memory_region_set_enabled(&amdvi_dev_as->iommu_nodma, false);
- memory_region_set_enabled(MEMORY_REGION(&amdvi_dev_as->iommu), true);
+ amdvi_switch_address_space(amdvi_dev_as);
}
return &iommu_as[devfn]->as;
}