#include "qemu/osdep.h"
#include "qemu/error-report.h"
+#include "trace.h"
#include "hw/arm/smmuv3.h"
#include "hw/core/iommu.h"
#include "hw/pci-host/gpex.h"
#include "hw/vfio/pci.h"
+#include "smmuv3-internal.h"
#include "smmuv3-accel.h"
/*
return accel_dev;
}
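+/*
+ * Pick the HWPT that models the guest GBPA register: GBPA.ABORT selects
+ * the pre-allocated abort HWPT, otherwise the bypass HWPT is used.
+ */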
+static uint32_t smmuv3_accel_gbpa_hwpt(SMMUv3State *s, SMMUv3AccelState *accel)
+{
+ return FIELD_EX32(s->gbpa, GBPA, ABORT) ?
+ accel->abort_hwpt_id : accel->bypass_hwpt_id;
+}
+
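+/*
+ * Allocate a vIOMMU object for this vSMMU, backed by the host SMMUv3,
+ * together with two nested HWPTs that model the S1 bypass and abort
+ * cases, and attach the device to the HWPT matching the current GBPA.
+ */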
+static bool
+smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
+ Error **errp)
+{
+ SMMUv3AccelState *accel = s->s_accel;
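+    /*
+     * Vendor-specific HWPT data: word 0 of an STE carries the V bit and
+     * the Config field, which is all the host needs for bypass/abort.
+     */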
+ struct iommu_hwpt_arm_smmuv3 bypass_data = {
+ .ste = { SMMU_STE_CFG_BYPASS | SMMU_STE_VALID, 0x0ULL },
+ };
+ struct iommu_hwpt_arm_smmuv3 abort_data = {
+ .ste = { SMMU_STE_VALID, 0x0ULL },
+ };
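+    /* Reuse the device's default S2 HWPT as the vIOMMU nesting parent */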
+ uint32_t s2_hwpt_id = idev->hwpt_id;
+ uint32_t viommu_id, hwpt_id;
+ IOMMUFDViommu *viommu;
+
+ if (!iommufd_backend_alloc_viommu(idev->iommufd, idev->devid,
+ IOMMU_VIOMMU_TYPE_ARM_SMMUV3,
+ s2_hwpt_id, &viommu_id, errp)) {
+ return false;
+ }
+
+ viommu = g_new0(IOMMUFDViommu, 1);
+ viommu->viommu_id = viommu_id;
+ viommu->s2_hwpt_id = s2_hwpt_id;
+ viommu->iommufd = idev->iommufd;
+
+ /*
+ * Pre-allocate HWPTs for S1 bypass and abort cases. These will be attached
+ * later for guest STEs or GBPAs that require bypass or abort configuration.
+ */
+ if (!iommufd_backend_alloc_hwpt(idev->iommufd, idev->devid, viommu_id,
+ 0, IOMMU_HWPT_DATA_ARM_SMMUV3,
+ sizeof(abort_data), &abort_data,
+ &accel->abort_hwpt_id, errp)) {
+ goto free_viommu;
+ }
+
+ if (!iommufd_backend_alloc_hwpt(idev->iommufd, idev->devid, viommu_id,
+ 0, IOMMU_HWPT_DATA_ARM_SMMUV3,
+ sizeof(bypass_data), &bypass_data,
+ &accel->bypass_hwpt_id, errp)) {
+ goto free_abort_hwpt;
+ }
+
+ /* Attach a HWPT based on SMMUv3 GBPA.ABORT value */
+ hwpt_id = smmuv3_accel_gbpa_hwpt(s, accel);
+ if (!host_iommu_device_iommufd_attach_hwpt(idev, hwpt_id, errp)) {
+ goto free_bypass_hwpt;
+ }
+ accel->viommu = viommu;
+ return true;
+
+free_bypass_hwpt:
+ iommufd_backend_free_id(idev->iommufd, accel->bypass_hwpt_id);
+free_abort_hwpt:
+ iommufd_backend_free_id(idev->iommufd, accel->abort_hwpt_id);
+free_viommu:
+ iommufd_backend_free_id(idev->iommufd, viommu->viommu_id);
+ g_free(viommu);
+ return false;
+}
+
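+/*
+ * PCIIOMMUOps::set_iommu_device callback: associate an iommufd-backed
+ * host IOMMU device with this vSMMU. The first device triggers vIOMMU
+ * allocation; subsequent devices behind the same vSMMU share it.
+ */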
+static bool smmuv3_accel_set_iommu_device(PCIBus *bus, void *opaque, int devfn,
+ HostIOMMUDevice *hiod, Error **errp)
+{
+ HostIOMMUDeviceIOMMUFD *idev = HOST_IOMMU_DEVICE_IOMMUFD(hiod);
+ SMMUState *bs = opaque;
+ SMMUv3State *s = ARM_SMMUV3(bs);
+ SMMUPciBus *sbus = smmu_get_sbus(bs, bus);
+ SMMUv3AccelDevice *accel_dev = smmuv3_accel_get_dev(bs, sbus, bus, devfn);
+
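+    /* Nothing to set up for a device without an iommufd backend */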
+ if (!idev) {
+ return true;
+ }
+
+    if (accel_dev->idev) {
+        if (accel_dev->idev != idev) {
+            error_setg(errp, "Device already has an associated idev 0x%x",
+                       accel_dev->idev->devid);
+            return false;
+        }
+        return true;
+    }
+
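+    /* A single vIOMMU is shared by all devices behind this vSMMU */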
+ if (s->s_accel->viommu) {
+ goto done;
+ }
+
+ if (!smmuv3_accel_alloc_viommu(s, idev, errp)) {
+ error_append_hint(errp, "Unable to alloc vIOMMU: idev devid 0x%x: ",
+ idev->devid);
+ return false;
+ }
+
+done:
+ accel_dev->idev = idev;
+ accel_dev->s_accel = s->s_accel;
+ QLIST_INSERT_HEAD(&s->s_accel->device_list, accel_dev, next);
+ trace_smmuv3_accel_set_iommu_device(devfn, idev->devid);
+ return true;
+}
+
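+/*
+ * PCIIOMMUOps::unset_iommu_device callback: detach the device from its
+ * nested HWPT and re-attach the default S2 HWPT; the vIOMMU and the
+ * abort/bypass HWPTs are freed once the last device is gone.
+ */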
+static void smmuv3_accel_unset_iommu_device(PCIBus *bus, void *opaque,
+ int devfn)
+{
+ SMMUState *bs = opaque;
+ SMMUPciBus *sbus = g_hash_table_lookup(bs->smmu_pcibus_by_busptr, bus);
+ HostIOMMUDeviceIOMMUFD *idev;
+ SMMUv3AccelDevice *accel_dev;
+ SMMUv3AccelState *accel;
+ SMMUDevice *sdev;
+
+ if (!sbus) {
+ return;
+ }
+
+ sdev = sbus->pbdev[devfn];
+ if (!sdev) {
+ return;
+ }
+
+    accel_dev = container_of(sdev, SMMUv3AccelDevice, sdev);
+    idev = accel_dev->idev;
+    if (!idev) {
+        /* set_iommu_device() never associated a host IOMMU device */
+        return;
+    }
+    accel = accel_dev->s_accel;
+    /* Re-attach the default S2 HWPT before tearing the nesting down */
+    if (!host_iommu_device_iommufd_attach_hwpt(idev, idev->hwpt_id, NULL)) {
+        error_report("Unable to attach the default HW pagetable: idev devid "
+                     "0x%x", idev->devid);
+    }
+
+ accel_dev->idev = NULL;
+ accel_dev->s_accel = NULL;
+ QLIST_REMOVE(accel_dev, next);
+ trace_smmuv3_accel_unset_iommu_device(devfn, idev->devid);
+
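+    /* Last device is gone: free the bypass/abort HWPTs and the vIOMMU */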
+ if (QLIST_EMPTY(&accel->device_list)) {
+ iommufd_backend_free_id(accel->viommu->iommufd, accel->bypass_hwpt_id);
+ iommufd_backend_free_id(accel->viommu->iommufd, accel->abort_hwpt_id);
+ iommufd_backend_free_id(accel->viommu->iommufd,
+ accel->viommu->viommu_id);
+ g_free(accel->viommu);
+ accel->viommu = NULL;
+ }
+}
+
/*
* Only allow PCIe bridges, pxb-pcie roots, and GPEX roots so vfio-pci
* endpoints can sit downstream. Accelerated SMMUv3 requires a vfio-pci
.supports_address_space = smmuv3_accel_supports_as,
.get_address_space = smmuv3_accel_find_add_as,
.get_viommu_flags = smmuv3_accel_get_viommu_flags,
+ .set_iommu_device = smmuv3_accel_set_iommu_device,
+ .unset_iommu_device = smmuv3_accel_unset_iommu_device,
};
void smmuv3_accel_init(SMMUv3State *s)
{
SMMUState *bs = ARM_SMMU(s);
+ s->s_accel = g_new0(SMMUv3AccelState, 1);
bs->iommu_ops = &smmuv3_accel_ops;
smmuv3_accel_as_init(s);
}