accel->abort_hwpt_id : accel->bypass_hwpt_id;
}
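+/*
+ * Allocate an iommufd vDEVICE for this device on the vIOMMU, using the
+ * guest StreamID as its virtual ID. Returns true if nothing needs to
+ * be done (no host device attached, or a vdev already exists) or on
+ * success; false on allocation failure.
+ */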
+static bool
+smmuv3_accel_alloc_vdev(SMMUv3AccelDevice *accel_dev, int sid, Error **errp)
+{
+ SMMUv3AccelState *accel = accel_dev->s_accel;
+ HostIOMMUDeviceIOMMUFD *idev = accel_dev->idev;
+ IOMMUFDVdev *vdev = accel_dev->vdev;
+ uint32_t vdevice_id;
+
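+ /* Nothing to do without a host device, or if a vdev already exists */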
+ if (!idev || vdev) {
+ return true;
+ }
+
+ if (!iommufd_backend_alloc_vdev(idev->iommufd, idev->devid,
+ accel->viommu->viommu_id, sid,
+ &vdevice_id, errp)) {
+ return false;
+ }
+
+ vdev = g_new(IOMMUFDVdev, 1);
+ vdev->vdevice_id = vdevice_id;
+ vdev->virt_id = sid;
+ accel_dev->vdev = vdev;
+ return true;
+}
+
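+/*
+ * Allocate a nested stage-1 HWPT for a translate STE. Only the
+ * guest-owned STE fields (selected by STE0_MASK/STE1_MASK) are passed
+ * to the host as SMMUv3 nesting data.
+ */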
+static SMMUS1Hwpt *
+smmuv3_accel_dev_alloc_translate(SMMUv3AccelDevice *accel_dev, STE *ste,
+ Error **errp)
+{
+ uint64_t ste_0 = (uint64_t)ste->word[0] | (uint64_t)ste->word[1] << 32;
+ uint64_t ste_1 = (uint64_t)ste->word[2] | (uint64_t)ste->word[3] << 32;
+ HostIOMMUDeviceIOMMUFD *idev = accel_dev->idev;
+ SMMUv3AccelState *accel = accel_dev->s_accel;
+ struct iommu_hwpt_arm_smmuv3 nested_data = {
+ .ste = {
+ cpu_to_le64(ste_0 & STE0_MASK),
+ cpu_to_le64(ste_1 & STE1_MASK),
+ },
+ };
+ uint32_t hwpt_id = 0, flags = 0;
+ SMMUS1Hwpt *s1_hwpt;
+
+ if (!iommufd_backend_alloc_hwpt(idev->iommufd, idev->devid,
+ accel->viommu->viommu_id, flags,
+ IOMMU_HWPT_DATA_ARM_SMMUV3,
+ sizeof(nested_data), &nested_data,
+ &hwpt_id, errp)) {
+ return NULL;
+ }
+
+ s1_hwpt = g_new0(SMMUS1Hwpt, 1);
+ s1_hwpt->hwpt_id = hwpt_id;
+ trace_smmuv3_accel_translate_ste(accel_dev->vdev->virt_id, hwpt_id,
+ nested_data.ste[1], nested_data.ste[0]);
+ return s1_hwpt;
+}
+
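+/*
+ * Resolve the guest STE for @sid and attach the corresponding HWPT to
+ * the host device: the pre-allocated abort/bypass HWPT, or a newly
+ * allocated nested stage-1 HWPT for a translate STE.
+ */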
+bool smmuv3_accel_install_ste(SMMUv3State *s, SMMUDevice *sdev, int sid,
+ Error **errp)
+{
+ SMMUEventInfo event = {.type = SMMU_EVT_NONE, .sid = sid,
+ .inval_ste_allowed = true};
+ SMMUv3AccelState *accel = s->s_accel;
+ SMMUv3AccelDevice *accel_dev;
+ HostIOMMUDeviceIOMMUFD *idev;
+ uint32_t config, hwpt_id = 0;
+ SMMUS1Hwpt *s1_hwpt = NULL;
+ const char *type;
+ STE ste;
+
+ if (!accel || !accel->viommu) {
+ return true;
+ }
+
+ accel_dev = container_of(sdev, SMMUv3AccelDevice, sdev);
+ if (!accel_dev->s_accel) {
+ return true;
+ }
+
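+ /* Bind the guest SID to the host device via a vDEVICE, if not done yet */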
+ idev = accel_dev->idev;
+ if (!smmuv3_accel_alloc_vdev(accel_dev, sid, errp)) {
+ return false;
+ }
+
+ if (smmu_find_ste(sdev->smmu, sid, &ste, &event)) {
+ /* No STE found, nothing to install */
+ return true;
+ }
+
+ /*
+ * Install the STE based on whether the SMMU is enabled and on the
+ * STE config:
+ * - attach a pre-allocated HWPT for abort/bypass STEs
+ * - or allocate and attach a new nested HWPT for a translate STE
+ *
+ * Note: the vdev remains associated with accel_dev even if HWPT
+ * alloc/attach fails, since the guest-to-host SID mapping stays
+ * valid as long as the device is behind the accelerated SMMUv3.
+ */
+ if (!smmu_enabled(s)) {
+ hwpt_id = smmuv3_accel_gbpa_hwpt(s, accel);
+ } else {
+ config = STE_CONFIG(&ste);
+
+ if (!STE_VALID(&ste) || STE_CFG_ABORT(config)) {
+ hwpt_id = accel->abort_hwpt_id;
+ } else if (STE_CFG_BYPASS(config)) {
+ hwpt_id = accel->bypass_hwpt_id;
+ } else if (STE_CFG_S1_TRANSLATE(config)) {
+ s1_hwpt = smmuv3_accel_dev_alloc_translate(accel_dev, &ste, errp);
+ if (!s1_hwpt) {
+ return false;
+ }
+ hwpt_id = s1_hwpt->hwpt_id;
+ }
+ }
+
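+ /* Any remaining config (e.g. stage-2 translate) is not supported */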
+ if (!hwpt_id) {
+ error_setg(errp, "Invalid STE config for sid 0x%x", sid);
+ return false;
+ }
+
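+ /* Attach the selected HWPT; on failure, drop a newly allocated one */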
+ if (!host_iommu_device_iommufd_attach_hwpt(idev, hwpt_id, errp)) {
+ if (s1_hwpt) {
+ iommufd_backend_free_id(idev->iommufd, s1_hwpt->hwpt_id);
+ g_free(s1_hwpt);
+ }
+ return false;
+ }
+
+ /* Attach succeeded: free any s1_hwpt left over from a previous STE */
+ if (accel_dev->s1_hwpt) {
+ iommufd_backend_free_id(idev->iommufd, accel_dev->s1_hwpt->hwpt_id);
+ g_free(accel_dev->s1_hwpt);
+ }
+
+ accel_dev->s1_hwpt = s1_hwpt;
+ if (hwpt_id == accel->abort_hwpt_id) {
+ type = "abort";
+ } else if (hwpt_id == accel->bypass_hwpt_id) {
+ type = "bypass";
+ } else {
+ type = "translate";
+ }
+
+ trace_smmuv3_accel_install_ste(sid, type, hwpt_id);
+ return true;
+}
+
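+/*
+ * Re-install STEs for all accelerated devices whose SID falls within
+ * @range. Failures are reported per device and installation continues;
+ * returns false if any STE failed to install.
+ */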
+bool smmuv3_accel_install_ste_range(SMMUv3State *s, SMMUSIDRange *range,
+ Error **errp)
+{
+ SMMUv3AccelState *accel = s->s_accel;
+ SMMUv3AccelDevice *accel_dev;
+ Error *local_err = NULL;
+ bool all_ok = true;
+
+ if (!accel || !accel->viommu) {
+ return true;
+ }
+
+ QLIST_FOREACH(accel_dev, &accel->device_list, next) {
+ uint32_t sid = smmu_get_sid(&accel_dev->sdev);
+
+ if (sid >= range->start && sid <= range->end) {
+ if (!smmuv3_accel_install_ste(s, &accel_dev->sdev,
+ sid, &local_err)) {
+ error_append_hint(&local_err, "Device 0x%x: Failed to install "
+ "STE\n", sid);
+ error_report_err(local_err);
+ local_err = NULL;
+ all_ok = false;
+ }
+ }
+ }
+
+ if (!all_ok) {
+ error_setg(errp, "Failed to install STEs for one or more devices");
+ }
+ return all_ok;
+}
+
static bool
smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
Error **errp)
HostIOMMUDeviceIOMMUFD *idev;
SMMUv3AccelDevice *accel_dev;
SMMUv3AccelState *accel;
+ IOMMUFDVdev *vdev;
SMMUDevice *sdev;
if (!sbus) {
"0x%x", idev->devid);
}
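+ /* Tear down any stage-1 HWPT this device still holds */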
+ if (accel_dev->s1_hwpt) {
+ iommufd_backend_free_id(accel_dev->idev->iommufd,
+ accel_dev->s1_hwpt->hwpt_id);
+ g_free(accel_dev->s1_hwpt);
+ accel_dev->s1_hwpt = NULL;
+ }
+
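+ /* Free the vDEVICE so the guest SID to host device mapping is removed */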
+ vdev = accel_dev->vdev;
+ if (vdev) {
+ iommufd_backend_free_id(accel->viommu->iommufd, vdev->vdevice_id);
+ g_free(vdev);
+ accel_dev->vdev = NULL;
+ }
+
accel_dev->idev = NULL;
accel_dev->s_accel = NULL;
QLIST_REMOVE(accel_dev, next);
#define STE_CFG_S2_ENABLED(config) (config & 0x2)
#define STE_CFG_ABORT(config) (!(config & 0x4))
#define STE_CFG_BYPASS(config) (config == 0x4)
+#define STE_CFG_S1_TRANSLATE(config) (config == 0x5)
#define SMMU_STE_VALID (1ULL << 0)
#define SMMU_STE_CFG_BYPASS (1ULL << 3)
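+/*
+ * Masks of the guest-owned STE word 0/1 fields that are forwarded to
+ * the host when allocating a nested stage-1 HWPT.
+ */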
+#define STE0_V MAKE_64BIT_MASK(0, 1)
+#define STE0_CONFIG MAKE_64BIT_MASK(1, 3)
+#define STE0_S1FMT MAKE_64BIT_MASK(4, 2)
+#define STE0_CTXPTR MAKE_64BIT_MASK(6, 50)
+#define STE0_S1CDMAX MAKE_64BIT_MASK(59, 5)
+#define STE0_MASK (STE0_S1CDMAX | STE0_CTXPTR | STE0_S1FMT | STE0_CONFIG | \
+ STE0_V)
+
+#define STE1_S1DSS MAKE_64BIT_MASK(0, 2)
+#define STE1_S1CIR MAKE_64BIT_MASK(2, 2)
+#define STE1_S1COR MAKE_64BIT_MASK(4, 2)
+#define STE1_S1CSH MAKE_64BIT_MASK(6, 2)
+#define STE1_S1STALLD MAKE_64BIT_MASK(27, 1)
+#define STE1_EATS MAKE_64BIT_MASK(28, 2)
+#define STE1_MASK (STE1_EATS | STE1_S1STALLD | STE1_S1CSH | STE1_S1COR | \
+ STE1_S1CIR | STE1_S1DSS)
+
/* Update STE fields */
#define STE_SET_VALID(ste, v) \
((ste)->word[0] = FIELD_DP32((ste)->word[0], STE_0, VALID, (v)))