spin_lock_init(&domain->lock);
spin_lock_init(&domain->cache_lock);
xa_init(&domain->iommu_array);
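+	/* s1_domains/s1_lock track first-stage domains nested on this domain */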
+ INIT_LIST_HEAD(&domain->s1_domains);
+ spin_lock_init(&domain->s1_lock);
domain->nid = dev_to_node(dev);
domain->use_first_level = first_stage;
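+	/* All paging domains start as UNMANAGED domains with the default ops */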
+ domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
+ domain->domain.ops = intel_iommu_ops.default_domain_ops;
+
/* calculate the address width */
addr_width = agaw_to_width(iommu->agaw);
if (addr_width > cap_mgaw(iommu->cap))
@@ ... @@
}
static struct iommu_domain *
-intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
- const struct iommu_user_data *user_data)
+intel_iommu_domain_alloc_first_stage(struct device *dev,
+ struct intel_iommu *iommu, u32 flags)
+{
+ struct dmar_domain *dmar_domain;
+
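+	/*
+	 * FL cannot be a nesting parent and does not support dirty
+	 * tracking; rejecting those flags here lets the caller fall
+	 * back to the second stage.
+	 */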
+ if (flags & ~IOMMU_HWPT_ALLOC_PASID)
+ return ERR_PTR(-EOPNOTSUPP);
+
+	/* FL requires scalable mode with FLTS; legacy mode has only SL */
+ if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ dmar_domain = paging_domain_alloc(dev, true);
+ if (IS_ERR(dmar_domain))
+ return ERR_CAST(dmar_domain);
+ return &dmar_domain->domain;
+}
+
+static struct iommu_domain *
+intel_iommu_domain_alloc_second_stage(struct device *dev,
+ struct intel_iommu *iommu, u32 flags)
{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
- bool nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
- struct intel_iommu *iommu = info->iommu;
struct dmar_domain *dmar_domain;
- struct iommu_domain *domain;
- bool first_stage;
if (flags &
(~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
IOMMU_HWPT_ALLOC_PASID)))
return ERR_PTR(-EOPNOTSUPP);
- if (nested_parent && !nested_supported(iommu))
- return ERR_PTR(-EOPNOTSUPP);
- if (user_data || (dirty_tracking && !ssads_supported(iommu)))
+
+ if (((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) &&
+ !nested_supported(iommu)) ||
+ ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
+ !ssads_supported(iommu)))
return ERR_PTR(-EOPNOTSUPP);
- /*
- * Always allocate the guest compatible page table unless
- * IOMMU_HWPT_ALLOC_NEST_PARENT or IOMMU_HWPT_ALLOC_DIRTY_TRACKING
- * is specified.
- */
- if (nested_parent || dirty_tracking) {
- if (!sm_supported(iommu) || !ecap_slts(iommu->ecap))
- return ERR_PTR(-EOPNOTSUPP);
- first_stage = false;
- } else {
- first_stage = first_level_by_default(iommu);
- }
+ /* Legacy mode always supports second stage */
+ if (sm_supported(iommu) && !ecap_slts(iommu->ecap))
+ return ERR_PTR(-EOPNOTSUPP);
- dmar_domain = paging_domain_alloc(dev, first_stage);
+ dmar_domain = paging_domain_alloc(dev, false);
if (IS_ERR(dmar_domain))
return ERR_CAST(dmar_domain);
- domain = &dmar_domain->domain;
- domain->type = IOMMU_DOMAIN_UNMANAGED;
- domain->owner = &intel_iommu_ops;
- domain->ops = intel_iommu_ops.default_domain_ops;
-
- if (nested_parent) {
- dmar_domain->nested_parent = true;
- INIT_LIST_HEAD(&dmar_domain->s1_domains);
- spin_lock_init(&dmar_domain->s1_lock);
- }
- if (dirty_tracking) {
- if (dmar_domain->use_first_level) {
- iommu_domain_free(domain);
- return ERR_PTR(-EOPNOTSUPP);
- }
- domain->dirty_ops = &intel_dirty_ops;
- }
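+	/* Record whether this domain may act as a parent for nesting */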
+ dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
- return domain;
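+	/* ssads support was checked above, so dirty tracking is safe to enable */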
+ if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
+ dmar_domain->domain.dirty_ops = &intel_dirty_ops;
+
+ return &dmar_domain->domain;
+}
+
+static struct iommu_domain *
+intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
+ const struct iommu_user_data *user_data)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct iommu_domain *domain;
+
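+	/* No driver-specific user_data format is defined for paging domains */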
+ if (user_data)
+ return ERR_PTR(-EOPNOTSUPP);
+
+	/* Prefer the first stage by default; fall back to the second stage if unsupported. */
+ domain = intel_iommu_domain_alloc_first_stage(dev, iommu, flags);
+ if (domain != ERR_PTR(-EOPNOTSUPP))
+ return domain;
+ return intel_iommu_domain_alloc_second_stage(dev, iommu, flags);
}
static void intel_iommu_domain_free(struct iommu_domain *domain)