return 0;
}
+/*
+ * Comparator for sort_nonatomic() over master->streams[]: orders entries by
+ * stream ID (cmp_int() returns <0, 0, or >0 for ascending order).
+ *
+ * NOTE(review): the void pointers are element pointers into the streams
+ * array, but they are cast straight to pointers to the 'id' member — this
+ * only works if 'id' is the first member of struct arm_smmu_stream. The
+ * struct definition is not visible in this hunk; confirm the member layout.
+ */
+static int arm_smmu_stream_id_cmp(const void *_l, const void *_r)
+{
+ const typeof_member(struct arm_smmu_stream, id) *l = _l;
+ const typeof_member(struct arm_smmu_stream, id) *r = _r;
+
+ return cmp_int(*l, *r);
+}
+
+/*
+ * Register @master's stream IDs with @smmu and pre-allocate the per-master
+ * scratch invalidation array (build_invs). The streams array is sorted by
+ * stream ID before taking streams_mutex so later to_merge/to_unref builds
+ * see an ordered list. Returns 0 on success or a negative errno; on failure
+ * both master->streams and master->build_invs are freed.
+ *
+ * NOTE(review): the rb_erase() calls on the error path imply each stream is
+ * inserted into smmu->streams inside the locked loop — that insert is in
+ * context lines elided from this hunk; verify against the full file.
+ */
static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
 struct arm_smmu_master *master)
{
 int i;
 int ret = 0;
 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+ /* ATS-capable PCI masters need one extra invalidation slot per SID */
+ bool ats_supported = dev_is_pci(master->dev) &&
+ pci_ats_supported(to_pci_dev(master->dev));
 master->streams = kzalloc_objs(*master->streams, fwspec->num_ids);
 if (!master->streams)
 return -ENOMEM;
 master->num_streams = fwspec->num_ids;
- mutex_lock(&smmu->streams_mutex);
+ if (!ats_supported) {
+ /* Base case has 1 ASID entry or maximum 2 VMID entries */
+ master->build_invs = arm_smmu_invs_alloc(2);
+ } else {
+ /* ATS case adds num_ids of entries, on top of the base case */
+ master->build_invs = arm_smmu_invs_alloc(2 + fwspec->num_ids);
+ }
+ if (!master->build_invs) {
+ kfree(master->streams);
+ return -ENOMEM;
+ }
+
+ /* Fill in the IDs first; sorting below needs the whole array populated */
 for (i = 0; i < fwspec->num_ids; i++) {
 struct arm_smmu_stream *new_stream = &master->streams[i];
- struct rb_node *existing;
- u32 sid = fwspec->ids[i];
- new_stream->id = sid;
+ new_stream->id = fwspec->ids[i];
 new_stream->master = master;
+ }
+
+ /* Put the ids into order for sorted to_merge/to_unref arrays */
+ sort_nonatomic(master->streams, master->num_streams,
+ sizeof(master->streams[0]), arm_smmu_stream_id_cmp,
+ NULL);
+
+ mutex_lock(&smmu->streams_mutex);
+ for (i = 0; i < fwspec->num_ids; i++) {
+ struct arm_smmu_stream *new_stream = &master->streams[i];
+ struct rb_node *existing;
+ u32 sid = new_stream->id;
+ /*
+ * NOTE(review): unchanged context is elided between here and the
+ * end of the function — the error path (rb_erase unwind, frees,
+ * unlock, return) and the normal exit are separate branches in
+ * the full file, which is why mutex_unlock()/kfree() appear
+ * twice below.
+ */
 ret = arm_smmu_init_sid_strtab(smmu, sid);
 if (ret)
 for (i--; i >= 0; i--)
 rb_erase(&master->streams[i].node, &smmu->streams);
 kfree(master->streams);
+ /* build_invs must be released on every path that frees streams */
+ kfree(master->build_invs);
 }
 mutex_unlock(&smmu->streams_mutex);
 mutex_unlock(&smmu->streams_mutex);
 kfree(master->streams);
+ kfree(master->build_invs);
}
static struct iommu_device *arm_smmu_probe_device(struct device *dev)
struct arm_smmu_device *smmu;
struct device *dev;
struct arm_smmu_stream *streams;
+ /*
+ * Scratch memory for a to_merge or to_unref array to build a per-domain
+ * invalidation array. It'll be pre-allocated with enough entries for all
+ * possible build scenarios. It can be used by only one caller at a time
+ * until the arm_smmu_invs_merge/unref() finishes. Must be locked by the
+ * iommu_group mutex.
+ */
+ struct arm_smmu_invs *build_invs;
struct arm_smmu_vmaster *vmaster; /* use smmu->streams_mutex */
/* Locked by the iommu core using the group mutex */
struct arm_smmu_ctx_desc_cfg cd_table;