 		list_del(&unbound->unbound_next);
 		kfree(unbound);
 	}
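+	/* pairs with iommu_group_ref_get() in vfio_create_group() */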
+	iommu_group_put(group->iommu_group);
 	kfree(group);
 }
 	atomic_set(&group->opened, 0);
 	init_waitqueue_head(&group->container_q);
 	group->iommu_group = iommu_group;
+	/* put in vfio_group_unlock_and_free() */
+	iommu_group_ref_get(iommu_group);
 	group->type = type;
 	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
 	group->nb.notifier_call = vfio_iommu_group_notifier;
 	ret = iommu_group_register_notifier(iommu_group, &group->nb);
 	if (ret) {
+		iommu_group_put(iommu_group);
 		kfree(group);
 		return ERR_PTR(ret);
 	}
 	list_add(&group->vfio_next, &vfio.group_list);
 	mutex_unlock(&vfio.group_lock);
-
 	return group;
 }
 static void vfio_group_release(struct kref *kref)
 {
 	struct vfio_group *group = container_of(kref, struct vfio_group, kref);
-	struct iommu_group *iommu_group = group->iommu_group;
 	/*
 	 * These data structures all have paired operations that can only be
 	list_del(&group->vfio_next);
 	vfio_free_group_minor(group->minor);
 	vfio_group_unlock_and_free(group);
-	iommu_group_put(iommu_group);
 }
 static void vfio_group_put(struct vfio_group *group)
 		ret = PTR_ERR(group);
 		goto out_remove_device;
 	}
-
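+	/* the new vfio_group holds its own iommu_group reference */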
+	iommu_group_put(iommu_group);
 	return group;
 out_remove_device:
 	if (!iommu_group)
 		return ERR_PTR(-EINVAL);
-	/* a found vfio_group already holds a reference to the iommu_group */
 	group = vfio_group_get_from_iommu(iommu_group);
-	if (group)
-		goto out_put;
-
-	/* a newly created vfio_group keeps the reference. */
-	group = vfio_create_group(iommu_group, VFIO_IOMMU);
-	if (IS_ERR(group))
-		goto out_put;
-	return group;
+	if (!group)
+		group = vfio_create_group(iommu_group, VFIO_IOMMU);
 
-out_put:
+	/* The vfio_group holds a reference to the iommu_group */
 	iommu_group_put(iommu_group);
 	return group;
 }