From: Matthew Auld
Date: Tue, 20 Jan 2026 11:06:10 +0000 (+0000)
Subject: drm/xe/uapi: disallow bind queue sharing
X-Git-Tag: v6.19-rc7~34^2~2^2~4
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=6f4b7aed61817624250e590ba0ef304146d34614;p=thirdparty%2Fkernel%2Flinux.git

drm/xe/uapi: disallow bind queue sharing

Currently this is very broken if someone attempts to create a bind
queue and share it across multiple VMs. For example, we currently
assume it is safe to acquire the user VM lock to protect some of the
bind queue state, but if we allow sharing the bind queue with multiple
VMs then this quickly breaks down. To fix this, reject using a bind
queue with any VM other than the one that was originally passed when
creating the bind queue. This is a uAPI change; however, it was more
of an oversight on the kernel side that we didn't reject this, and the
expectation is that userspace shouldn't be using bind queues in this
way, so in theory this change should go unnoticed.

Based on a patch from Matt Brost.

v2 (Matt B):
  - Hold the vm lock over queue create, to ensure it can't be closed
    as we attach the user_vm to the queue.
  - Make sure we actually check for NULL user_vm in the destruction
    path.
v3:
  - Fix error path handling.

Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Reported-by: Thomas Hellström
Signed-off-by: Matthew Auld
Cc: José Roberto de Souza
Cc: Matthew Brost
Cc: Michal Mrozek
Cc: Carl Zhang
Cc: <stable@vger.kernel.org> # v6.8+
Acked-by: José Roberto de Souza
Reviewed-by: Matthew Brost
Reviewed-by: Arvind Yadav
Acked-by: Michal Mrozek
Link: https://patch.msgid.link/20260120110609.77958-3-matthew.auld@intel.com
(cherry picked from commit 9dd08fdecc0c98d6516c2d2d1fa189c1332f8dab)
Signed-off-by: Thomas Hellström
---
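To illustrate the uAPI behaviour being enforced, a minimal userspace
sketch follows; it is not part of the patch. The ioctl structures and
the VM_BIND engine class come from the xe uAPI header, while the
helper function, its flow, and the elided bind-op setup are
hypothetical, with error handling mostly omitted for brevity.

/*
 * Sketch of the pattern this change rejects: a bind queue created
 * against one VM being used to bind into another VM. Assumes the xe
 * uAPI header from the installed kernel headers; the function itself
 * is hypothetical.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static void bind_queue_sharing_is_rejected(int fd)
{
	struct drm_xe_engine_class_instance inst = {
		.engine_class = DRM_XE_ENGINE_CLASS_VM_BIND,
	};
	struct drm_xe_vm_create vm1 = {}, vm2 = {};
	struct drm_xe_exec_queue_create qc = {
		.width = 1,
		.num_placements = 1,
		.instances = (uintptr_t)&inst,
	};
	struct drm_xe_vm_bind bind = {};

	ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &vm1);
	ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &vm2);

	/* The bind queue is created against, and now tied to, vm1. */
	qc.vm_id = vm1.vm_id;
	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &qc);

	/* Reusing vm1's bind queue on vm2 now fails (errno == EINVAL). */
	bind.vm_id = vm2.vm_id;
	bind.exec_queue_id = qc.exec_queue_id;
	bind.num_binds = 1;
	/* ... bind.bind op setup elided ... */
	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
}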
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 8724f8de67e2..779d7e7e2d2e 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -328,6 +328,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
  * @xe: Xe device.
  * @tile: tile which bind exec queue belongs to.
  * @flags: exec queue creation flags
+ * @user_vm: The user VM which this exec queue belongs to
  * @extensions: exec queue creation extensions
  *
  * Normalize bind exec queue creation. Bind exec queue is tied to migration VM
@@ -341,6 +342,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
  */
 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
 						struct xe_tile *tile,
+						struct xe_vm *user_vm,
 						u32 flags, u64 extensions)
 {
 	struct xe_gt *gt = tile->primary_gt;
@@ -377,6 +379,9 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
 			xe_exec_queue_put(q);
 			return ERR_PTR(err);
 		}
+
+		if (user_vm)
+			q->user_vm = xe_vm_get(user_vm);
 	}
 
 	return q;
@@ -407,6 +412,11 @@ void xe_exec_queue_destroy(struct kref *ref)
 			xe_exec_queue_put(eq);
 	}
 
+	if (q->user_vm) {
+		xe_vm_put(q->user_vm);
+		q->user_vm = NULL;
+	}
+
 	q->ops->destroy(q);
 }
@@ -742,6 +752,22 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 		    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
 			return -EINVAL;
 
+		vm = xe_vm_lookup(xef, args->vm_id);
+		if (XE_IOCTL_DBG(xe, !vm))
+			return -ENOENT;
+
+		err = down_read_interruptible(&vm->lock);
+		if (err) {
+			xe_vm_put(vm);
+			return err;
+		}
+
+		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
+			up_read(&vm->lock);
+			xe_vm_put(vm);
+			return -ENOENT;
+		}
+
 		for_each_tile(tile, xe, id) {
 			struct xe_exec_queue *new;
 
@@ -749,9 +775,11 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 			if (id)
 				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
 
-			new = xe_exec_queue_create_bind(xe, tile, flags,
+			new = xe_exec_queue_create_bind(xe, tile, vm, flags,
 							args->extensions);
 			if (IS_ERR(new)) {
+				up_read(&vm->lock);
+				xe_vm_put(vm);
 				err = PTR_ERR(new);
 				if (q)
 					goto put_exec_queue;
@@ -763,6 +791,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 			list_add_tail(&new->multi_gt_list,
 				      &q->multi_gt_link);
 		}
+		up_read(&vm->lock);
+		xe_vm_put(vm);
 	} else {
 		logical_mask = calc_validate_logical_mask(xe, eci,
 							  args->width,
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index fda4d4f9bda8..37a9da22f420 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -28,6 +28,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
 						 u32 flags, u64 extensions);
 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
 						struct xe_tile *tile,
+						struct xe_vm *user_vm,
 						u32 flags, u64 extensions);
 void xe_exec_queue_fini(struct xe_exec_queue *q);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 771ffe35cd0c..3a4263c92b3d 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -54,6 +54,12 @@ struct xe_exec_queue {
 	struct kref refcount;
 	/** @vm: VM (address space) for this exec queue */
 	struct xe_vm *vm;
+	/**
+	 * @user_vm: User VM (address space) for this exec queue (bind queues
+	 * only)
+	 */
+	struct xe_vm *user_vm;
+
 	/** @class: class of this exec queue */
 	enum xe_engine_class class;
 	/**
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
index 797a4b866226..d963231b5135 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
@@ -346,7 +346,7 @@ int xe_sriov_vf_ccs_init(struct xe_device *xe)
 		flags = EXEC_QUEUE_FLAG_KERNEL |
 			EXEC_QUEUE_FLAG_PERMANENT |
 			EXEC_QUEUE_FLAG_MIGRATE;
-		q = xe_exec_queue_create_bind(xe, tile, flags, 0);
+		q = xe_exec_queue_create_bind(xe, tile, NULL, flags, 0);
 		if (IS_ERR(q)) {
 			err = PTR_ERR(q);
 			goto err_ret;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 79ab6c512d3e..095bb197e8b0 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1617,7 +1617,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
 		if (!vm->pt_root[id])
 			continue;
 
-		q = xe_exec_queue_create_bind(xe, tile, create_flags, 0);
+		q = xe_exec_queue_create_bind(xe, tile, vm, create_flags, 0);
 		if (IS_ERR(q)) {
 			err = PTR_ERR(q);
 			goto err_close;
@@ -3578,6 +3578,11 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		}
 	}
 
+	if (XE_IOCTL_DBG(xe, q && vm != q->user_vm)) {
+		err = -EINVAL;
+		goto put_exec_queue;
+	}
+
 	/* Ensure all UNMAPs visible */
 	xe_svm_flush(vm);
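
For comparison, the supported pattern is one bind queue per VM:
create the queue with the vm_id of the VM it will service, since the
queue is now permanently tied to that VM. A sketch under the same
assumptions as the earlier example (the helper name and error
convention are hypothetical; <errno.h> is additionally required):

/*
 * Hypothetical helper: create a bind queue against the VM it will be
 * used with. Returns 0 and fills *queue_id on success, -errno on
 * failure.
 */
static int create_bind_queue(int fd, __u32 vm_id, __u32 *queue_id)
{
	struct drm_xe_engine_class_instance inst = {
		.engine_class = DRM_XE_ENGINE_CLASS_VM_BIND,
	};
	struct drm_xe_exec_queue_create qc = {
		.width = 1,
		.num_placements = 1,
		.instances = (uintptr_t)&inst,
		.vm_id = vm_id,
	};

	if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &qc))
		return -errno;

	*queue_id = qc.exec_queue_id;
	return 0;
}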