drm/xe/uapi: disallow bind queue sharing
Author:     Matthew Auld <matthew.auld@intel.com>
AuthorDate: Tue, 20 Jan 2026 11:06:10 +0000 (11:06 +0000)
Commit:     Thomas Hellström <thomas.hellstrom@linux.intel.com>
CommitDate: Wed, 21 Jan 2026 14:24:16 +0000 (15:24 +0100)
Currently this is very broken if someone attempts to create a bind
queue and share it across multiple VMs. For example, we currently assume
it is safe to acquire the user VM lock to protect some of the bind queue
state, but if we allow sharing the bind queue with multiple VMs then this
quickly breaks down.

To fix this, reject using a bind queue with any VM other than the VM
that was originally passed when creating the bind queue. This is a uAPI
change, however not rejecting this was more of an oversight on the
kernel side, and the expectation is that userspace shouldn't be using
bind queues in this way, so in theory this change should go unnoticed.
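
For illustration only (not part of the patch), a rough userspace sketch
of the pattern that is now rejected. It assumes the existing xe_drm.h
uAPI (DRM_IOCTL_XE_VM_CREATE, DRM_IOCTL_XE_EXEC_QUEUE_CREATE,
DRM_IOCTL_XE_VM_BIND); the helper name, fd, bo_handle and the addresses
are placeholders and error handling is omitted:

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <drm/xe_drm.h>

  static void bind_queue_sharing_example(int fd, uint32_t bo_handle)
  {
          struct drm_xe_vm_create vm_a = { 0 }, vm_b = { 0 };
          struct drm_xe_engine_class_instance inst = {
                  .engine_class = DRM_XE_ENGINE_CLASS_VM_BIND,
          };

          ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &vm_a);
          ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &vm_b);

          /* Bind queue is created against vm_a ... */
          struct drm_xe_exec_queue_create qc = {
                  .width = 1,
                  .num_placements = 1,
                  .vm_id = vm_a.vm_id,
                  .instances = (uintptr_t)&inst,
          };
          ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &qc);

          /* ... but then used to bind into vm_b. */
          struct drm_xe_vm_bind bind = {
                  .vm_id = vm_b.vm_id,
                  .exec_queue_id = qc.exec_queue_id,
                  .num_binds = 1,
                  .bind = {
                          .op = DRM_XE_VM_BIND_OP_MAP,
                          .obj = bo_handle,
                          .addr = 0x1a0000,
                          .range = 0x10000,
                  },
          };
          /* Previously silently accepted; now fails with -EINVAL. */
          ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
  }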

Based on a patch from Matt Brost.

v2 (Matt B):
  - Hold the vm lock over queue creation to ensure the VM can't be closed
    while we attach the user_vm to the queue.
  - Make sure we actually check for NULL user_vm in the destruction path.
v3:
  - Fix error path handling.

Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Reported-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: José Roberto de Souza <jose.souza@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Michal Mrozek <michal.mrozek@intel.com>
Cc: Carl Zhang <carl.zhang@intel.com>
Cc: <stable@vger.kernel.org> # v6.8+
Acked-by: José Roberto de Souza <jose.souza@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Arvind Yadav <arvind.yadav@intel.com>
Acked-by: Michal Mrozek <michal.mrozek@intel.com>
Link: https://patch.msgid.link/20260120110609.77958-3-matthew.auld@intel.com
(cherry picked from commit 9dd08fdecc0c98d6516c2d2d1fa189c1332f8dab)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
drivers/gpu/drm/xe/xe_exec_queue.c
drivers/gpu/drm/xe/xe_exec_queue.h
drivers/gpu/drm/xe/xe_exec_queue_types.h
drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
drivers/gpu/drm/xe/xe_vm.c

drivers/gpu/drm/xe/xe_exec_queue.c
index 8724f8de67e2738dffef72aeab2a67d8cfe35656..779d7e7e2d2ec8dd4899ce6232a343c2ba5f7ba6 100644
@@ -328,6 +328,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
  * @xe: Xe device.
  * @tile: tile which bind exec queue belongs to.
  * @flags: exec queue creation flags
+ * @user_vm: The user VM which this exec queue belongs to
  * @extensions: exec queue creation extensions
  *
  * Normalize bind exec queue creation. Bind exec queue is tied to migration VM
@@ -341,6 +342,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
  */
 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
                                                struct xe_tile *tile,
+                                               struct xe_vm *user_vm,
                                                u32 flags, u64 extensions)
 {
        struct xe_gt *gt = tile->primary_gt;
@@ -377,6 +379,9 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
                        xe_exec_queue_put(q);
                        return ERR_PTR(err);
                }
+
+               if (user_vm)
+                       q->user_vm = xe_vm_get(user_vm);
        }
 
        return q;
@@ -407,6 +412,11 @@ void xe_exec_queue_destroy(struct kref *ref)
                        xe_exec_queue_put(eq);
        }
 
+       if (q->user_vm) {
+               xe_vm_put(q->user_vm);
+               q->user_vm = NULL;
+       }
+
        q->ops->destroy(q);
 }
 
@@ -742,6 +752,22 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
                    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
                        return -EINVAL;
 
+               vm = xe_vm_lookup(xef, args->vm_id);
+               if (XE_IOCTL_DBG(xe, !vm))
+                       return -ENOENT;
+
+               err = down_read_interruptible(&vm->lock);
+               if (err) {
+                       xe_vm_put(vm);
+                       return err;
+               }
+
+               if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
+                       up_read(&vm->lock);
+                       xe_vm_put(vm);
+                       return -ENOENT;
+               }
+
                for_each_tile(tile, xe, id) {
                        struct xe_exec_queue *new;
 
@@ -749,9 +775,11 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
                        if (id)
                                flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
 
-                       new = xe_exec_queue_create_bind(xe, tile, flags,
+                       new = xe_exec_queue_create_bind(xe, tile, vm, flags,
                                                        args->extensions);
                        if (IS_ERR(new)) {
+                               up_read(&vm->lock);
+                               xe_vm_put(vm);
                                err = PTR_ERR(new);
                                if (q)
                                        goto put_exec_queue;
@@ -763,6 +791,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
                                list_add_tail(&new->multi_gt_list,
                                              &q->multi_gt_link);
                }
+               up_read(&vm->lock);
+               xe_vm_put(vm);
        } else {
                logical_mask = calc_validate_logical_mask(xe, eci,
                                                          args->width,
drivers/gpu/drm/xe/xe_exec_queue.h
index fda4d4f9bda86bd69296b27ac9e9012bc2d5f734..37a9da22f4206a57ef64072325c9e8ce92fbdf19 100644
@@ -28,6 +28,7 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe
                                                 u32 flags, u64 extensions);
 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
                                                struct xe_tile *tile,
+                                               struct xe_vm *user_vm,
                                                u32 flags, u64 extensions);
 
 void xe_exec_queue_fini(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_exec_queue_types.h
index 771ffe35cd0c6c54e6308643fdf69c6c56790aac..3a4263c92b3d32f3240ea6f4994b4fb49f53903a 100644
@@ -54,6 +54,12 @@ struct xe_exec_queue {
        struct kref refcount;
        /** @vm: VM (address space) for this exec queue */
        struct xe_vm *vm;
+       /**
+        * @user_vm: User VM (address space) for this exec queue (bind queues
+        * only)
+        */
+       struct xe_vm *user_vm;
+
        /** @class: class of this exec queue */
        enum xe_engine_class class;
        /**
drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
index 797a4b86622620da2d73a8abc9dd42ea3c72ab2a..d963231b513577fa4481e84043391d01274f0b7a 100644
@@ -346,7 +346,7 @@ int xe_sriov_vf_ccs_init(struct xe_device *xe)
                flags = EXEC_QUEUE_FLAG_KERNEL |
                        EXEC_QUEUE_FLAG_PERMANENT |
                        EXEC_QUEUE_FLAG_MIGRATE;
-               q = xe_exec_queue_create_bind(xe, tile, flags, 0);
+               q = xe_exec_queue_create_bind(xe, tile, NULL, flags, 0);
                if (IS_ERR(q)) {
                        err = PTR_ERR(q);
                        goto err_ret;
drivers/gpu/drm/xe/xe_vm.c
index 79ab6c512d3e0dee45d5af8ad4a2487da5a2198b..095bb197e8b05d093cf3fee067da1243e4dba018 100644
@@ -1617,7 +1617,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
                        if (!vm->pt_root[id])
                                continue;
 
-                       q = xe_exec_queue_create_bind(xe, tile, create_flags, 0);
+                       q = xe_exec_queue_create_bind(xe, tile, vm, create_flags, 0);
                        if (IS_ERR(q)) {
                                err = PTR_ERR(q);
                                goto err_close;
@@ -3578,6 +3578,11 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                }
        }
 
+       if (XE_IOCTL_DBG(xe, q && vm != q->user_vm)) {
+               err = -EINVAL;
+               goto put_exec_queue;
+       }
+
        /* Ensure all UNMAPs visible */
        xe_svm_flush(vm);