if (xe_exec_queue_is_multi_queue(q))
xe_exec_queue_group_cleanup(q);
- if (q->vm)
+ if (q->vm) {
+ xe_vm_remove_exec_queue(q->vm, q);
xe_vm_put(q->vm);
+ }
if (q->xef)
xe_file_put(q->xef);
q->ring_ops = gt->ring_ops[hwe->class];
q->ops = gt->exec_queue_ops;
INIT_LIST_HEAD(&q->lr.link);
+ INIT_LIST_HEAD(&q->vm_exec_queue_link);
INIT_LIST_HEAD(&q->multi_gt_link);
INIT_LIST_HEAD(&q->hw_engine_group_link);
INIT_LIST_HEAD(&q->pxp.link);
}
q->xef = xe_file_get(xef);
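+ /* Bind queues are not tracked for context-based TLB invalidation */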
+ if (eci[0].engine_class != DRM_XE_ENGINE_CLASS_VM_BIND)
+ xe_vm_add_exec_queue(vm, q);
/* user id alloc must always be last in ioctl to prevent UAF */
err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
INIT_LIST_HEAD(&vm->preempt.exec_queues);
+ for (id = 0; id < XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE; ++id)
+ INIT_LIST_HEAD(&vm->exec_queues.list[id]);
if (flags & XE_VM_FLAG_FAULT_MODE)
vm->preempt.min_run_period_ms = xe->min_run_period_pf_ms;
else
vm->preempt.min_run_period_ms = xe->min_run_period_lr_ms;
+ init_rwsem(&vm->exec_queues.lock);
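+ /*
+  * Prime lockdep: exec_queues.lock may be taken in the reclaim path, and
+  * the GuC CT lock nests inside it. Recording both orderings here lets
+  * lockdep flag violations even if those paths are rarely hit at runtime.
+  */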
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&vm->exec_queues.lock);
+ fs_reclaim_release(GFP_KERNEL);
+
+ down_read(&vm->exec_queues.lock);
+ might_lock(&xe_root_mmio_gt(xe)->uc.guc.ct.lock);
+ up_read(&vm->exec_queues.lock);
+ }
+
for_each_tile(tile, xe, id)
xe_range_fence_tree_init(&vm->rftree[id]);
return xe_vm_alloc_vma(vm, &map_req, false);
}
+/**
+ * xe_vm_add_exec_queue() - Add exec queue to VM
+ * @vm: The VM.
+ * @q: The exec queue.
+ *
+ * Add the exec queue to the VM's per-GT tracking list. Skipped if the device
+ * does not support context-based TLB invalidation.
+ */
+void xe_vm_add_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
+{
+ struct xe_device *xe = vm->xe;
+
+ /* User VMs and queues only */
+ xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_KERNEL));
+ xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
+ xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_VM));
+ xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_MIGRATE));
+ xe_assert(xe, vm->xef);
+ xe_assert(xe, vm == q->vm);
+
+ if (!xe->info.has_ctx_tlb_inval)
+ return;
+
+ down_write(&vm->exec_queues.lock);
+ list_add(&q->vm_exec_queue_link, &vm->exec_queues.list[q->gt->info.id]);
+ ++vm->exec_queues.count[q->gt->info.id];
+ up_write(&vm->exec_queues.lock);
+}
+
+/**
+ * xe_vm_remove_exec_queue() - Remove exec queue from VM
+ * @vm: The VM.
+ * @q: The exec queue.
+ *
+ * Remove the exec queue from the VM's per-GT tracking list. Skipped if the
+ * device does not support context-based TLB invalidation.
+ */
+void xe_vm_remove_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
+{
+ if (!vm->xe->info.has_ctx_tlb_inval)
+ return;
+
+ down_write(&vm->exec_queues.lock);
+ if (!list_empty(&q->vm_exec_queue_link)) {
+ list_del(&q->vm_exec_queue_link);
+ --vm->exec_queues.count[q->gt->info.id];
+ }
+ up_write(&vm->exec_queues.lock);
+}
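
For orientation, not part of the patch: a consumer of this tracking would walk the per-GT list under the read side of exec_queues.lock, roughly as sketched below. The function name is hypothetical; only the members added above plus the standard down_read()/list_for_each_entry() helpers are assumed.

/* Hypothetical sketch: visit every exec queue this VM has on a given GT,
 * e.g. to issue a context-based TLB invalidation per queue. */
static void vm_for_each_exec_queue_on_gt(struct xe_vm *vm, struct xe_gt *gt)
{
	struct xe_exec_queue *q;

	/* Read side excludes add/remove, which take the rwsem for write */
	down_read(&vm->exec_queues.lock);
	list_for_each_entry(q, &vm->exec_queues.list[gt->info.id],
			    vm_exec_queue_link) {
		/* per-queue work goes here */
	}
	up_read(&vm->exec_queues.lock);
}

The write side on queue create/destroy is a short critical section, while read-side walkers can run concurrently with each other, which suits an invalidation path that may fan out across GTs.
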
struct list_head pm_activate_link;
} preempt;
+ /** @exec_queues: Exec queues attached to this VM, tracked per GT under @exec_queues.lock. */
+ struct {
+ /**
+ * @exec_queues.list: list of exec queues attached to this VM,
+ * per GT
+ */
+ struct list_head list[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
+ /**
+ * @exec_queues.count: count of exec queues attached to this VM,
+ * per GT
+ */
+ int count[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
+ /** @exec_queues.lock: rwsem protecting @exec_queues.list and @exec_queues.count */
+ struct rw_semaphore lock;
+ } exec_queues;
+
/** @um: unified memory state */
struct {
/** @asid: address space ID, unique to each VM */