drm/xe: Update VM trace events
author Matthew Brost <matthew.brost@intel.com>
Thu, 4 Jul 2024 04:16:50 +0000 (21:16 -0700)
committer Matthew Brost <matthew.brost@intel.com>
Thu, 4 Jul 2024 05:28:06 +0000 (22:28 -0700)
The trace events have changed with the move to a single job per VM bind
IOCTL; update the trace events to align with the old behavior as much as
possible.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240704041652.272920-6-matthew.brost@intel.com
drivers/gpu/drm/xe/xe_trace_bo.h
drivers/gpu/drm/xe/xe_vm.c
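
For context, a minimal sketch of how the new event plugs into the kernel
tracepoint machinery. The xe_vm event class shown here is an assumption
(the real class lives in xe_trace_bo.h and likely records and prints more
fields); only the DEFINE_EVENT at the end corresponds to what this patch
adds. The removed per-VMA xe_vma_fail event is replaced by this per-VM
failure event on the bind error path.

/*
 * Illustrative sketch only, not taken from the patch: an assumed xe_vm
 * event class with a single vm pointer field.
 */
DECLARE_EVENT_CLASS(xe_vm,
		    TP_PROTO(struct xe_vm *vm),
		    TP_ARGS(vm),

		    TP_STRUCT__entry(
			     __field(struct xe_vm *, vm)
		    ),

		    TP_fast_assign(
			     __entry->vm = vm;
		    ),

		    TP_printk("vm=%p", __entry->vm)
);

/* The new per-VM failure event added by this patch reuses that class. */
DEFINE_EVENT(xe_vm, xe_vm_ops_fail,
	     TP_PROTO(struct xe_vm *vm),
	     TP_ARGS(vm)
);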

index f39f09ed3495625db5c9dd1a574f6ee0d1ce3281..9b1a1d4304ae1878b13f298bb01b716f54b237f4 100644 (file)
@@ -117,11 +117,6 @@ DEFINE_EVENT(xe_vma, xe_vma_acc,
             TP_ARGS(vma)
 );
 
-DEFINE_EVENT(xe_vma, xe_vma_fail,
-            TP_PROTO(struct xe_vma *vma),
-            TP_ARGS(vma)
-);
-
 DEFINE_EVENT(xe_vma, xe_vma_bind,
             TP_PROTO(struct xe_vma *vma),
             TP_ARGS(vma)
@@ -237,6 +232,11 @@ DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
             TP_ARGS(vm)
 );
 
+DEFINE_EVENT(xe_vm, xe_vm_ops_fail,
+            TP_PROTO(struct xe_vm *vm),
+            TP_ARGS(vm)
+);
+
 #endif
 
 /* This part must be outside protection */
index 73cc6b0efcef7a494d631f8a39ba24e1f3d161bb..5232856cc3fb5f9b68a2900d3e22ad34bcd1328d 100644 (file)
@@ -2481,6 +2481,38 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
        return 0;
 }
 
+static void op_trace(struct xe_vma_op *op)
+{
+       switch (op->base.op) {
+       case DRM_GPUVA_OP_MAP:
+               trace_xe_vma_bind(op->map.vma);
+               break;
+       case DRM_GPUVA_OP_REMAP:
+               trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va));
+               if (op->remap.prev)
+                       trace_xe_vma_bind(op->remap.prev);
+               if (op->remap.next)
+                       trace_xe_vma_bind(op->remap.next);
+               break;
+       case DRM_GPUVA_OP_UNMAP:
+               trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va));
+               break;
+       case DRM_GPUVA_OP_PREFETCH:
+               trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
+               break;
+       default:
+               XE_WARN_ON("NOT POSSIBLE");
+       }
+}
+
+static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops)
+{
+       struct xe_vma_op *op;
+
+       list_for_each_entry(op, &vops->list, link)
+               op_trace(op);
+}
+
 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
 {
        struct xe_exec_queue *q = vops->q;
@@ -2524,8 +2556,10 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
        if (number_tiles > 1) {
                fences = kmalloc_array(number_tiles, sizeof(*fences),
                                       GFP_KERNEL);
-               if (!fences)
-                       return ERR_PTR(-ENOMEM);
+               if (!fences) {
+                       fence = ERR_PTR(-ENOMEM);
+                       goto err_trace;
+               }
        }
 
        for_each_tile(tile, vm->xe, id) {
@@ -2539,6 +2573,8 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
                }
        }
 
+       trace_xe_vm_ops_execute(vops);
+
        for_each_tile(tile, vm->xe, id) {
                if (!vops->pt_update_ops[id].num_ops)
                        continue;
@@ -2585,6 +2621,8 @@ err_out:
        kfree(fences);
        kfree(cf);
 
+err_trace:
+       trace_xe_vm_ops_fail(vm);
        return fence;
 }