drm/xe: Limit number of jobs per exec queue
author    Shuicheng Lin <shuicheng.lin@intel.com>
          Mon, 27 Oct 2025 20:21:19 +0000 (20:21 +0000)
committer Matthew Brost <matthew.brost@intel.com>
          Wed, 29 Oct 2025 01:46:19 +0000 (18:46 -0700)
Add a limit to the number of jobs that can be queued in a single
exec queue to avoid potential resource exhaustion.

A new field `job_cnt` is introduced in `struct xe_exec_queue` to
track the number of active DRM jobs, along with a maximum limit
`XE_MAX_JOB_COUNT_PER_EXEC_QUEUE` set to 1000.
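
In outline, the accounting reduces to the pattern below. This is a
standalone model using C11 atomics rather than the kernel's atomic_t;
the names exec_queue_model, submit() and retire() are illustrative
stand-ins for the check added to xe_exec_ioctl(), the increment in
xe_sched_job_create() and the decrement in xe_sched_job_destroy():

  #include <stdatomic.h>
  #include <errno.h>

  #define XE_MAX_JOB_COUNT_PER_EXEC_QUEUE 1000

  struct exec_queue_model {
      atomic_int job_cnt;    /* jobs created but not yet destroyed */
  };

  /* Gate new submissions, as the new check in xe_exec_ioctl() does. */
  static int submit(struct exec_queue_model *q)
  {
      if (atomic_load(&q->job_cnt) >= XE_MAX_JOB_COUNT_PER_EXEC_QUEUE)
          return -EAGAIN;                 /* caller should retry later */
      atomic_fetch_add(&q->job_cnt, 1);   /* xe_sched_job_create() */
      return 0;
  }

  /* Undo the accounting when a job is freed (xe_sched_job_destroy()). */
  static void retire(struct exec_queue_model *q)
  {
      atomic_fetch_sub(&q->job_cnt, 1);
  }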

If the job count has reached this limit, `xe_exec_ioctl()` now
returns `-EAGAIN` to signal that the caller should retry later.
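
For userspace, this -EAGAIN is transient: slots free up as in-flight
jobs retire. A minimal retry sketch is shown below; it assumes the
standard DRM_IOCTL_XE_EXEC uapi from installed kernel headers, and the
helper name xe_exec_with_retry() is hypothetical. A real submitter
would more likely wait on the exec queue's out-fences than sleep:

  #include <errno.h>
  #include <poll.h>
  #include <sys/ioctl.h>
  #include <drm/xe_drm.h>    /* include path depends on how the uapi headers are installed */

  static int xe_exec_with_retry(int fd, struct drm_xe_exec *exec, int max_tries)
  {
      for (int i = 0; i < max_tries; i++) {
          if (ioctl(fd, DRM_IOCTL_XE_EXEC, exec) == 0)
              return 0;
          if (errno == EINTR)
              continue;
          if (errno != EAGAIN)
              return -errno;
          /* Exec queue is at its job limit; back off briefly and retry. */
          poll(NULL, 0, 1);
      }
      return -EAGAIN;
  }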

A trace event is added to track when the limit is reached:
"xe_exec_queue_reach_max_job_count: dev=0000:03:00.0, job count
exceeded the maximum limit (1000) per exec queue. engine_class=0x3,
logical_mask=0x1, guc_id=2"
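
The event can be enabled like any other tracepoint through tracefs. A
small sketch, assuming tracefs is mounted at /sys/kernel/tracing and
that the event lives in the "xe" trace system defined by xe_trace.h;
the helper name enable_job_limit_tracepoint() is just for illustration:

  #include <fcntl.h>
  #include <unistd.h>

  static int enable_job_limit_tracepoint(void)
  {
      const char *path =
          "/sys/kernel/tracing/events/xe/xe_exec_queue_reach_max_job_count/enable";
      int fd = open(path, O_WRONLY);
      ssize_t ret;

      if (fd < 0)
          return -1;
      ret = write(fd, "1", 1);    /* write "0" to disable again */
      close(fd);
      return ret == 1 ? 0 : -1;
  }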

v3: Add an assert in xe_exec_queue_destroy() that q->job_cnt is zero. (Matt)
v2 (Matt):
 - Add a trace event when the limit is hit.
 - Change the max count from 0x1000 to 1000.
 - Use atomic_t for job_cnt.

Suggested-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251027202118.3339905-2-shuicheng.lin@intel.com
drivers/gpu/drm/xe/xe_exec.c
drivers/gpu/drm/xe/xe_exec_queue.c
drivers/gpu/drm/xe/xe_exec_queue_types.h
drivers/gpu/drm/xe/xe_sched_job.c
drivers/gpu/drm/xe/xe_trace.h

diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 521467d976f743f860ccee4fac8cbb23326b4818..f4d79b4b93960f6327bd87803543de554b265348 100644
@@ -21,6 +21,7 @@
 #include "xe_sched_job.h"
 #include "xe_sync.h"
 #include "xe_svm.h"
+#include "xe_trace.h"
 #include "xe_vm.h"
 
 /**
@@ -154,6 +155,12 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                goto err_exec_queue;
        }
 
+       if (atomic_read(&q->job_cnt) >= XE_MAX_JOB_COUNT_PER_EXEC_QUEUE) {
+               trace_xe_exec_queue_reach_max_job_count(q, XE_MAX_JOB_COUNT_PER_EXEC_QUEUE);
+               err = -EAGAIN;
+               goto err_exec_queue;
+       }
+
        if (args->num_syncs) {
                syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
                if (!syncs) {
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 90cbc95f8e2e12dbddc0e71acd400480d889a60d..1b57d7c2cc9472e0dae1e71b4715f6865fcb2ad7 100644
@@ -377,6 +377,8 @@ void xe_exec_queue_destroy(struct kref *ref)
        struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
        struct xe_exec_queue *eq, *next;
 
+       xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0);
+
        if (xe_exec_queue_uses_pxp(q))
                xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
 
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 282505fa13774eddefed854eaa825a575c15ea3e..c8807268ec6c74600e36d3d6231f3519afee9f94 100644
@@ -162,6 +162,11 @@ struct xe_exec_queue {
        const struct xe_ring_ops *ring_ops;
        /** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
        struct drm_sched_entity *entity;
+
+#define XE_MAX_JOB_COUNT_PER_EXEC_QUEUE        1000
+       /** @job_cnt: number of drm jobs in this exec queue */
+       atomic_t job_cnt;
+
        /**
         * @tlb_flush_seqno: The seqno of the last rebind tlb flush performed
         * Protected by @vm's resv. Unused if @vm == NULL.
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 6ae4cc6a380276d46be1f6ff610339515c491ffc..f1ba9c19e2182ca9f2e4d6a5fcf7fa049b51354f 100644
@@ -146,6 +146,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
        for (i = 0; i < width; ++i)
                job->ptrs[i].batch_addr = batch_addr[i];
 
+       atomic_inc(&q->job_cnt);
        xe_pm_runtime_get_noresume(job_to_xe(job));
        trace_xe_sched_job_create(job);
        return job;
@@ -177,6 +178,7 @@ void xe_sched_job_destroy(struct kref *ref)
        dma_fence_put(job->fence);
        drm_sched_job_cleanup(&job->drm);
        job_free(job);
+       atomic_dec(&q->job_cnt);
        xe_exec_queue_put(q);
        xe_pm_runtime_put(xe);
 }
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 314f42fcbcbd93a5f52a06e32bfd9b7047498bb0..79a97b086cb27ff3432eb7717add8b8a88bf0fd3 100644
@@ -441,6 +441,29 @@ TRACE_EVENT(xe_eu_stall_data_read,
                      __entry->read_size, __entry->total_size)
 );
 
+TRACE_EVENT(xe_exec_queue_reach_max_job_count,
+           TP_PROTO(struct xe_exec_queue *q, int max_cnt),
+           TP_ARGS(q, max_cnt),
+
+           TP_STRUCT__entry(__string(dev, __dev_name_eq(q))
+                            __field(enum xe_engine_class, class)
+                            __field(u32, logical_mask)
+                            __field(u16, guc_id)
+                            __field(int, max_cnt)
+                            ),
+
+           TP_fast_assign(__assign_str(dev);
+                          __entry->class = q->class;
+                          __entry->logical_mask = q->logical_mask;
+                          __entry->guc_id = q->guc->id;
+                          __entry->max_cnt = max_cnt;
+                          ),
+
+           TP_printk("dev=%s, job count exceeded the maximum limit (%d) per exec queue. engine_class=0x%x, logical_mask=0x%x, guc_id=%d",
+                     __get_str(dev), __entry->max_cnt,
+                     __entry->class, __entry->logical_mask, __entry->guc_id)
+);
+
 #endif
 
 /* This part must be outside protection */