drm/sched: Add pending job list iterator
author	Matthew Brost <matthew.brost@intel.com>
	Tue, 9 Dec 2025 20:00:39 +0000 (12:00 -0800)
committer	Matthew Brost <matthew.brost@intel.com>
	Sat, 13 Dec 2025 08:34:21 +0000 (00:34 -0800)
Stop open coding pending job list walks in drivers. Add a pending job
list iterator which safely walks the DRM scheduler's pending list while
asserting that the DRM scheduler is stopped.
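
As an illustrative sketch only (foo_recover_job() is a made-up driver
hook, and the scheduler is assumed to have already been stopped, e.g.
via drm_sched_stop()), a driver-side walk could look like:

	static void foo_sched_recover(struct drm_gpu_scheduler *sched)
	{
		struct drm_sched_job *job;

		/* The iterator WARNs unless drm_sched_is_stopped(sched) is true */
		drm_sched_for_each_pending_job(job, sched, NULL)
			foo_recover_job(job);	/* hypothetical driver-specific recovery */
	}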

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Link: https://patch.msgid.link/20251209200039.1366764-3-matthew.brost@intel.com
drivers/gpu/drm/scheduler/sched_main.c
include/drm/gpu_scheduler.h

index 5f08719a35f556efd28a744d55a441bb78e55f4f..bd7936c03da2aa541de66db7ae1ba1c074dd4d02 100644 (file)
@@ -729,7 +729,9 @@ EXPORT_SYMBOL(drm_sched_start);
  *
  * Drivers can still save and restore their state for recovery operations, but
  * we shouldn't make this a general scheduler feature around the dma_fence
- * interface.
+ * interface. The suggested driver-side replacement is to use
+ * drm_sched_for_each_pending_job() after stopping the scheduler and to
+ * implement driver-specific recovery operations.
  */
 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 {
index 86b6075ce7993be66a51e055f933e518282e5757..78e07c2507c7a420ad75d04821588b66394f5c39 100644 (file)
@@ -700,4 +700,54 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
                                   struct drm_gpu_scheduler **sched_list,
                                   unsigned int num_sched_list);
 
+/**
+ * struct drm_sched_pending_job_iter - DRM scheduler pending job iterator state
+ * @sched: DRM scheduler associated with pending job iterator
+ */
+struct drm_sched_pending_job_iter {
+       struct drm_gpu_scheduler *sched;
+};
+
+/* Drivers should never call this directly */
+static inline struct drm_sched_pending_job_iter
+__drm_sched_pending_job_iter_begin(struct drm_gpu_scheduler *sched)
+{
+       struct drm_sched_pending_job_iter iter = {
+               .sched = sched,
+       };
+
+       WARN_ON(!drm_sched_is_stopped(sched));
+       return iter;
+}
+
+/* Drivers should never call this directly */
+static inline void
+__drm_sched_pending_job_iter_end(const struct drm_sched_pending_job_iter iter)
+{
+       WARN_ON(!drm_sched_is_stopped(iter.sched));
+}
+
+DEFINE_CLASS(drm_sched_pending_job_iter, struct drm_sched_pending_job_iter,
+            __drm_sched_pending_job_iter_end(_T),
+            __drm_sched_pending_job_iter_begin(__sched),
+            struct drm_gpu_scheduler *__sched);
+static inline void *
+class_drm_sched_pending_job_iter_lock_ptr(class_drm_sched_pending_job_iter_t *_T)
+{ return _T; }
+#define class_drm_sched_pending_job_iter_is_conditional false
+
+/**
+ * drm_sched_for_each_pending_job() - Iterate over each pending job in a scheduler
+ * @__job: Current pending job being iterated over
+ * @__sched: DRM scheduler whose pending jobs are iterated over
+ * @__entity: DRM scheduler entity to filter jobs on, NULL indicates no filter
+ *
+ * Iterates over each pending job in the scheduler, optionally filtering on an
+ * entity, while enforcing that the scheduler is fully stopped.
+ */
+#define drm_sched_for_each_pending_job(__job, __sched, __entity)               \
+       scoped_guard(drm_sched_pending_job_iter, (__sched))                     \
+               list_for_each_entry((__job), &(__sched)->pending_list, list)    \
+                       for_each_if(!(__entity) || (__job)->entity == (__entity))
+
 #endif
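
For the @__entity filter specifically, a minimal hedged sketch (the
foo_entity_pending_jobs() helper is hypothetical and not part of this
patch; the scheduler is again assumed to be stopped) could look like:

	static unsigned int foo_entity_pending_jobs(struct drm_gpu_scheduler *sched,
						    struct drm_sched_entity *entity)
	{
		struct drm_sched_job *job;
		unsigned int count = 0;

		/* Only jobs whose job->entity matches @entity are visited */
		drm_sched_for_each_pending_job(job, sched, entity)
			count++;

		return count;
	}

Note that the iterator class only asserts drm_sched_is_stopped() via
WARN_ON() on entry and exit; it takes no locks, so stopping and
restarting the scheduler around the walk remains the driver's
responsibility.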