/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, like submitting a job to
 * the hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed. A minimal driver-side submission flow is sketched below.
 *
 * Note that once a job was taken from the entity's queue and pushed to the
 * hardware, i.e. the pending queue, the entity must not be referenced anymore
 * through the job's entity pointer.
 */
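/*
 * Illustrative sketch only, not part of this file's code: a driver typically
 * drives a job through an entity with roughly the calls below. The names
 * my_entity, my_job, my_ctx, sched_list and fence are hypothetical
 * placeholders and error handling is omitted.
 *
 *	drm_sched_entity_init(&my_entity, DRM_SCHED_PRIORITY_NORMAL,
 *			      sched_list, 1, NULL);
 *
 *	drm_sched_job_init(&my_job->base, &my_entity, my_ctx);
 *	drm_sched_job_add_dependency(&my_job->base, fence);	(optional)
 *	drm_sched_job_arm(&my_job->base);
 *	drm_sched_entity_push_job(&my_job->base);
 */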
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/drm_syncobj.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"
#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

int drm_sched_policy = DRM_SCHED_POLICY_FIFO;

/**
 * DOC: sched_policy (int)
 * Used to override the default scheduling policy for entities in a run queue.
 */
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
module_param_named(sched_policy, drm_sched_policy, int, 0444);
static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
							     const struct rb_node *b)
{
	struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
	struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);

	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}
static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = entity->rq;

	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
		RB_CLEAR_NODE(&entity->rb_tree_node);
	}
}
void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
{
	/*
	 * Both locks need to be grabbed: one to protect against a concurrent
	 * entity->rq change from within drm_sched_entity_select_rq(), the
	 * other to protect the rb tree structure while it is updated.
	 */
	spin_lock(&entity->rq_lock);
	spin_lock(&entity->rq->lock);

	drm_sched_rq_remove_fifo_locked(entity);

	entity->oldest_job_waiting = ts;

	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
		      drm_sched_entity_compare_before);

	spin_unlock(&entity->rq->lock);
	spin_unlock(&entity->rq_lock);
}
/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->rb_tree_root = RB_ROOT_CACHED;
	rq->current_entity = NULL;
	rq->sched = sched;
}
/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);

	spin_unlock(&rq->lock);
}
/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);

	if (rq->current_entity == entity)
		rq->current_entity = NULL;

	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
		drm_sched_rq_remove_fifo_locked(entity);

	spin_unlock(&rq->lock);
}
/**
 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}
/**
 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Find the oldest waiting ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
{
	struct rb_node *rb;

	spin_lock(&rq->lock);
	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
		struct drm_sched_entity *entity;

		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			break;
		}
	}
	spin_unlock(&rq->lock);

	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
}
/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 * @result: the job's completion status
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence, result);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callbacks
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job, f->error);
}
/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
}
/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout_wq)
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);
/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining.
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);
/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
		unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_get_cleanup_job(). It will be reinserted back
		 * after sched->thread is parked, at which point it's safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = job->sched->ops->timedout_job(job);

		/*
		 * Guilty job did complete and hence needs to be manually removed.
		 * See the drm_sched_stop() documentation.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}
}
/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler, and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is not part of the
 * pending list any more.
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert back the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from the pending list if they have
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * Remove job from pending_list.
			 * Locking here is for concurrent resume timeout.
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for the job's HW fence callback to finish using
			 * s_job before releasing it.
			 *
			 * Job is still alive so the fence refcount is at least 1.
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep the bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start.
	 * This prevents the timeout work in progress from firing right away
	 * after this TDR finished and before the newly restarted jobs had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);
/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recovers can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job, fence->error);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else
			drm_sched_job_done(s_job, -ECANCELED);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);
/**
 * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
 *
 * @sched: scheduler instance
 *
 * Re-submitting jobs was a concept AMD came up with as a cheap way to
 * implement recovery after a job timeout.
 *
 * This turned out not to work very well. First of all there are many
 * problems with the dma_fence implementation and requirements. Either the
 * implementation is risking deadlocks with core memory management or violating
 * documented implementation details of the dma_fence object.
 *
 * Drivers can still save and restore their state for recovery operations, but
 * we shouldn't make this a general scheduler feature around the dma_fence
 * interface.
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		fence = sched->ops->run_job(s_job);

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {

			s_job->s_fence->parent = dma_fence_get(fence);

			/* Drop for original kref_init */
			dma_fence_put(fence);
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
/**
 * drm_sched_job_init - init a scheduler job
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Drivers must make sure to call drm_sched_job_cleanup() if this function
 * returns successfully, even when @job is aborted before drm_sched_job_arm()
 * is called.
 *
 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
 * has died, which can mean that there's no valid runqueue for an @entity.
 * This function returns -ENOENT in this case (which probably should be -EIO as
 * a more meaningful return value).
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	if (!entity->rq)
		return -ENOENT;

	job->entity = entity;
	job->s_fence = drm_sched_fence_alloc(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&job->list);

	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
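/*
 * Illustrative sketch only, not part of this file: per the note above, a
 * driver's error unwind must call drm_sched_job_cleanup() when @job is
 * aborted after a successful drm_sched_job_init() but before
 * drm_sched_job_arm(). my_job, my_entity, my_ctx and my_driver_prepare()
 * are hypothetical placeholders.
 *
 *	ret = drm_sched_job_init(&my_job->base, &my_entity, my_ctx);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_driver_prepare(my_job);
 *	if (ret) {
 *		drm_sched_job_cleanup(&my_job->base);
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&my_job->base);
 *	drm_sched_entity_push_job(&my_job->base);
 */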
/**
 * drm_sched_job_arm - arm a scheduler job for execution
 * @job: scheduler job to arm
 *
 * This arms a scheduler job for execution. Specifically it initializes the
 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 * or other places that need to track the completion of this job.
 *
 * Refer to drm_sched_entity_push_job() documentation for locking
 * considerations.
 *
 * This can only be called if drm_sched_job_init() succeeded.
 */
void drm_sched_job_arm(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_entity *entity = job->entity;

	BUG_ON(!entity);
	drm_sched_entity_select_rq(entity);
	sched = entity->rq->sched;

	job->sched = sched;
	job->s_priority = entity->rq - sched->sched_rq;
	job->id = atomic64_inc_return(&sched->job_id_count);

	drm_sched_fence_init(job->s_fence, job->entity);
}
EXPORT_SYMBOL(drm_sched_job_arm);
/**
 * drm_sched_job_add_dependency - adds the fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Note that @fence is consumed in both the success and error cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(&job->dependencies, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);
/**
 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @file: drm file private pointer
 * @handle: syncobj handle to lookup
 * @point: timeline point
 *
 * This adds the fence matching the given syncobj to @job.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point)
{
	struct dma_fence *fence;
	int ret;

	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
	if (ret)
		return ret;

	return drm_sched_job_add_dependency(job, fence);
}
EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
/**
 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
 * @job: scheduler job to add the dependencies to
 * @resv: the dma_resv object to get the fences from
 * @usage: the dma_resv_usage to use to filter the fences
 *
 * This adds all fences matching the given usage from @resv to @job.
 * Must be called with the @resv lock held.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
		/* Make sure to grab an additional ref on the added fence */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
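/*
 * Illustrative sketch only, not part of this file: because the helper above
 * requires the reservation lock to be held, a driver would typically wrap
 * the call like this. The bo name is a hypothetical placeholder and error
 * handling is omitted.
 *
 *	dma_resv_lock(bo->resv, NULL);
 *	ret = drm_sched_job_add_resv_dependencies(job, bo->resv,
 *						   DMA_RESV_USAGE_WRITE);
 *	dma_resv_unlock(bo->resv);
 */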
/**
 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 *   dependencies
 * @job: scheduler job to add the dependencies to
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	return drm_sched_job_add_resv_dependencies(job, obj->resv,
						   dma_resv_usage_rw(write));
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 * @job: scheduler job to clean up
 *
 * Cleans up the resources allocated with drm_sched_job_init().
 *
 * Drivers should call this from their error unwind code if @job is aborted
 * before drm_sched_job_arm() is called.
 *
 * After that point of no return, @job is committed to be executed by the
 * scheduler, and this function should be called from the
 * &drm_sched_backend_ops.free_job callback.
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	struct dma_fence *fence;
	unsigned long index;

	if (kref_read(&job->s_fence->finished.refcount)) {
		/* drm_sched_job_arm() has been called */
		dma_fence_put(&job->s_fence->finished);
	} else {
		/* aborted job before committing to run it */
		drm_sched_fence_free(job->s_fence);
	}

	job->s_fence = NULL;

	xa_for_each(&job->dependencies, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->dependencies);
}
EXPORT_SYMBOL(drm_sched_job_cleanup);
/**
 * drm_sched_can_queue - Can we queue more to the hardware?
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}
/**
 * drm_sched_wakeup_if_can_queue - Wake up the scheduler
 * @sched: scheduler instance
 *
 * Wake up the scheduler if we can queue jobs.
 */
void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_can_queue(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_can_queue(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
			drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}
/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * that is ready to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);

		/* cancel this job's TO timer */
		cancel_delayed_work(&sched->work_tdr);
		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);

		if (next) {
			next->s_fence->scheduled.timestamp =
				dma_fence_timestamp(&job->s_fence->finished);
			/* start TO timer for next job */
			drm_sched_start_timeout(sched);
		}
	} else {
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}
/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns a pointer to the sched with the least load, or NULL if none of the
 * drm_gpu_schedulers are ready.
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);
/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}
/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job)
			sched->ops->free_job(cleanup_job);

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		if (!sched_job) {
			complete_all(&entity->entity_idle);
			continue;
		}

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		complete_all(&entity->entity_idle);
		drm_sched_fence_scheduled(s_fence, fence);

		if (!IS_ERR_OR_NULL(fence)) {
			/* Drop for original kref_init of the fence */
			dma_fence_put(fence);

			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(sched_job, fence->error);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else {
			drm_sched_job_done(sched_job, IS_ERR(fence) ?
					   PTR_ERR(fence) : 0);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}
/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
 *		used
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 * @dev: target &struct device
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->timeout_wq = timeout_wq ? : system_wq;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	sched->dev = dev;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);
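/*
 * Illustrative sketch only, not part of this file: a driver would typically
 * initialize one scheduler instance per ring roughly as follows, where
 * my_sched, my_ops, "my-ring" and the numeric limits are hypothetical
 * placeholders.
 *
 *	ret = drm_sched_init(&my_sched, &my_ops,
 *			     64, 3, msecs_to_jiffies(5000),
 *			     NULL, NULL, "my-ring", dev);
 */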
/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	int i;

	if (sched->thread)
		kthread_stop(sched->thread);

	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle,
			 * it will be removed from the rq in
			 * drm_sched_entity_fini() eventually.
			 */
			s_entity->stopped = true;
		spin_unlock(&rq->lock);
	}

	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
	wake_up_all(&sched->job_scheduled);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);
/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't change @bad's karma if it's from the KERNEL RQ,
	 * because a GPU hang can sometimes corrupt kernel jobs (like VM
	 * updating jobs), but kernel jobs are always considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);