/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
/**
 * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
 *
 * Setting this flag on a scheduler fence prevents pipelining of jobs depending
 * on this fence. In other words we always insert a full CPU round trip before
 * dependent jobs are pushed to the hw queue.
 */
#define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS
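
/*
 * Illustrative sketch (not part of the original header): a driver that wants
 * to force the CPU round trip described above can set this flag on the
 * scheduler fence it hands out as a dependency. The job variable below is
 * hypothetical.
 *
 *	struct dma_fence *out_fence = dma_fence_get(&job->s_fence->finished);
 *
 *	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &out_fence->flags);
 */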
/**
 * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set
 *
 * Because a deadline hint can be set before the backing hw fence is created,
 * we need to keep track of whether a deadline has already been set.
 */
#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT	(DMA_FENCE_FLAG_USER_BITS + 1)
struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;
/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_KERNEL,

	DRM_SCHED_PRIORITY_COUNT,
	DRM_SCHED_PRIORITY_UNSET = -2
};
/* Used to choose between FIFO and RR job scheduling */
extern int drm_sched_policy;

#define DRM_SCHED_POLICY_RR    0
#define DRM_SCHED_POLICY_FIFO  1
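
/*
 * Illustrative sketch (not part of the original header): code that needs to
 * branch on the selected policy can compare drm_sched_policy against these
 * defines, roughly as the scheduler core does when queueing an entity. The
 * entity and submit_ts variables are hypothetical.
 *
 *	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
 *		drm_sched_rq_update_fifo(entity, submit_ts);
 */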
/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	/**
	 * @list: Used to append this struct to the list of entities in the
	 * runqueue @rq under &drm_sched_rq.entities.
	 *
	 * Protected by &drm_sched_rq.lock of @rq.
	 */
	struct list_head		list;

	/**
	 * @rq: Runqueue on which this entity is currently scheduled.
	 *
	 * FIXME: Locking is very unclear for this. Writers are protected by
	 * @rq_lock, but readers are generally lockless and seem to just race
	 * with not even a READ_ONCE.
	 */
	struct drm_sched_rq		*rq;

	/**
	 * @sched_list: A list of schedulers (struct drm_gpu_scheduler). Jobs
	 * from this entity can be scheduled on any scheduler on this list.
	 *
	 * This can be modified by calling drm_sched_entity_modify_sched().
	 * Locking is entirely up to the driver, see the above function for
	 * more details.
	 *
	 * This will be set to NULL if &num_sched_list equals 1 and @rq has
	 * been set already.
	 *
	 * FIXME: This means priority changes through
	 * drm_sched_entity_set_priority() will be lost henceforth in this case.
	 */
	struct drm_gpu_scheduler	**sched_list;

	/**
	 * @num_sched_list: Number of drm_gpu_schedulers in the @sched_list.
	 */
	unsigned int			num_sched_list;

	/**
	 * @priority: Priority of the entity. This can be modified by calling
	 * drm_sched_entity_set_priority(). Protected by &rq_lock.
	 */
	enum drm_sched_priority		priority;

	/**
	 * @rq_lock: Lock to modify the runqueue to which this entity belongs.
	 */
	spinlock_t			rq_lock;

	/**
	 * @job_queue: the list of jobs of this entity.
	 */
	struct spsc_queue		job_queue;

	/**
	 * @fence_seq: A linearly increasing seqno incremented with each new
	 * &drm_sched_fence which is part of the entity.
	 *
	 * FIXME: Callers of drm_sched_job_arm() need to ensure correct
	 * locking; this doesn't need to be atomic.
	 */
	atomic_t			fence_seq;

	/**
	 * @fence_context: A unique context for all the fences which belong to
	 * this entity. The &drm_sched_fence.scheduled uses the fence_context
	 * but &drm_sched_fence.finished uses fence_context + 1.
	 */
	uint64_t			fence_context;

	/**
	 * @dependency: The dependency fence of the job which is on the top of
	 * the job queue.
	 */
	struct dma_fence		*dependency;

	/**
	 * @cb: Callback for the dependency fence above.
	 */
	struct dma_fence_cb		cb;

	/**
	 * @guilty: Points to entities' guilty.
	 */
	atomic_t			*guilty;

	/**
	 * @last_scheduled: Points to the finished fence of the last scheduled
	 * job. Only written by the scheduler thread, can be accessed
	 * locklessly from drm_sched_job_arm() iff the queue is empty.
	 */
	struct dma_fence __rcu		*last_scheduled;

	/**
	 * @last_user: last group leader pushing a job into the entity.
	 */
	struct task_struct		*last_user;

	/**
	 * @stopped: Marks the entity as removed from rq and destined for
	 * termination. This is set by calling drm_sched_entity_flush() and by
	 * drm_sched_fini().
	 */
	bool				stopped;

	/**
	 * @entity_idle: Signals when entity is not in use, used to sequence
	 * entity cleanup in drm_sched_entity_fini().
	 */
	struct completion		entity_idle;

	/**
	 * @oldest_job_waiting: Marks earliest job waiting in SW queue
	 */
	ktime_t				oldest_job_waiting;

	/**
	 * @rb_tree_node: The node used to insert this entity into the time
	 * based priority queue
	 */
	struct rb_node			rb_tree_node;
};
/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: protects modification of the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 * @rb_tree_root: root of the time based priority queue of entities for FIFO scheduling
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
	struct rb_root_cached		rb_tree_root;
};
/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @deadline: deadline set on &drm_sched_fence.finished which
	 * potentially needs to be propagated to &drm_sched_fence.parent
	 */
	ktime_t				deadline;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;

	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler	*sched;

	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;

	/**
	 * @owner: job owner for debugging
	 */
	void				*owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
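
/*
 * Illustrative sketch (not part of the original header): to_drm_sched_fence()
 * returns NULL when the given &dma_fence was not created by the scheduler, so
 * it can be used both as a type check and as a downcast. The fence and sched
 * variables are hypothetical.
 *
 *	struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
 *
 *	if (s_fence && s_fence->sched == sched)
 *		return true;	// fence comes from this scheduler instance
 */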
/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: a job participates in the "pending" and "done" lists.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of job.
 * @finish_cb: the callback for the finished fence.
 * @work: Helper to reschedule job kill to different context.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and
 * should call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct list_head		list;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;

	/*
	 * work is used only after finish_cb has been used and will not be
	 * accessed anymore.
	 */
	union {
		struct dma_fence_cb	finish_cb;
		struct work_struct	work;
	};

	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;

	/**
	 * @dependencies: Contains the dependencies as struct dma_fence for
	 * this job, see drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies().
	 */
	struct xarray			dependencies;

	/** @last_dependency: tracks @dependencies as they signal */
	unsigned long			last_dependency;

	/**
	 * @submit_ts: When the job was pushed into the entity queue.
	 */
	ktime_t				submit_ts;
};
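
/*
 * Illustrative sketch (not part of the original header) of the submission
 * flow described above: init the job against an entity, add dependencies,
 * arm it, grab the finished fence as out fence and push it. The my_job,
 * my_ctx and gem_obj names are hypothetical; the scheduler calls are not.
 *
 *	struct dma_fence *out_fence;
 *	int ret;
 *
 *	ret = drm_sched_job_init(&my_job->base, &my_ctx->entity, my_ctx);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_sched_job_add_implicit_dependencies(&my_job->base, gem_obj,
 *						      true);
 *	if (ret) {
 *		drm_sched_job_cleanup(&my_job->base);
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&my_job->base);
 *	out_fence = dma_fence_get(&my_job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&my_job->base);
 */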
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}
enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
	DRM_GPU_SCHED_STAT_NOMINAL,
	DRM_GPU_SCHED_STAT_ENODEV,
};
/**
 * struct drm_sched_backend_ops - Define the backend operations
 *	called by the scheduler
 *
 * These functions should be implemented on the driver side.
 */
struct drm_sched_backend_ops {
	/**
	 * @prepare_job:
	 *
	 * Called when the scheduler is considering scheduling this job next, to
	 * get another struct dma_fence for this job to block on. Once it
	 * returns NULL, run_job() may be called.
	 *
	 * Can be NULL if no additional preparation of the dependencies is
	 * necessary. Skipped when jobs are killed instead of run.
	 */
	struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
					 struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * This method is called in a workqueue context.
	 *
	 * Drivers typically issue a reset to recover from GPU hangs, and this
	 * procedure usually follows this workflow:
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
	 *    scheduler thread and cancel the timeout work, guaranteeing that
	 *    nothing is queued while we reset the hardware queue.
	 * 2. Try to gracefully stop non-faulty jobs (optional).
	 * 3. Issue a GPU reset (driver-specific).
	 * 4. Re-submit jobs using drm_sched_resubmit_jobs().
	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
	 *    jobs can be queued, and the scheduler thread is unblocked.
	 *
	 * A sketch of a handler following these steps is given after this
	 * struct.
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handlers of the different &drm_gpu_scheduler. One way to
	 * achieve this synchronization is to create an ordered workqueue
	 * (using alloc_ordered_workqueue()) at the driver level, and pass this
	 * queue to drm_sched_init(), to guarantee that timeout handlers are
	 * executed sequentially. The above workflow needs to be slightly
	 * adjusted in that case:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
	 *    the reset (optional).
	 * 3. Issue a GPU reset on all faulty queues (driver-specific).
	 * 4. Re-submit jobs on all schedulers impacted by the reset using
	 *    drm_sched_resubmit_jobs().
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start().
	 *
	 * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
	 * and the underlying driver has started or completed recovery.
	 *
	 * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
	 * available, i.e. has been unplugged.
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
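
/*
 * Illustrative sketch (not part of the original header) of a driver wiring up
 * these hooks and following the single-scheduler recovery workflow documented
 * for @timedout_job above. All my_* names are hypothetical.
 *
 *	static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *bad)
 *	{
 *		struct my_device *mdev = to_my_device(bad->sched);
 *
 *		drm_sched_stop(bad->sched, bad);	// step 1
 *		my_hw_reset(mdev);			// steps 2-3, driver specific
 *		drm_sched_resubmit_jobs(bad->sched);	// step 4
 *		drm_sched_start(bad->sched, true);	// step 5
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 */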
/**
 * struct drm_gpu_scheduler - scheduler instance-specific data
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
 *           as there's usually one run-queue per priority, but could be less.
 * @sched_rq: An allocated array of run-queues of size @num_rqs;
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 cleaned up.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @timeout_wq: workqueue used to queue @work_tdr
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the hangs by a job cross this limit then it is marked
 *              guilty and it will no longer be considered for scheduling.
 * @score: score to help the load balancer pick an idle sched
 * @_score: score used when the driver doesn't provide one
 * @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 * @dev: system &struct device
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	u32				num_rqs;
	struct drm_sched_rq		**sched_rq;
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct workqueue_struct		*timeout_wq;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		pending_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			*score;
	atomic_t			_score;
	bool				ready;
	bool				free_guilty;
	struct device			*dev;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev);
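
/*
 * Illustrative sketch (not part of the original header): one scheduler is
 * usually initialized per hardware ring, e.g. from the driver's probe path.
 * The my_* names, the 64-entry hardware queue, the hang limit of 3 and the
 * 500 ms timeout are hypothetical values.
 *
 *	ret = drm_sched_init(&mdev->sched, &my_sched_ops,
 *			     DRM_SCHED_PRIORITY_COUNT, 64, 3,
 *			     msecs_to_jiffies(500), NULL, NULL,
 *			     "my-ring", mdev->dev);
 */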
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence);
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point);
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write);
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
int drm_sched_entity_error(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *s_entity,
					      void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);

void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
			       struct dma_fence *parent);
void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);