/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#ifndef _I915_SCHEDULER_TYPES_H_
#define _I915_SCHEDULER_TYPES_H_

#include <linux/list.h>

#include "gt/intel_engine_types.h"
#include "i915_priolist_types.h"

struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;

struct i915_sched_attr {
	/**
	 * @priority: execution and service priority
	 *
	 * All clients are equal, but some are more equal than others!
	 *
	 * Requests from a context with a greater (more positive) value of
	 * @priority will be executed before those with a lower @priority
	 * value, forming a simple QoS.
	 *
	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
	 */
	int priority;
};
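
/*
 * Illustrative sketch only, not part of the i915 API: one way the @priority
 * field above could be used to order two sets of scheduling attributes when
 * sorting ready requests. The helper name "example_sched_attr_runs_before"
 * is hypothetical and exists solely to make the QoS rule above concrete.
 */
static inline bool
example_sched_attr_runs_before(const struct i915_sched_attr *a,
			       const struct i915_sched_attr *b)
{
	/* The greater (more positive) priority value is serviced first. */
	return a->priority > b->priority;
}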

/*
 * "People assume that time is a strict progression of cause to effect, but
 * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
 * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
 *
 * Requests exist in a complex web of interdependencies. Each request
 * has to wait for some other request to complete before it is ready to be run
 * (e.g. we have to wait until the pixels have been rendered into a texture
 * before we can copy from it). We track the readiness of a request in terms
 * of fences, but we also need to keep the dependency tree for the lifetime
 * of the request (beyond the life of an individual fence). We use the tree
 * at various points to reorder the requests whilst keeping the requests
 * in order with respect to their various dependencies.
 *
 * There is no active component to the "scheduler". As we know the dependency
 * DAG of each request, we are able to insert it into a sorted queue when it
 * is ready, and are able to reorder its portion of the graph to accommodate
 * dynamic priority changes.
 *
 * Ok, there is now one active element to the "scheduler" in the backends.
 * We let a new context run for a small amount of time before re-evaluating
 * the run order. As we re-evaluate, we maintain the strict ordering of
 * dependencies, but attempt to rotate the active contexts (the current
 * context is put to the back of its priority queue, then its dependents are
 * reshuffled). This provides minimal timeslicing and prevents a userspace
 * hog (e.g. something waiting on a user semaphore [VkEvent]) from denying
 * service to others.
 */
struct i915_sched_node {
	struct list_head signalers_list; /* those before us, we depend upon */
	struct list_head waiters_list; /* those after us, they depend upon us */
	struct list_head link;
	struct i915_sched_attr attr;
	unsigned int flags;
#define I915_SCHED_HAS_SEMAPHORE_CHAIN	BIT(0)
	intel_engine_mask_t semaphores;
};
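
/*
 * Illustrative sketch only, not part of the i915 API: what bringing a node
 * into an initial, dependency-free state could look like. The helper name
 * "example_sched_node_init" is hypothetical and the default priority of 0 is
 * an arbitrary choice for the sketch; the real driver has its own
 * initialisation path.
 */
static inline void example_sched_node_init(struct i915_sched_node *node)
{
	/* No signalers or waiters yet; the node sits on no priority list. */
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);

	node->attr.priority = 0;
	node->flags = 0;
	node->semaphores = 0;
}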

struct i915_dependency {
	struct i915_sched_node *signaler;
	struct i915_sched_node *waiter;
	struct list_head signal_link;
	struct list_head wait_link;
	struct list_head dfs_link;
	unsigned long flags;
#define I915_DEPENDENCY_ALLOC		BIT(0)
#define I915_DEPENDENCY_EXTERNAL	BIT(1)
};
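
/*
 * Illustrative sketch only, not part of the i915 API: how one edge of the
 * dependency DAG described above could be threaded through both nodes, so
 * that each node can walk the requests it waits on and the requests waiting
 * on it. The helper name "example_sched_add_dependency" is hypothetical, and
 * locking/RCU concerns are deliberately omitted.
 */
static inline void
example_sched_add_dependency(struct i915_dependency *dep,
			     struct i915_sched_node *signaler,
			     struct i915_sched_node *waiter)
{
	/* The waiter must not be run until the signaler has completed. */
	dep->signaler = signaler;
	dep->waiter = waiter;
	dep->flags = 0;
	INIT_LIST_HEAD(&dep->dfs_link);

	/* The same edge is reachable from both ends of the dependency. */
	list_add(&dep->signal_link, &waiter->signalers_list);
	list_add(&dep->wait_link, &signaler->waiters_list);
}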

#endif /* _I915_SCHEDULER_TYPES_H_ */