/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"

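/*
 * Scheduler-wide state: slab caches for the i915_dependency and
 * i915_priolist nodes allocated below. The embedded i915_global is
 * registered at init so the caches are shrunk and destroyed along with
 * the rest of the driver's globals.
 */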
static struct i915_global_scheduler {
        struct i915_global base;
        struct kmem_cache *slab_dependencies;
        struct kmem_cache *slab_priorities;
} global;

static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
        return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
        return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
        return i915_request_completed(node_to_request(node));
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
        return rb_entry(rb, struct i915_priolist, node);
}

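/*
 * Debug-only sanity check: the execlists queue must be an rbtree sorted
 * in descending priority order, and each priolist's ->used bitmask must
 * have a bit set for every non-empty request list within it.
 */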
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
        struct rb_node *rb;
        long last_prio, i;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                return;

        GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
                   rb_first(&execlists->queue.rb_root));

        last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                const struct i915_priolist *p = to_priolist(rb);

                GEM_BUG_ON(p->priority >= last_prio);
                last_prio = p->priority;

                GEM_BUG_ON(!p->used);
                for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
                        if (list_empty(&p->requests[i]))
                                continue;

                        GEM_BUG_ON(!(p->used & BIT(i)));
                }
        }
}

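/*
 * Find (or create) the i915_priolist bucket for @prio and return the
 * request list within it. The bits above I915_USER_PRIORITY_SHIFT select
 * the rbtree node; the low bits index a sub-list inside that node, with
 * the highest priority in slot 0. Caller must hold engine->active.lock.
 */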
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_priolist *p;
        struct rb_node **parent, *rb;
        bool first = true;
        int idx, i;

        lockdep_assert_held(&engine->active.lock);
        assert_priolists(execlists);

        /* buckets sorted from highest [in slot 0] to lowest priority */
        idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
        prio >>= I915_USER_PRIORITY_SHIFT;
        if (unlikely(execlists->no_priolist))
                prio = I915_PRIORITY_NORMAL;

find_priolist:
        /* most positive priority is scheduled first, equal priorities fifo */
        rb = NULL;
        parent = &execlists->queue.rb_root.rb_node;
        while (*parent) {
                rb = *parent;
                p = to_priolist(rb);
                if (prio > p->priority) {
                        parent = &rb->rb_left;
                } else if (prio < p->priority) {
                        parent = &rb->rb_right;
                        first = false;
                } else {
                        goto out;
                }
        }

        if (prio == I915_PRIORITY_NORMAL) {
                p = &execlists->default_priolist;
        } else {
                p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
                if (unlikely(!p)) {
                        prio = I915_PRIORITY_NORMAL; /* recurses just once */

                        /*
                         * To maintain ordering with all rendering, after an
                         * allocation failure we have to disable all scheduling.
                         * Requests will then be executed in fifo, and schedule
                         * will ensure that dependencies are emitted in fifo.
                         * There will still be some reordering with existing
                         * requests, so if userspace lied about their
                         * dependencies that reordering may be visible.
                         */
                        execlists->no_priolist = true;
                        goto find_priolist;
                }
        }

        p->priority = prio;
        for (i = 0; i < ARRAY_SIZE(p->requests); i++)
                INIT_LIST_HEAD(&p->requests[i]);
        rb_link_node(&p->node, rb, parent);
        rb_insert_color_cached(&p->node, &execlists->queue, first);
        p->used = 0;

out:
        p->used |= BIT(idx);
        return &p->requests[idx];
}

void __i915_priolist_free(struct i915_priolist *p)
{
        kmem_cache_free(global.slab_priorities, p);
}

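/*
 * Per-engine state cached while walking the dependency list in
 * __i915_schedule(); invalidated whenever we drop the engine lock.
 */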
struct sched_cache {
        struct list_head *priolist;
};

static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
                  struct intel_engine_cs *locked,
                  struct sched_cache *cache)
{
        const struct i915_request *rq = node_to_request(node);
        struct intel_engine_cs *engine;

        GEM_BUG_ON(!locked);

        /*
         * Virtual engines complicate acquiring the engine timeline lock,
         * as their rq->engine pointer is not stable until under that
         * engine lock. The simple ploy we use is to take the lock then
         * check that the rq still belongs to the newly locked engine.
         */
        while (locked != (engine = READ_ONCE(rq->engine))) {
                spin_unlock(&locked->active.lock);
                memset(cache, 0, sizeof(*cache));
                spin_lock(&engine->active.lock);
                locked = engine;
        }

        GEM_BUG_ON(locked != engine);
        return locked;
}

static inline int rq_prio(const struct i915_request *rq)
{
        return rq->sched.attr.priority | __NO_PREEMPTION;
}

static inline bool need_preempt(int prio, int active)
{
        /*
         * Allow preemption of low -> normal -> high, but we do
         * not allow low priority tasks to preempt other low priority
         * tasks under the impression that latency for low priority
         * tasks does not matter (as much as background throughput),
         * so kiss.
         */
        return prio >= max(I915_PRIORITY_NORMAL, active);
}

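/*
 * If the bumped priority outranks everything the engine already has
 * queued, schedule the execlists tasklet so it can reconsider the
 * submission order and, if need be, preempt the inflight context.
 */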
static void kick_submission(struct intel_engine_cs *engine,
                            const struct i915_request *rq,
                            int prio)
{
        const struct i915_request *inflight;

        /*
         * We only need to kick the tasklet once for the high priority
         * new context we add into the queue.
         */
        if (prio <= engine->execlists.queue_priority_hint)
                return;

        rcu_read_lock();

        /* Nothing currently active? We're overdue for a submission! */
        inflight = execlists_active(&engine->execlists);
        if (!inflight)
                goto unlock;

        engine->execlists.queue_priority_hint = prio;

        /*
         * If we are already the currently executing context, don't
         * bother evaluating if we should preempt ourselves.
         */
        if (inflight->context == rq->context)
                goto unlock;

        if (need_preempt(prio, rq_prio(inflight)))
                tasklet_hi_schedule(&engine->execlists.tasklet);

unlock:
        rcu_read_unlock();
}

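/*
 * The heart of priority inheritance: raise the priority of every request
 * that @node depends upon to at least @attr->priority, then requeue any
 * request whose effective priority changed. The dependency graph is
 * flattened into a list first so the walk needs no recursion.
 */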
static void __i915_schedule(struct i915_sched_node *node,
                            const struct i915_sched_attr *attr)
{
        const int prio = max(attr->priority, node->attr.priority);
        struct intel_engine_cs *engine;
        struct i915_dependency *dep, *p;
        struct i915_dependency stack;
        struct sched_cache cache;
        LIST_HEAD(dfs);

        /* Needed in order to use the temporary link inside i915_dependency */
        lockdep_assert_held(&schedule_lock);
        GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

        if (node_signaled(node))
                return;

        stack.signaler = node;
        list_add(&stack.dfs_link, &dfs);

        /*
         * Recursively bump all dependent priorities to match the new request.
         *
         * A naive approach would be to use recursion:
         * static void update_priorities(struct i915_sched_node *node, prio) {
         *      list_for_each_entry(dep, &node->signalers_list, signal_link)
         *              update_priorities(dep->signal, prio)
         *      queue_request(node);
         * }
         * but that may have unlimited recursion depth and so runs a very
         * real risk of overrunning the kernel stack. Instead, we build
         * a flat list of all dependencies starting with the current request.
         * As we walk the list, we add each request's dependencies to the
         * end of the list (this may include an already visited request) and
         * continue to walk onwards onto the new dependencies. The end result
         * is a topological list of requests in reverse order; the last
         * element in the list is the request we must execute first.
         */
        list_for_each_entry(dep, &dfs, dfs_link) {
                struct i915_sched_node *node = dep->signaler;

                /* If we are already flying, we know we have no signalers */
                if (node_started(node))
                        continue;

                /*
                 * Within an engine, there can be no cycle, but we may
                 * refer to the same dependency chain multiple times
                 * (redundant dependencies are not eliminated) and across
                 * engines.
                 */
                list_for_each_entry(p, &node->signalers_list, signal_link) {
                        GEM_BUG_ON(p == dep); /* no cycles! */

                        if (node_signaled(p->signaler))
                                continue;

                        if (prio > READ_ONCE(p->signaler->attr.priority))
                                list_move_tail(&p->dfs_link, &dfs);
                }
        }

        /*
         * If we didn't need to bump any existing priorities, and we haven't
         * yet submitted this request (i.e. there is no potential race with
         * execlists_submit_request()), we can set our own priority and skip
         * acquiring the engine locks.
         */
        if (node->attr.priority == I915_PRIORITY_INVALID) {
                GEM_BUG_ON(!list_empty(&node->link));
                node->attr = *attr;

                if (stack.dfs_link.next == stack.dfs_link.prev)
                        return;

                __list_del_entry(&stack.dfs_link);
        }

        memset(&cache, 0, sizeof(cache));
        engine = node_to_request(node)->engine;
        spin_lock(&engine->active.lock);

        /* Fifo and depth-first replacement ensure our deps execute before us */
        engine = sched_lock_engine(node, engine, &cache);
        list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
                INIT_LIST_HEAD(&dep->dfs_link);

                node = dep->signaler;
                engine = sched_lock_engine(node, engine, &cache);
                lockdep_assert_held(&engine->active.lock);

                /* Recheck after acquiring the engine->active.lock */
                if (prio <= node->attr.priority || node_signaled(node))
                        continue;

                GEM_BUG_ON(node_to_request(node)->engine != engine);

                WRITE_ONCE(node->attr.priority, prio);

                /*
                 * Once the request is ready, it will be placed into the
                 * priority lists and then onto the HW runlist. Before the
                 * request is ready, it does not contribute to our preemption
                 * decisions and we can safely ignore it, as it, and any
                 * preemption required, will be dealt with upon submission.
                 * See engine->submit_request()
                 */
                if (list_empty(&node->link))
                        continue;

                if (i915_request_in_priority_queue(node_to_request(node))) {
                        if (!cache.priolist)
                                cache.priolist =
                                        i915_sched_lookup_priolist(engine,
                                                                   prio);
                        list_move_tail(&node->link, cache.priolist);
                }

                /* Defer (tasklet) submission until after all of our updates. */
                kick_submission(engine, node_to_request(node), prio);
        }

        spin_unlock(&engine->active.lock);
}

void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
        spin_lock_irq(&schedule_lock);
        __i915_schedule(&rq->sched, attr);
        spin_unlock_irq(&schedule_lock);
}

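/*
 * A "bump" is an internal priority flag within I915_PRIORITY_MASK (for
 * example __NO_PREEMPTION), OR'ed into the bits below the user priority
 * level; it raises the effective priority without changing the
 * user-visible value.
 */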
static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
        struct i915_sched_attr attr = node->attr;

        if (attr.priority & bump)
                return;

        attr.priority |= bump;
        __i915_schedule(node, &attr);
}

void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
        unsigned long flags;

        GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
        if (READ_ONCE(rq->sched.attr.priority) & bump)
                return;

        spin_lock_irqsave(&schedule_lock, flags);
        __bump_priority(&rq->sched, bump);
        spin_unlock_irqrestore(&schedule_lock, flags);
}

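/*
 * The sched node is embedded in each request: _init() runs once when
 * the request is first constructed, _reinit() each time the request
 * object is reused for a new submission.
 */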
void i915_sched_node_init(struct i915_sched_node *node)
{
        INIT_LIST_HEAD(&node->signalers_list);
        INIT_LIST_HEAD(&node->waiters_list);
        INIT_LIST_HEAD(&node->link);

        i915_sched_node_reinit(node);
}

void i915_sched_node_reinit(struct i915_sched_node *node)
{
        node->attr.priority = I915_PRIORITY_INVALID;
        node->semaphores = 0;
        node->flags = 0;

        GEM_BUG_ON(!list_empty(&node->signalers_list));
        GEM_BUG_ON(!list_empty(&node->waiters_list));
        GEM_BUG_ON(!list_empty(&node->link));
}

static struct i915_dependency *
i915_dependency_alloc(void)
{
        return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
        kmem_cache_free(global.slab_dependencies, dep);
}

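/*
 * Link @node as a waiter on @signal, using caller-provided storage @dep.
 * Returns true if the dependency was recorded (@signal had not already
 * signaled); on false, ownership of @dep stays with the caller.
 */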
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
                                      struct i915_sched_node *signal,
                                      struct i915_dependency *dep,
                                      unsigned long flags)
{
        bool ret = false;

        spin_lock_irq(&schedule_lock);

        if (!node_signaled(signal)) {
                INIT_LIST_HEAD(&dep->dfs_link);
                dep->signaler = signal;
                dep->waiter = node;
                dep->flags = flags;

                /* Keep track of whether anyone on this chain has a semaphore */
                if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
                    !node_started(signal))
                        node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;

                /* All set, now publish. Beware the lockless walkers. */
                list_add_rcu(&dep->signal_link, &node->signalers_list);
                list_add_rcu(&dep->wait_link, &signal->waiters_list);

                /*
                 * As we do not allow WAIT to preempt inflight requests,
                 * once we have executed a request, along with triggering
                 * any execution callbacks, we must preserve its ordering
                 * within the non-preemptible FIFO.
                 */
                BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
                if (flags & I915_DEPENDENCY_EXTERNAL)
                        __bump_priority(signal, __NO_PREEMPTION);

                ret = true;
        }

        spin_unlock_irq(&schedule_lock);

        return ret;
}

int i915_sched_node_add_dependency(struct i915_sched_node *node,
                                   struct i915_sched_node *signal,
                                   unsigned long flags)
{
        struct i915_dependency *dep;

        dep = i915_dependency_alloc();
        if (!dep)
                return -ENOMEM;

        if (!__i915_sched_node_add_dependency(node, signal, dep,
                                              flags | I915_DEPENDENCY_ALLOC))
                i915_dependency_free(dep);

        return 0;
}

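/*
 * Called on retirement: sever all dependency links in both directions,
 * freeing any slab-allocated entries (I915_DEPENDENCY_ALLOC). Retirement
 * runs independently per timeline, so either end of a link may be torn
 * down first.
 */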
void i915_sched_node_fini(struct i915_sched_node *node)
{
        struct i915_dependency *dep, *tmp;

        spin_lock_irq(&schedule_lock);

        /*
         * Everyone we depended upon (the fences we wait to be signaled)
         * should retire before us and remove themselves from our list.
         * However, retirement is run independently on each timeline and
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del_rcu(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }
        INIT_LIST_HEAD(&node->signalers_list);

        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
                GEM_BUG_ON(dep->signaler != node);
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del_rcu(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }
        INIT_LIST_HEAD(&node->waiters_list);

        spin_unlock_irq(&schedule_lock);
}

static void i915_global_scheduler_shrink(void)
{
        kmem_cache_shrink(global.slab_dependencies);
        kmem_cache_shrink(global.slab_priorities);
}

static void i915_global_scheduler_exit(void)
{
        kmem_cache_destroy(global.slab_dependencies);
        kmem_cache_destroy(global.slab_priorities);
}

static struct i915_global_scheduler global = { {
        .shrink = i915_global_scheduler_shrink,
        .exit = i915_global_scheduler_exit,
} };

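/*
 * Module init: create the two slab caches and register the shrink/exit
 * hooks above. The dependency cache is SLAB_TYPESAFE_BY_RCU to keep the
 * lockless list walkers (see the list_add_rcu() publication above) safe
 * against reuse.
 */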
int __init i915_global_scheduler_init(void)
{
        global.slab_dependencies = KMEM_CACHE(i915_dependency,
                                              SLAB_HWCACHE_ALIGN |
                                              SLAB_TYPESAFE_BY_RCU);
        if (!global.slab_dependencies)
                return -ENOMEM;

        global.slab_priorities = KMEM_CACHE(i915_priolist,
                                            SLAB_HWCACHE_ALIGN);
        if (!global.slab_priorities)
                goto err_dependencies;

        i915_global_register(&global.base);
        return 0;

err_dependencies:
        /* Unwind the cache created above, not the one that failed */
        kmem_cache_destroy(global.slab_dependencies);
        return -ENOMEM;
}