/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "../i915_reset.h"

#include "../i915_selftest.h"
#include "igt_flush_test.h"
#include "igt_spinner.h"
#include "i915_random.h"

#include "mock_context.h"

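/*
 * Basic smoke test of the execlists submission path: on every engine,
 * submit a spinning batch, check that it starts executing, then end it
 * and flush before moving on.
 */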
static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin, i915))
		goto err_unlock;

	ctx = kernel_context(i915);
	if (!ctx)
		goto err_spin;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin, rq)) {
			GEM_TRACE("spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx;
		}

		igt_spinner_end(&spin);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx;
		}
	}

	err = 0;
err_ctx:
	kernel_context_close(ctx);
err_spin:
	igt_spinner_fini(&spin);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

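/*
 * Start a spinner from a minimum-priority context, then submit a second
 * spinner from a maximum-priority context and check that it begins
 * executing, i.e. that it preempts the low-priority spinner on every engine.
 */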
static int live_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

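/*
 * As live_preempt, but both contexts start at default priority: verify the
 * second spinner does not overtake the first, and only after its request is
 * bumped to maximum priority via engine->schedule() does it preempt the
 * running spinner.
 */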
static int live_late_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {};
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			pr_err("First context failed to start\n");
			goto err_wedged;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_NOOP);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("Second context overtook first?\n");
			goto err_wedged;
		}

		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
		engine->schedule(rq, &attr);

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("High priority context failed to preempt the low priority context\n");
			GEM_TRACE_DUMP();
			goto err_wedged;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&spin_hi);
	igt_spinner_end(&spin_lo);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_ctx_lo;
}

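/*
 * A preempt_client bundles a spinner with its own context, so the
 * preemption tests below can juggle several independent submission streams.
 */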
struct preempt_client {
	struct igt_spinner spin;
	struct i915_gem_context *ctx;
};

static int preempt_client_init(struct drm_i915_private *i915,
			       struct preempt_client *c)
{
	c->ctx = kernel_context(i915);
	if (!c->ctx)
		return -ENOMEM;

	if (igt_spinner_init(&c->spin, i915))
		goto err_ctx;

	return 0;

err_ctx:
	kernel_context_close(c->ctx);
	return -ENOMEM;
}

static void preempt_client_fini(struct preempt_client *c)
{
	igt_spinner_fini(&c->spin);
	kernel_context_close(c->ctx);
}

static int live_suppress_self_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
	};
	struct preempt_client a, b;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	/*
	 * Verify that if a preemption request does not cause a change in
	 * the current execution order, the preempt-to-idle injection is
	 * skipped and that we do not accidentally apply it after the CS
	 * completion event.
	 */

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (USES_GUC_SUBMISSION(i915))
		return 0; /* presume black box */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (preempt_client_init(i915, &a))
		goto err_unlock;
	if (preempt_client_init(i915, &b))
		goto err_client_a;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq_a, *rq_b;
		int depth;

		engine->execlists.preempt_hang.count = 0;

		rq_a = igt_spinner_create_request(&a.spin,
						  a.ctx, engine,
						  MI_NOOP);
		if (IS_ERR(rq_a)) {
			err = PTR_ERR(rq_a);
			goto err_client_b;
		}

		i915_request_add(rq_a);
		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
			pr_err("First client failed to start\n");
			goto err_wedged;
		}

		for (depth = 0; depth < 8; depth++) {
			rq_b = igt_spinner_create_request(&b.spin,
							  b.ctx, engine,
							  MI_NOOP);
			if (IS_ERR(rq_b)) {
				err = PTR_ERR(rq_b);
				goto err_client_b;
			}
			i915_request_add(rq_b);

			GEM_BUG_ON(i915_request_completed(rq_a));
			engine->schedule(rq_a, &attr);
			igt_spinner_end(&a.spin);

			if (!igt_wait_for_spinner(&b.spin, rq_b)) {
				pr_err("Second client failed to start\n");
				goto err_wedged;
			}

			swap(a, b);
			rq_a = rq_b;
		}
		igt_spinner_end(&a.spin);

		if (engine->execlists.preempt_hang.count) {
			pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
			       engine->execlists.preempt_hang.count,
			       depth);
			err = -EINVAL;
			goto err_client_b;
		}

		if (igt_flush_test(i915, I915_WAIT_LOCKED))
			goto err_wedged;
	}

	err = 0;
err_client_b:
	preempt_client_fini(&b);
err_client_a:
	preempt_client_fini(&a);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&b.spin);
	igt_spinner_end(&a.spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_b;
}

static int live_chain_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct preempt_client hi, lo;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	/*
	 * Build a chain AB...BA between two contexts (A, B) and request
	 * preemption of the last request. It should then complete before
	 * the previously submitted spinner in B.
	 */

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (preempt_client_init(i915, &hi))
		goto err_unlock;

	if (preempt_client_init(i915, &lo))
		goto err_client_hi;

	for_each_engine(engine, i915, id) {
		struct i915_sched_attr attr = {
			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
		};
		int count, i;

		for_each_prime_number_from(count, 1, 32) { /* must fit ring! */
			struct i915_request *rq;

			rq = igt_spinner_create_request(&hi.spin,
							hi.ctx, engine,
							MI_ARB_CHECK);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			if (!igt_wait_for_spinner(&hi.spin, rq))
				goto err_wedged;

			rq = igt_spinner_create_request(&lo.spin,
							lo.ctx, engine,
							MI_ARB_CHECK);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);

			for (i = 0; i < count; i++) {
				rq = i915_request_alloc(engine, lo.ctx);
				if (IS_ERR(rq))
					goto err_wedged;
				i915_request_add(rq);
			}

			rq = i915_request_alloc(engine, hi.ctx);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			engine->schedule(rq, &attr);

			igt_spinner_end(&hi.spin);
			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
				struct drm_printer p =
					drm_info_printer(i915->drm.dev);

				pr_err("Failed to preempt over chain of %d\n",
				       count);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);
				goto err_wedged;
			}
			igt_spinner_end(&lo.spin);
		}
	}

	err = 0;
err_client_lo:
	preempt_client_fini(&lo);
err_client_hi:
	preempt_client_fini(&hi);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&hi.spin);
	igt_spinner_end(&lo.spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_lo;
}

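/*
 * Inject a hang at the point of preemption, recover with a per-engine
 * reset, and check that the preempted high-priority spinner still starts
 * afterwards. Requires engine-reset support.
 */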
static int live_preempt_hang(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		init_completion(&engine->execlists.preempt_hang.completion);
		engine->execlists.preempt_hang.inject_hang = true;

		i915_request_add(rq);

		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
						 HZ / 10)) {
582 pr_err("Preemption did not occur within timeout!");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
		i915_reset_engine(engine, NULL);
		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

		engine->execlists.preempt_hang.inject_hang = false;

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

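/*
 * Helpers for the preemption smoketest: random priorities, a pool of
 * contexts to pick from, and the shared state handed to each submitter.
 */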
static int random_range(struct rnd_state *rnd, int min, int max)
{
	return i915_prandom_u32_max_state(max - min, rnd) + min;
}

static int random_priority(struct rnd_state *rnd)
{
	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}

struct preempt_smoke {
	struct drm_i915_private *i915;
	struct i915_gem_context **contexts;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *batch;
	unsigned int ncontext;
	struct rnd_state prng;
	unsigned long count;
};

static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
							  &smoke->prng)];
}

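/*
 * Submit a single request from the given context at the given priority,
 * optionally executing the shared MI_ARB_CHECK batch so there is a real,
 * preemptible payload on the engine.
 */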
static int smoke_submit(struct preempt_smoke *smoke,
			struct i915_gem_context *ctx, int prio,
			struct drm_i915_gem_object *batch)
{
	struct i915_request *rq;
	struct i915_vma *vma = NULL;
	int err = 0;

	if (batch) {
		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;
	}

	ctx->sched.priority = prio;

	rq = i915_request_alloc(smoke->engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	if (vma) {
		err = rq->engine->emit_bb_start(rq,
						vma->node.start,
						PAGE_SIZE, 0);
		if (!err)
			err = i915_vma_move_to_active(vma, rq, 0);
	}

	i915_request_add(rq);

unpin:
	if (vma)
		i915_vma_unpin(vma);

	return err;
}

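/*
 * Per-engine submitter thread: keep picking a random context and submitting
 * at a priority that cycles upwards (count % I915_PRIORITY_MAX) until the
 * timeout expires, then record how many requests were submitted.
 */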
static int smoke_crescendo_thread(void *arg)
{
	struct preempt_smoke *smoke = arg;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_gem_context *ctx = smoke_context(smoke);
		int err;

		mutex_lock(&smoke->i915->drm.struct_mutex);
		err = smoke_submit(smoke,
				   ctx, count % I915_PRIORITY_MAX,
				   smoke->batch);
		mutex_unlock(&smoke->i915->drm.struct_mutex);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	smoke->count = count;
	return 0;
}

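/*
 * Run one crescendo submitter thread per engine in parallel, dropping
 * struct_mutex so each thread can take it per submission, then reap the
 * threads and report the total number of requests submitted.
 */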
static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
	struct task_struct *tsk[I915_NUM_ENGINES] = {};
	struct preempt_smoke arg[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count;
	int err = 0;

	mutex_unlock(&smoke->i915->drm.struct_mutex);

	for_each_engine(engine, smoke->i915, id) {
		arg[id] = *smoke;
		arg[id].engine = engine;
		if (!(flags & BATCH))
			arg[id].batch = NULL;
		arg[id].count = 0;

		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
				      "igt/smoke:%d", id);
		if (IS_ERR(tsk[id])) {
			err = PTR_ERR(tsk[id]);
			break;
		}
		get_task_struct(tsk[id]);
	}

	count = 0;
	for_each_engine(engine, smoke->i915, id) {
		int status;

		if (IS_ERR_OR_NULL(tsk[id]))
			continue;

		status = kthread_stop(tsk[id]);
		if (status && !err)
			err = status;

		count += arg[id].count;

		put_task_struct(tsk[id]);
	}

	mutex_lock(&smoke->i915->drm.struct_mutex);

	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
	return err;
}

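/*
 * Single-threaded variant: walk the engines round-robin, submitting from a
 * random context at a random priority until the timeout expires.
 */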
static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
	enum intel_engine_id id;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		for_each_engine(smoke->engine, smoke->i915, id) {
			struct i915_gem_context *ctx = smoke_context(smoke);
			int err;

			err = smoke_submit(smoke,
					   ctx, random_priority(&smoke->prng),
					   flags & BATCH ? smoke->batch : NULL);
			if (err)
				return err;

			count++;
		}
	} while (!__igt_timeout(end_time, NULL));

	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
	return 0;
}

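/*
 * Preemption smoketest: build a page of MI_ARB_CHECK commands as a
 * preemptible batch, create a large pool of contexts, and hammer the
 * scheduler with both the crescendo and random submission patterns,
 * first without and then with the batch payload.
 */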
static int live_preempt_smoke(void *arg)
{
	struct preempt_smoke smoke = {
		.i915 = arg,
		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
		.ncontext = 1024,
	};
	const unsigned int phase[] = { 0, BATCH };
	intel_wakeref_t wakeref;
	int err = -ENOMEM;
	u32 *cs;
	int n;

	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
		return 0;

	smoke.contexts = kmalloc_array(smoke.ncontext,
				       sizeof(*smoke.contexts),
				       GFP_KERNEL);
	if (!smoke.contexts)
		return -ENOMEM;

	mutex_lock(&smoke.i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(smoke.i915);

	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
	if (IS_ERR(smoke.batch)) {
		err = PTR_ERR(smoke.batch);
		goto err_unlock;
	}

	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}
	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
		cs[n] = MI_ARB_CHECK;
	cs[n] = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(smoke.batch);

	err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
	if (err)
		goto err_batch;

	for (n = 0; n < smoke.ncontext; n++) {
		smoke.contexts[n] = kernel_context(smoke.i915);
		if (!smoke.contexts[n])
			goto err_ctx;
	}

	for (n = 0; n < ARRAY_SIZE(phase); n++) {
		err = smoke_crescendo(&smoke, phase[n]);
		if (err)
			goto err_ctx;

		err = smoke_random(&smoke, phase[n]);
		if (err)
			goto err_ctx;
	}

err_ctx:
	if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
		err = -EIO;

	for (n = 0; n < smoke.ncontext; n++) {
		if (!smoke.contexts[n])
			break;
		kernel_context_close(smoke.contexts[n]);
	}

err_batch:
	i915_gem_object_put(smoke.batch);
err_unlock:
	intel_runtime_pm_put(smoke.i915, wakeref);
	mutex_unlock(&smoke.i915->drm.struct_mutex);
	kfree(smoke.contexts);

	return err;
}

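/*
 * Entry point: run the execlists live selftests, skipping hardware without
 * execlists support or a terminally wedged GPU.
 */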
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_preempt),
		SUBTEST(live_late_preempt),
		SUBTEST(live_suppress_self_preempt),
		SUBTEST(live_chain_preempt),
		SUBTEST(live_preempt_hang),
		SUBTEST(live_preempt_smoke),
	};

	if (!HAS_EXECLISTS(i915))
		return 0;

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	return i915_subtests(tests, i915);
}