/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016-2018 Intel Corporation
 */

#include "i915_drv.h"

#include "i915_active.h"
#include "i915_syncmap.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_timeline.h"

#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))

#define CACHELINE_BITS 6
#define CACHELINE_FREE CACHELINE_BITS

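/*
 * A HWSP (HW status page) backing store: a single GGTT-mapped page shared
 * between timelines and carved into CACHELINE_BYTES-sized slots, with
 * free_bitmap tracking which of the 64 cachelines are still available.
 */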
struct intel_timeline_hwsp {
        struct intel_gt *gt;
        struct intel_gt_timelines *gt_timelines;
        struct list_head free_link;
        struct i915_vma *vma;
        u64 free_bitmap;
};

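/* Allocate a fresh page-sized internal object and map it into the GGTT. */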
static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

        vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
        if (IS_ERR(vma))
                i915_gem_object_put(obj);

        return vma;
}

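/*
 * Grab a free cacheline from an existing HWSP page, allocating a new page
 * when none have room. The chosen cacheline index is returned via *cacheline.
 */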
static struct i915_vma *
hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
{
        struct intel_gt_timelines *gt = &timeline->gt->timelines;
        struct intel_timeline_hwsp *hwsp;

        BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);

        spin_lock_irq(&gt->hwsp_lock);

        /* hwsp_free_list only contains HWSP that have available cachelines */
        hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
                                        typeof(*hwsp), free_link);
        if (!hwsp) {
                struct i915_vma *vma;

                spin_unlock_irq(&gt->hwsp_lock);

                hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
                if (!hwsp)
                        return ERR_PTR(-ENOMEM);

                vma = __hwsp_alloc(timeline->gt);
                if (IS_ERR(vma)) {
                        kfree(hwsp);
                        return vma;
                }

                vma->private = hwsp;
                hwsp->gt = timeline->gt;
                hwsp->vma = vma;
                hwsp->free_bitmap = ~0ull;
                hwsp->gt_timelines = gt;

                spin_lock_irq(&gt->hwsp_lock);
                list_add(&hwsp->free_link, &gt->hwsp_free_list);
        }

        GEM_BUG_ON(!hwsp->free_bitmap);
        *cacheline = __ffs64(hwsp->free_bitmap);
        hwsp->free_bitmap &= ~BIT_ULL(*cacheline);
        if (!hwsp->free_bitmap)
                list_del(&hwsp->free_link);

        spin_unlock_irq(&gt->hwsp_lock);

        GEM_BUG_ON(hwsp->vma->private != hwsp);
        return hwsp->vma;
}

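/*
 * Return a cacheline to its HWSP page. If that leaves the page completely
 * unused, release the backing vma and free the bookkeeping structure.
 */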
static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
{
        struct intel_gt_timelines *gt = hwsp->gt_timelines;
        unsigned long flags;

        spin_lock_irqsave(&gt->hwsp_lock, flags);

        /* As a cacheline becomes available, publish the HWSP on the freelist */
        if (!hwsp->free_bitmap)
                list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);

        GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
        hwsp->free_bitmap |= BIT_ULL(cacheline);

        /* And if no one is left using it, give the page back to the system */
        if (hwsp->free_bitmap == ~0ull) {
                i915_vma_put(hwsp->vma);
                list_del(&hwsp->free_link);
                kfree(hwsp);
        }

        spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}

static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
        GEM_BUG_ON(!i915_active_is_idle(&cl->active));

        i915_gem_object_unpin_map(cl->hwsp->vma->obj);
        i915_vma_put(cl->hwsp->vma);
        __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));

        i915_active_fini(&cl->active);
        kfree_rcu(cl, rcu);
}

__i915_active_call
static void __cacheline_retire(struct i915_active *active)
{
        struct intel_timeline_cacheline *cl =
                container_of(active, typeof(*cl), active);

        i915_vma_unpin(cl->hwsp->vma);
        if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
                __idle_cacheline_free(cl);
}

static int __cacheline_active(struct i915_active *active)
{
        struct intel_timeline_cacheline *cl =
                container_of(active, typeof(*cl), active);

        __i915_vma_pin(cl->hwsp->vma);
        return 0;
}

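/*
 * Allocate tracking for a single cacheline within a HWSP page, pinning the
 * page's kernel mapping and packing the cacheline index into the low bits
 * of the stored vaddr.
 */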
static struct intel_timeline_cacheline *
cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
{
        struct intel_timeline_cacheline *cl;
        void *vaddr;

        GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));

        cl = kmalloc(sizeof(*cl), GFP_KERNEL);
        if (!cl)
                return ERR_PTR(-ENOMEM);

        vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                kfree(cl);
                return ERR_CAST(vaddr);
        }

        i915_vma_get(hwsp->vma);
        cl->hwsp = hwsp;
        cl->vaddr = page_pack_bits(vaddr, cacheline);

        i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);

        return cl;
}

static void cacheline_acquire(struct intel_timeline_cacheline *cl)
{
        if (cl)
                i915_active_acquire(&cl->active);
}

static void cacheline_release(struct intel_timeline_cacheline *cl)
{
        if (cl)
                i915_active_release(&cl->active);
}

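/*
 * Drop our reference to the cacheline: free it immediately if it is already
 * idle, otherwise flag it so the final retirement callback frees it.
 */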
static void cacheline_free(struct intel_timeline_cacheline *cl)
{
        if (!i915_active_acquire_if_busy(&cl->active)) {
                __idle_cacheline_free(cl);
                return;
        }

        GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
        cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);

        i915_active_release(&cl->active);
}

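/*
 * Initialise a timeline. If no global HWSP vma is supplied, carve a private
 * cacheline out of the shared HWSP pool to hold this timeline's seqno.
 */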
int intel_timeline_init(struct intel_timeline *timeline,
                        struct intel_gt *gt,
                        struct i915_vma *hwsp)
{
        void *vaddr;

        kref_init(&timeline->kref);
        atomic_set(&timeline->pin_count, 0);

        timeline->gt = gt;

        timeline->has_initial_breadcrumb = !hwsp;
        timeline->hwsp_cacheline = NULL;

        if (!hwsp) {
                struct intel_timeline_cacheline *cl;
                unsigned int cacheline;

                hwsp = hwsp_alloc(timeline, &cacheline);
                if (IS_ERR(hwsp))
                        return PTR_ERR(hwsp);

                cl = cacheline_alloc(hwsp->private, cacheline);
                if (IS_ERR(cl)) {
                        __idle_hwsp_free(hwsp->private, cacheline);
                        return PTR_ERR(cl);
                }

                timeline->hwsp_cacheline = cl;
                timeline->hwsp_offset = cacheline * CACHELINE_BYTES;

                vaddr = page_mask_bits(cl->vaddr);
        } else {
                timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;

                vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
                if (IS_ERR(vaddr))
                        return PTR_ERR(vaddr);
        }

        timeline->hwsp_seqno =
                memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);

        timeline->hwsp_ggtt = i915_vma_get(hwsp);
        GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);

        timeline->fence_context = dma_fence_context_alloc(1);

        mutex_init(&timeline->mutex);

        INIT_ACTIVE_FENCE(&timeline->last_request);
        INIT_LIST_HEAD(&timeline->requests);

        i915_syncmap_init(&timeline->sync);

        return 0;
}

void intel_gt_init_timelines(struct intel_gt *gt)
{
        struct intel_gt_timelines *timelines = &gt->timelines;

        spin_lock_init(&timelines->lock);
        INIT_LIST_HEAD(&timelines->active_list);

        spin_lock_init(&timelines->hwsp_lock);
        INIT_LIST_HEAD(&timelines->hwsp_free_list);
}

void intel_timeline_fini(struct intel_timeline *timeline)
{
        GEM_BUG_ON(atomic_read(&timeline->pin_count));
        GEM_BUG_ON(!list_empty(&timeline->requests));
        GEM_BUG_ON(timeline->retire);

        if (timeline->hwsp_cacheline)
                cacheline_free(timeline->hwsp_cacheline);
        else
                i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);

        i915_vma_put(timeline->hwsp_ggtt);
}

struct intel_timeline *
intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
{
        struct intel_timeline *timeline;
        int err;

        timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
        if (!timeline)
                return ERR_PTR(-ENOMEM);

        err = intel_timeline_init(timeline, gt, global_hwsp);
        if (err) {
                kfree(timeline);
                return ERR_PTR(err);
        }

        return timeline;
}

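/*
 * Pin the timeline's HWSP into the GGTT. Only the first pin does the work;
 * a concurrent caller that loses the pin_count race undoes its own
 * acquire/pin below.
 */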
int intel_timeline_pin(struct intel_timeline *tl)
{
        int err;

        if (atomic_add_unless(&tl->pin_count, 1, 0))
                return 0;

        err = i915_ggtt_pin(tl->hwsp_ggtt, 0, PIN_HIGH);
        if (err)
                return err;

        tl->hwsp_offset =
                i915_ggtt_offset(tl->hwsp_ggtt) +
                offset_in_page(tl->hwsp_offset);

        cacheline_acquire(tl->hwsp_cacheline);
        if (atomic_fetch_inc(&tl->pin_count)) {
                cacheline_release(tl->hwsp_cacheline);
                __i915_vma_unpin(tl->hwsp_ggtt);
        }

        return 0;
}

void intel_timeline_enter(struct intel_timeline *tl)
{
        struct intel_gt_timelines *timelines = &tl->gt->timelines;

        /*
         * Pretend we are serialised by the timeline->mutex.
         *
         * While generally true, there are a few exceptions to the rule
         * for the engine->kernel_context being used to manage power
         * transitions. As the engine_park may be called from under any
         * timeline, it uses the power mutex as a global serialisation
         * lock to prevent any other request entering its timeline.
         *
         * The rule is generally tl->mutex, otherwise engine->wakeref.mutex.
         *
         * However, intel_gt_retire_request() does not know which engine
         * it is retiring along and so cannot partake in the engine-pm
         * barrier, and there we use the tl->active_count as a means to
         * pin the timeline in the active_list while the locks are dropped.
         * Ergo, as that is outside of the engine-pm barrier, we need to
         * use atomic to manipulate tl->active_count.
         */
        lockdep_assert_held(&tl->mutex);

        if (atomic_add_unless(&tl->active_count, 1, 0))
                return;

        spin_lock(&timelines->lock);
        if (!atomic_fetch_inc(&tl->active_count))
                list_add_tail(&tl->link, &timelines->active_list);
        spin_unlock(&timelines->lock);
}

void intel_timeline_exit(struct intel_timeline *tl)
{
        struct intel_gt_timelines *timelines = &tl->gt->timelines;

        /* See intel_timeline_enter() */
        lockdep_assert_held(&tl->mutex);

        GEM_BUG_ON(!atomic_read(&tl->active_count));
        if (atomic_add_unless(&tl->active_count, -1, 1))
                return;

        spin_lock(&timelines->lock);
        if (atomic_dec_and_test(&tl->active_count))
                list_del(&tl->link);
        spin_unlock(&timelines->lock);

        /*
         * Since this timeline is idle, all barriers upon which we were waiting
         * must also be complete and so we can discard the last used barriers
         * without loss of information.
         */
        i915_syncmap_free(&tl->sync);
}

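/*
 * Reserve the next seqno for a request: two slots when the timeline emits
 * an initial breadcrumb, one otherwise.
 */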
static u32 timeline_advance(struct intel_timeline *tl)
{
        GEM_BUG_ON(!atomic_read(&tl->pin_count));
        GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);

        return tl->seqno += 1 + tl->has_initial_breadcrumb;
}

static void timeline_rollback(struct intel_timeline *tl)
{
        tl->seqno -= 1 + tl->has_initial_breadcrumb;
}

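/* Slow path for seqno wraparound: move the timeline onto a fresh cacheline. */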
static noinline int
__intel_timeline_get_seqno(struct intel_timeline *tl,
                           struct i915_request *rq,
                           u32 *seqno)
{
        struct intel_timeline_cacheline *cl;
        unsigned int cacheline;
        struct i915_vma *vma;
        void *vaddr;
        int err;

        might_lock(&tl->gt->ggtt->vm.mutex);

        /*
         * If there is an outstanding GPU reference to this cacheline,
         * such as it being sampled by a HW semaphore on another timeline,
         * we cannot wraparound our seqno value (the HW semaphore does
         * a strict greater-than-or-equals compare, not i915_seqno_passed).
         * So if the cacheline is still busy, we must detach ourselves
         * from it and leave it inflight alongside its users.
         *
         * However, if nobody is watching and we can guarantee that nobody
         * will, we could simply reuse the same cacheline.
         *
         * if (i915_active_request_is_signaled(&tl->last_request) &&
         *     i915_active_is_signaled(&tl->hwsp_cacheline->active))
         *         return 0;
         *
         * That seems unlikely for a busy timeline that needed to wrap in
         * the first place, so just replace the cacheline.
         */

        vma = hwsp_alloc(tl, &cacheline);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_rollback;
        }

        err = i915_ggtt_pin(vma, 0, PIN_HIGH);
        if (err) {
                __idle_hwsp_free(vma->private, cacheline);
                goto err_rollback;
        }

        cl = cacheline_alloc(vma->private, cacheline);
        if (IS_ERR(cl)) {
                err = PTR_ERR(cl);
                __idle_hwsp_free(vma->private, cacheline);
                goto err_unpin;
        }
        GEM_BUG_ON(cl->hwsp->vma != vma);

        /*
         * Attach the old cacheline to the current request, so that we only
         * free it after the current request is retired, which ensures that
         * all writes into the cacheline from previous requests are complete.
         */
        err = i915_active_ref(&tl->hwsp_cacheline->active, tl, &rq->fence);
        if (err)
                goto err_cacheline;

        cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */
        cacheline_free(tl->hwsp_cacheline);

        i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
        i915_vma_put(tl->hwsp_ggtt);

        tl->hwsp_ggtt = i915_vma_get(vma);

        vaddr = page_mask_bits(cl->vaddr);
        tl->hwsp_offset = cacheline * CACHELINE_BYTES;
        tl->hwsp_seqno =
                memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);

        tl->hwsp_offset += i915_ggtt_offset(vma);

        cacheline_acquire(cl);
        tl->hwsp_cacheline = cl;

        *seqno = timeline_advance(tl);
        GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
        return 0;

err_cacheline:
        cacheline_free(cl);
err_unpin:
        i915_vma_unpin(vma);
err_rollback:
        timeline_rollback(tl);
        return err;
}

int intel_timeline_get_seqno(struct intel_timeline *tl,
                             struct i915_request *rq,
                             u32 *seqno)
{
        *seqno = timeline_advance(tl);

        /* Replace the HWSP on wraparound for HW semaphores */
        if (unlikely(!*seqno && tl->hwsp_cacheline))
                return __intel_timeline_get_seqno(tl, rq, seqno);

        return 0;
}

static int cacheline_ref(struct intel_timeline_cacheline *cl,
                         struct i915_request *rq)
{
        return i915_active_add_request(&cl->active, rq);
}

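/*
 * Look up the GGTT address of the seqno that 'from' will write, keeping its
 * cacheline alive until 'to' retires. Returns 1 if 'from' has already
 * completed and no HWSP read is required.
 */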
int intel_timeline_read_hwsp(struct i915_request *from,
                             struct i915_request *to,
                             u32 *hwsp)
{
        struct intel_timeline_cacheline *cl;
        int err;

        GEM_BUG_ON(!rcu_access_pointer(from->hwsp_cacheline));

        rcu_read_lock();
        cl = rcu_dereference(from->hwsp_cacheline);
        if (i915_request_completed(from)) /* confirm cacheline is valid */
                goto unlock;
        if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
                goto unlock; /* seqno wrapped and completed! */
        if (unlikely(i915_request_completed(from)))
                goto release;
        rcu_read_unlock();

        err = cacheline_ref(cl, to);
        if (err)
                goto out;

        *hwsp = i915_ggtt_offset(cl->hwsp->vma) +
                ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;

out:
        i915_active_release(&cl->active);
        return err;

release:
        i915_active_release(&cl->active);
unlock:
        rcu_read_unlock();
        return 1;
}

void intel_timeline_unpin(struct intel_timeline *tl)
{
        GEM_BUG_ON(!atomic_read(&tl->pin_count));
        if (!atomic_dec_and_test(&tl->pin_count))
                return;

        cacheline_release(tl->hwsp_cacheline);

        __i915_vma_unpin(tl->hwsp_ggtt);
}

void __intel_timeline_free(struct kref *kref)
{
        struct intel_timeline *timeline =
                container_of(kref, typeof(*timeline), kref);

        intel_timeline_fini(timeline);
        kfree_rcu(timeline, rcu);
}

void intel_gt_fini_timelines(struct intel_gt *gt)
{
        struct intel_gt_timelines *timelines = &gt->timelines;

        GEM_BUG_ON(!list_empty(&timelines->active_list));
        GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "gt/selftests/mock_timeline.c"
#include "gt/selftest_timeline.c"
#endif