/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016-2018 Intel Corporation
 */

#include "i915_drv.h"

#include "i915_active.h"
#include "i915_syncmap.h"
#include "i915_timeline.h"

#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
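
/*
 * A page of hardware status (HWSP) storage is shared between many
 * timelines: each timeline claims a single CACHELINE_BYTES slot, and
 * free_bitmap tracks which of the BITS_PER_TYPE(u64) cachelines in the
 * page are still unclaimed.
 */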
struct i915_timeline_hwsp {
	struct i915_gt_timelines *gt;
	struct list_head free_link;
	struct i915_vma *vma;
	u64 free_bitmap;
};

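/*
 * A timeline's current slot within a HWSP page. The low CACHELINE_BITS
 * of vaddr encode which cacheline this is; the CACHELINE_FREE bit marks
 * a cacheline whose timeline has moved on and which is now kept alive
 * only by in-flight requests (see cacheline_free() and
 * __cacheline_retire()).
 */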
struct i915_timeline_cacheline {
	struct i915_active active;
	struct i915_timeline_hwsp *hwsp;
	void *vaddr;
#define CACHELINE_BITS 6
#define CACHELINE_FREE CACHELINE_BITS
};

static inline struct drm_i915_private *
hwsp_to_i915(struct i915_timeline_hwsp *hwsp)
{
	return container_of(hwsp->gt, struct drm_i915_private, gt.timelines);
}

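/*
 * Allocate one page of internal memory for HWSP storage and wrap it in
 * a GGTT vma; the vma is not pinned here, binding is deferred until
 * i915_timeline_pin().
 */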
static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}

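/*
 * Find a cacheline for a new timeline, preferring a partially used HWSP
 * from the freelist before allocating a whole new page. Returns the vma
 * backing the chosen page and writes the claimed cacheline index to
 * *cacheline.
 */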
static struct i915_vma *
hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
{
	struct drm_i915_private *i915 = timeline->i915;
	struct i915_gt_timelines *gt = &i915->gt.timelines;
	struct i915_timeline_hwsp *hwsp;

	BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);

	spin_lock(&gt->hwsp_lock);

	/* hwsp_free_list only contains HWSP that have available cachelines */
	hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
					typeof(*hwsp), free_link);
	if (!hwsp) {
		struct i915_vma *vma;

		spin_unlock(&gt->hwsp_lock);

		hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
		if (!hwsp)
			return ERR_PTR(-ENOMEM);

		vma = __hwsp_alloc(i915);
		if (IS_ERR(vma)) {
			kfree(hwsp);
			return vma;
		}

		vma->private = hwsp;
		hwsp->vma = vma;
		hwsp->free_bitmap = ~0ull;
		hwsp->gt = gt;

		spin_lock(&gt->hwsp_lock);
		list_add(&hwsp->free_link, &gt->hwsp_free_list);
	}

	GEM_BUG_ON(!hwsp->free_bitmap);
	*cacheline = __ffs64(hwsp->free_bitmap);
	hwsp->free_bitmap &= ~BIT_ULL(*cacheline);
	if (!hwsp->free_bitmap)
		list_del(&hwsp->free_link);

	spin_unlock(&gt->hwsp_lock);

	GEM_BUG_ON(hwsp->vma->private != hwsp);
	return hwsp->vma;
}

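/*
 * Return a cacheline to its HWSP page once no request is using it any
 * more; when every cacheline in the page has been returned, the page
 * itself is released.
 */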
static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
{
	struct i915_gt_timelines *gt = hwsp->gt;

	spin_lock(&gt->hwsp_lock);

	/* As a cacheline becomes available, publish the HWSP on the freelist */
	if (!hwsp->free_bitmap)
		list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);

	GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
	hwsp->free_bitmap |= BIT_ULL(cacheline);

	/* And if no one is left using it, give the page back to the system */
	if (hwsp->free_bitmap == ~0ull) {
		i915_vma_put(hwsp->vma);
		list_del(&hwsp->free_link);
		kfree(hwsp);
	}

	spin_unlock(&gt->hwsp_lock);
}

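/*
 * A cacheline dies along one of two paths: if the timeline discards it
 * while requests are still in flight, cacheline_free() only tags the
 * vaddr with CACHELINE_FREE and the final __cacheline_retire() callback
 * performs the release; otherwise cacheline_free() releases it
 * immediately.
 */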
static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
{
	GEM_BUG_ON(!i915_active_is_idle(&cl->active));

	i915_gem_object_unpin_map(cl->hwsp->vma->obj);
	i915_vma_put(cl->hwsp->vma);
	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));

	i915_active_fini(&cl->active);
	kfree(cl);
}

static void __cacheline_retire(struct i915_active *active)
{
	struct i915_timeline_cacheline *cl =
		container_of(active, typeof(*cl), active);

	i915_vma_unpin(cl->hwsp->vma);
	if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
		__idle_cacheline_free(cl);
}

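/*
 * Pin a CPU mapping of the HWSP page, take a reference on its vma and
 * package the (page, cacheline) pair into a new cacheline tracker; the
 * cacheline index is packed into the low bits of the page-aligned vaddr.
 */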
static struct i915_timeline_cacheline *
cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
{
	struct i915_timeline_cacheline *cl;
	void *vaddr;

	GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));

	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return ERR_PTR(-ENOMEM);

	vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		kfree(cl);
		return ERR_CAST(vaddr);
	}

	i915_vma_get(hwsp->vma);
	cl->hwsp = hwsp;
	cl->vaddr = page_pack_bits(vaddr, cacheline);

	i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire);

	return cl;
}

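/*
 * On first acquire the cacheline's vma gains an extra pin so that the
 * GPU can keep reading the seqno; that pin is dropped again by
 * __cacheline_retire() once the last release has been paired and all
 * tracked requests have retired.
 */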
static void cacheline_acquire(struct i915_timeline_cacheline *cl)
{
	if (cl && i915_active_acquire(&cl->active))
		__i915_vma_pin(cl->hwsp->vma);
}

static void cacheline_release(struct i915_timeline_cacheline *cl)
{
	if (cl)
		i915_active_release(&cl->active);
}

static void cacheline_free(struct i915_timeline_cacheline *cl)
{
	GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);

	if (i915_active_is_idle(&cl->active))
		__idle_cacheline_free(cl);
}

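/*
 * Initialise a timeline. If @hwsp is NULL, the timeline claims its own
 * cacheline from a shared HWSP page (and gains an initial breadcrumb);
 * otherwise it uses the fixed slot at I915_GEM_HWS_SEQNO_ADDR within
 * the provided status page.
 */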
int i915_timeline_init(struct drm_i915_private *i915,
		       struct i915_timeline *timeline,
		       struct i915_vma *hwsp)
{
	void *vaddr;

	/*
	 * Ideally we want a set of engines on a single leaf as we expect
	 * to mostly be tracking synchronisation between engines. It is not
	 * a huge issue if this is not the case, but we may want to mitigate
	 * any page crossing penalties if they become an issue.
	 *
	 * Called during early_init before we know how many engines there are.
	 */
	BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);

	timeline->i915 = i915;
	timeline->pin_count = 0;
	timeline->has_initial_breadcrumb = !hwsp;
	timeline->hwsp_cacheline = NULL;

	if (!hwsp) {
		struct i915_timeline_cacheline *cl;
		unsigned int cacheline;

		hwsp = hwsp_alloc(timeline, &cacheline);
		if (IS_ERR(hwsp))
			return PTR_ERR(hwsp);

		cl = cacheline_alloc(hwsp->private, cacheline);
		if (IS_ERR(cl)) {
			__idle_hwsp_free(hwsp->private, cacheline);
			return PTR_ERR(cl);
		}

		timeline->hwsp_cacheline = cl;
		timeline->hwsp_offset = cacheline * CACHELINE_BYTES;

		vaddr = page_mask_bits(cl->vaddr);
	} else {
		timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;

		vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);
	}

	timeline->hwsp_seqno =
		memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);

	timeline->hwsp_ggtt = i915_vma_get(hwsp);
	GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);

	timeline->fence_context = dma_fence_context_alloc(1);

	spin_lock_init(&timeline->lock);
	mutex_init(&timeline->mutex);

	INIT_ACTIVE_REQUEST(&timeline->last_request);
	INIT_LIST_HEAD(&timeline->requests);

	i915_syncmap_init(&timeline->sync);

	return 0;
}

void i915_timelines_init(struct drm_i915_private *i915)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;

	mutex_init(&gt->mutex);
	INIT_LIST_HEAD(&gt->active_list);

	spin_lock_init(&gt->hwsp_lock);
	INIT_LIST_HEAD(&gt->hwsp_free_list);

	/* via i915_gem_wait_for_idle() */
	i915_gem_shrinker_taints_mutex(i915, &gt->mutex);
}

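/*
 * Pinned timelines sit on gt->active_list, under gt->mutex, so that
 * i915_timelines_park() can find them when the GPU idles.
 */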
static void timeline_add_to_active(struct i915_timeline *tl)
{
	struct i915_gt_timelines *gt = &tl->i915->gt.timelines;

	mutex_lock(&gt->mutex);
	list_add(&tl->link, &gt->active_list);
	mutex_unlock(&gt->mutex);
}

static void timeline_remove_from_active(struct i915_timeline *tl)
{
	struct i915_gt_timelines *gt = &tl->i915->gt.timelines;

	mutex_lock(&gt->mutex);
	list_del(&tl->link);
	mutex_unlock(&gt->mutex);
}

/**
 * i915_timelines_park - called when the driver idles
 * @i915: the drm_i915_private device
 *
 * When the driver is completely idle, we know that all of our sync points
 * have been signaled and our tracking is then entirely redundant. Any request
 * to wait upon an older sync point will be completed instantly as we know
 * the fence is signaled and therefore we will not even look them up in the
 * sync point map.
 */
void i915_timelines_park(struct drm_i915_private *i915)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;
	struct i915_timeline *timeline;

	mutex_lock(&gt->mutex);
	list_for_each_entry(timeline, &gt->active_list, link) {
		/*
		 * All known fences are completed so we can scrap
		 * the current sync point tracking and start afresh,
		 * any attempt to wait upon a previous sync point
		 * will be skipped as the fence was signaled.
		 */
		i915_syncmap_free(&timeline->sync);
	}
	mutex_unlock(&gt->mutex);
}

void i915_timeline_fini(struct i915_timeline *timeline)
{
	GEM_BUG_ON(timeline->pin_count);
	GEM_BUG_ON(!list_empty(&timeline->requests));

	i915_syncmap_free(&timeline->sync);

	if (timeline->hwsp_cacheline)
		cacheline_free(timeline->hwsp_cacheline);
	else
		i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);

	i915_vma_put(timeline->hwsp_ggtt);
}

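/*
 * Allocate and initialise a refcounted timeline. For illustration, a
 * caller would typically do (sketch, error handling elided):
 *
 *	tl = i915_timeline_create(i915, NULL);
 *	i915_timeline_pin(tl);
 *	... emit requests, using i915_timeline_get_seqno() ...
 *	i915_timeline_unpin(tl);
 *	i915_timeline_put(tl);
 */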
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915,
		     struct i915_vma *global_hwsp)
{
	struct i915_timeline *timeline;
	int err;

	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
	if (!timeline)
		return ERR_PTR(-ENOMEM);

	err = i915_timeline_init(i915, timeline, global_hwsp);
	if (err) {
		kfree(timeline);
		return ERR_PTR(err);
	}

	kref_init(&timeline->kref);

	return timeline;
}

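/*
 * Pin the timeline's HWSP into the GGTT. Only the first pin does real
 * work: it binds the vma, converts hwsp_offset into the GGTT address
 * the GPU will use, and publishes the timeline on the active list.
 */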
int i915_timeline_pin(struct i915_timeline *tl)
{
	int err;

	if (tl->pin_count++)
		return 0;
	GEM_BUG_ON(!tl->pin_count);

	err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto unpin;

	tl->hwsp_offset =
		i915_ggtt_offset(tl->hwsp_ggtt) +
		offset_in_page(tl->hwsp_offset);

	cacheline_acquire(tl->hwsp_cacheline);
	timeline_add_to_active(tl);

	return 0;

unpin:
	tl->pin_count = 0;
	return err;
}

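/*
 * Timelines with an initial breadcrumb consume two seqno values per
 * request, keeping the final (completion) seqno even; timeline_advance()
 * hands out the next value and timeline_rollback() undoes it if request
 * construction fails.
 */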
static u32 timeline_advance(struct i915_timeline *tl)
{
	GEM_BUG_ON(!tl->pin_count);
	GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);

	return tl->seqno += 1 + tl->has_initial_breadcrumb;
}

static void timeline_rollback(struct i915_timeline *tl)
{
	tl->seqno -= 1 + tl->has_initial_breadcrumb;
}

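/*
 * Slow path for seqno wraparound: allocate a fresh cacheline for the
 * timeline and keep the old one alive until every request still reading
 * it (e.g. via a HW semaphore) has retired.
 */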
static noinline int
__i915_timeline_get_seqno(struct i915_timeline *tl,
			  struct i915_request *rq,
			  u32 *seqno)
{
	struct i915_timeline_cacheline *cl;
	unsigned int cacheline;
	struct i915_vma *vma;
	void *vaddr;
	int err;

	/*
	 * If there is an outstanding GPU reference to this cacheline,
	 * such as it being sampled by a HW semaphore on another timeline,
	 * we cannot wraparound our seqno value (the HW semaphore does
	 * a strict greater-than-or-equals compare, not i915_seqno_passed).
	 * So if the cacheline is still busy, we must detach ourselves
	 * from it and leave it inflight alongside its users.
	 *
	 * However, if nobody is watching and we can guarantee that nobody
	 * will, we could simply reuse the same cacheline.
	 *
	 * if (i915_active_request_is_signaled(&tl->last_request) &&
	 *     i915_active_is_signaled(&tl->hwsp_cacheline->active))
	 *	return 0;
	 *
	 * That seems unlikely for a busy timeline that needed to wrap in
	 * the first place, so just replace the cacheline.
	 */

	vma = hwsp_alloc(tl, &cacheline);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_rollback;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err) {
		__idle_hwsp_free(vma->private, cacheline);
		goto err_rollback;
	}

	cl = cacheline_alloc(vma->private, cacheline);
	if (IS_ERR(cl)) {
		err = PTR_ERR(cl);
		__idle_hwsp_free(vma->private, cacheline);
		goto err_unpin;
	}
	GEM_BUG_ON(cl->hwsp->vma != vma);

	/*
	 * Attach the old cacheline to the current request, so that we only
	 * free it after the current request is retired, which ensures that
	 * all writes into the cacheline from previous requests are complete.
	 */
	err = i915_active_ref(&tl->hwsp_cacheline->active,
			      tl->fence_context, rq);
	if (err)
		goto err_cacheline;

	cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */
	cacheline_free(tl->hwsp_cacheline);

	i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
	i915_vma_put(tl->hwsp_ggtt);

	tl->hwsp_ggtt = i915_vma_get(vma);

	vaddr = page_mask_bits(cl->vaddr);
	tl->hwsp_offset = cacheline * CACHELINE_BYTES;
	tl->hwsp_seqno =
		memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);

	tl->hwsp_offset += i915_ggtt_offset(vma);

	cacheline_acquire(cl);
	tl->hwsp_cacheline = cl;

	*seqno = timeline_advance(tl);
	GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
	return 0;

err_cacheline:
	cacheline_free(cl);
err_unpin:
	i915_vma_unpin(vma);
err_rollback:
	timeline_rollback(tl);
	return err;
}

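/*
 * Reserve the next seqno on the timeline for @rq. On wraparound (the new
 * seqno is 0) the backing cacheline must be replaced, as HW semaphores on
 * other engines may still be comparing against the old values.
 */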
int i915_timeline_get_seqno(struct i915_timeline *tl,
			    struct i915_request *rq,
			    u32 *seqno)
{
	*seqno = timeline_advance(tl);

	/* Replace the HWSP on wraparound for HW semaphores */
	if (unlikely(!*seqno && tl->hwsp_cacheline))
		return __i915_timeline_get_seqno(tl, rq, seqno);

	return 0;
}

static int cacheline_ref(struct i915_timeline_cacheline *cl,
			 struct i915_request *rq)
{
	return i915_active_ref(&cl->active, rq->fence.context, rq);
}

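/*
 * Report the GGTT address at which @from's completion seqno can be read,
 * for use by a semaphore wait emitted into @to. The reference taken from
 * @to onto the cacheline keeps the old location valid even if @from's
 * timeline has since wrapped onto a new cacheline.
 */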
int i915_timeline_read_hwsp(struct i915_request *from,
			    struct i915_request *to,
			    u32 *hwsp)
{
	struct i915_timeline_cacheline *cl = from->hwsp_cacheline;
	struct i915_timeline *tl = from->timeline;
	int err;

	GEM_BUG_ON(to->timeline == tl);

	mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
	err = i915_request_completed(from);
	if (!err)
		err = cacheline_ref(cl, to);
	if (!err) {
		if (likely(cl == tl->hwsp_cacheline)) {
			*hwsp = tl->hwsp_offset;
		} else { /* across a seqno wrap, recover the original offset */
			*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
				ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
				CACHELINE_BYTES;
		}
	}
	mutex_unlock(&tl->mutex);

	return err;
}

void i915_timeline_unpin(struct i915_timeline *tl)
{
	GEM_BUG_ON(!tl->pin_count);
	if (--tl->pin_count)
		return;

	timeline_remove_from_active(tl);
	cacheline_release(tl->hwsp_cacheline);

	/*
	 * Since this timeline is idle, all barriers upon which we were waiting
	 * must also be complete and so we can discard the last used barriers
	 * without loss of information.
	 */
	i915_syncmap_free(&tl->sync);

	__i915_vma_unpin(tl->hwsp_ggtt);
}

void __i915_timeline_free(struct kref *kref)
{
	struct i915_timeline *timeline =
		container_of(kref, typeof(*timeline), kref);

	i915_timeline_fini(timeline);
	kfree(timeline);
}

void i915_timelines_fini(struct drm_i915_private *i915)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;

	GEM_BUG_ON(!list_empty(&gt->active_list));
	GEM_BUG_ON(!list_empty(&gt->hwsp_free_list));

	mutex_destroy(&gt->mutex);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_timeline.c"
#include "selftests/i915_timeline.c"
#endif