/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	spin_lock(&obj->vma.lock);

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
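
/*
 * Illustrative usage sketch for i915_vma_instance() above; not part of
 * the original file. The "ggtt" pointer stands in for a hypothetical
 * struct i915_ggtt the caller already holds. Callers typically pair the
 * lookup with a pin and, eventually, an unpin:
 *
 *	struct i915_vma *vma;
 *	int err;
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *	...
 *	i915_vma_unpin(vma);
 */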

struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_vma *vma;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static int __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;
	int err;

	err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
	if (err)
		atomic_or(I915_VMA_ERROR, &vma->flags);

	return err;
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned)
		__i915_gem_object_unpin_pages(vw->pinned);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
			dma_fence_put(fence);
		}
	}

	return err;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags | I915_VMA_ALLOC;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */

		if (vma->obj) {
			__i915_gem_object_pin_pages(vma->obj);
			work->pinned = vma->obj;
		}
	} else {
		ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
		if (ret)
			return ret;
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}
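
/*
 * Illustrative sketch of the async-bind worker lifecycle for
 * i915_vma_bind() above; not part of the original file. When the vm
 * requires asynchronous binds, the caller preallocates the worker and
 * always commits it, even on failure: the -EAGAIN preset in
 * i915_vma_work() keeps an unused worker inert. i915_vma_pin() below
 * follows this pattern:
 *
 *	struct i915_vma_work *work;
 *
 *	work = i915_vma_work();
 *	if (!work)
 *		return -ENOMEM;
 *
 *	err = i915_vma_bind(vma, cache_level, flags, work);
 *	dma_fence_work_commit(&work->base);
 */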

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}
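
/*
 * Illustrative sketch pairing i915_vma_pin_iomap() with
 * i915_vma_unpin_iomap(); not part of the original file ("data" and
 * "len" are hypothetical). The vma must be bound map-and-fenceable in
 * the GGTT, and the device must be kept awake across the access:
 *
 *	void __iomem *ptr;
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *
 *	memcpy_toio(ptr, data, len);
 *	i915_vma_unpin_iomap(vma);
 */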

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}
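
/*
 * Worked example of the coloring rule above (illustrative, not from the
 * original file): with adjacent nodes [A: snooped][B: uncached] and no
 * hole between them, i915_gem_valid_gtt_space() fails for B, as the
 * prefetcher could run from A into B across the cache-domain boundary.
 * Leaving at least a one-page hole after A makes the same layout valid,
 * which is what drm_mm.color_adjust arranges at insertion time.
 */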

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that the GGTT is limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		atomic_inc(&obj->bind_count);
		assert_bind_count(obj);
	}
	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}
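
/*
 * Illustrative sketch of the PIN_OFFSET_FIXED path through
 * i915_vma_insert(), reached via i915_vma_pin(); not part of the
 * original file ("offset" is a hypothetical, page-aligned GGTT address).
 * The requested offset travels packed into the flags:
 *
 *	err = i915_vma_pin(vma, 0, 0,
 *			   PIN_GLOBAL |
 *			   PIN_OFFSET_FIXED |
 *			   (offset & PIN_OFFSET_MASK));
 */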

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_del(&vma->vm_link);
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		assert_bind_count(obj);
		atomic_dec(&obj->bind_count);
	}
}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}

static int vma_get_pages(struct i915_vma *vma)
{
	int err = 0;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	/* Allocations ahoy! */
	if (mutex_lock_interruptible(&vma->pages_mutex))
		return -EINTR;

	if (!atomic_read(&vma->pages_count)) {
		if (vma->obj) {
			err = i915_gem_object_pin_pages(vma->obj);
			if (err)
				goto unlock;
		}

		err = vma->ops->set_pages(vma);
		if (err) {
			if (vma->obj)
				i915_gem_object_unpin_pages(vma->obj);
			goto unlock;
		}
	}
	atomic_inc(&vma->pages_count);

unlock:
	mutex_unlock(&vma->pages_mutex);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		vma->ops->clear_pages(vma);
		GEM_BUG_ON(vma->pages);
		if (vma->obj)
			i915_gem_object_unpin_pages(vma->obj);
	}
	mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(flags & PIN_UPDATE);
	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = vma_get_pages(vma);
	if (err)
		return err;

	if (flags & vma->vm->bind_async_flags) {
		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_pages;
		}
	}

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	/* No more allocations allowed once we hold vm->mutex */
	err = mutex_lock_interruptible(&vma->vm->mutex);
	if (err)
		goto err_fence;

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj ? vma->obj->cache_level : 0,
			    flags, work);
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

	/* NB: also reached on success; each step below balances an acquire above */
err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit(&work->base);
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
err_pages:
	vma_put_pages(vma);
	return err;
}

static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	do {
		err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			i915_gem_evict_vm(vm);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}

void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	spin_lock_irqsave(&gt->closed_lock, flags);
	list_add(&vma->closed_link, &gt->closed_vma);
	spin_unlock_irqrestore(&gt->closed_lock, flags);
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
}

void i915_vma_release(struct kref *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);

	if (drm_mm_node_allocated(&vma->node)) {
		mutex_lock(&vma->vm->mutex);
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		mutex_unlock(&vma->vm->mutex);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	__i915_vma_remove_closed(vma);
	i915_vm_put(vma->vm);

	i915_active_fini(&vma->active);
	i915_vma_free(vma);
}

void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryopen(vm)) {
			i915_gem_object_put(obj);
			continue;
		}

		list_move(&vma->closed_link, &closed);
	}
	spin_unlock_irq(&gt->closed_lock);

	/* As the GT is held idle, no vma can be reopened as we destroy them */
	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		INIT_LIST_HEAD(&vma->closed_link);
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
		i915_vm_close(vm);
	}
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	/* Wait for the vma to be bound before we start! */
	err = i915_request_await_active(rq, &vma->active, 0);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}

int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}

		dma_resv_add_excl_fence(vma->resv, &rq->fence);
		obj->write_domain = I915_GEM_DOMAIN_RENDER;
		obj->read_domains = 0;
	} else {
		err = dma_resv_reserve_shared(vma->resv, 1);
		if (unlikely(err))
			return err;

		dma_resv_add_shared_fence(vma->resv, &rq->fence);
		obj->write_domain = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}
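
/*
 * Illustrative sketch for i915_vma_move_to_active() above; not part of
 * the original file. During request construction (e.g. execbuf), each
 * vma used by the request is pinned and then tracked while the object
 * lock is held, satisfying the assert_object_held() above:
 *
 *	i915_gem_object_lock(obj);
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	i915_gem_object_unlock(obj);
 */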

int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	GEM_BUG_ON(i915_vma_is_pinned(vma));
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		ret = i915_vma_revoke_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(atomic_read(&vma->vm->open))) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);
	vma_unbind_pages(vma);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);
	if (err)
		goto out_rpm;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible(&vm->mutex);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}