/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>

#include "display/intel_display.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object_frontbuffer.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_userptr.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_clock_gating.h"
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->vm.mutex);
	if (err)
		return err;

	memset(node, 0, sizeof(*node));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					  size, 0, I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);

	mutex_unlock(&ggtt->vm.mutex);

	return err;
}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
	mutex_lock(&ggtt->vm.mutex);
	drm_mm_remove_node(node);
	mutex_unlock(&ggtt->vm.mutex);
}
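
/*
 * i915_gem_get_aperture_ioctl() reports the total size of the global GTT to
 * userspace, along with an estimate of how much of it is still available,
 * i.e. the total minus the space taken by reserved ranges and pinned vmas.
 */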
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	if (mutex_lock_interruptible(&ggtt->vm.mutex))
		return -EINTR;

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
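
/*
 * i915_gem_object_unbind() walks obj->vma.list and unbinds every vma that is
 * still bound, under the control of the I915_GEM_OBJECT_UNBIND_* flags used
 * below: TEST bails out with -EBUSY instead of unbinding, ASYNC uses the
 * asynchronous unbind path, ACTIVE also unbinds vmas that are still active,
 * VM_TRYLOCK only trylocks vm->mutex, and BARRIER retries once after an RCU
 * barrier has flushed pending vm releases.
 */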
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags)
{
	struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
	bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
	LIST_HEAD(still_in_list);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	int ret = 0;

	assert_object_held(obj);

	if (list_empty(&obj->vma.list))
		return 0;

	/*
	 * As some machines use ACPI to handle runtime-resume callbacks, and
	 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
	 * as they are required by the shrinker. Ergo, we wake the device up
	 * first just in case.
	 */
	wakeref = intel_runtime_pm_get(rpm);

try_again:
	ret = 0;
	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
			continue;

		if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
			ret = -EBUSY;
			break;
		}

		/*
		 * Requiring the vm destructor to take the object lock
		 * before destroying a vma would help us eliminate the
		 * i915_vm_tryget() here, AND thus also the barrier stuff
		 * at the end. That's an easy fix, but sleeping locks in
		 * a kthread should generally be avoided.
		 */
		ret = -EAGAIN;
		if (!i915_vm_tryget(vma->vm))
			break;

		spin_unlock(&obj->vma.lock);

		/*
		 * Since i915_vma_parked() takes the object lock
		 * before vma destruction, it won't race us here,
		 * and destroy the vma from under us.
		 */

		ret = -EBUSY;
		if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
			assert_object_held(vma->obj);
			ret = i915_vma_unbind_async(vma, vm_trylock);
		}

		if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
				      !i915_vma_is_active(vma))) {
			if (vm_trylock) {
				if (mutex_trylock(&vma->vm->mutex)) {
					ret = __i915_vma_unbind(vma);
					mutex_unlock(&vma->vm->mutex);
				}
			} else {
				ret = i915_vma_unbind(vma);
			}
		}

		i915_vm_put(vma->vm);
		spin_lock(&obj->vma.lock);
	}
	list_splice_init(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
		rcu_barrier(); /* flush the i915_vm_release() */
		goto try_again;
	}

	intel_runtime_pm_put(rpm, wakeref);

	return ret;
}
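
/*
 * Per-page helper for the shmem pread path: flush the CPU cache for the
 * source range when required and copy it into the user buffer, returning
 * -EFAULT if the copy does not complete.
 */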
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	char __user *user_data;
	unsigned long offset;
	pgoff_t idx;
	u64 remain;
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		goto err_unpin;

	i915_gem_object_finish_access(obj);
	i915_gem_object_unlock(obj);

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unpin_pages(obj);
	return ret;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}
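
/*
 * gtt_user_read() copies from an io_mapping of the aperture into userspace.
 * It first tries the atomic WC mapping with a non-faulting copy and, only if
 * that cannot complete, falls back to a full mapping and copy_to_user().
 */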
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}
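
/*
 * i915_gem_gtt_prepare() sets an object up for access through the aperture:
 * it tries to pin the whole (untiled) object into the mappable GGTT and,
 * failing that, reserves a single-page scratch node that the callers remap
 * for every page they touch. It returns the pinned vma, NULL when only the
 * scratch node is used, or an ERR_PTR() on failure.
 */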
static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
					     struct drm_mm_node *node,
					     bool write)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	int ret;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	vma = ERR_PTR(-ENODEV);
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_ww;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_ww;

	if (!i915_gem_object_is_tiled(obj))
		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
						  PIN_MAPPABLE |
						  PIN_NONBLOCK /* NOWARN */ |
						  PIN_NOEVICT);
	if (vma == ERR_PTR(-EDEADLK)) {
		ret = -EDEADLK;
		goto err_ww;
	} else if (!IS_ERR(vma)) {
		node->start = i915_ggtt_offset(vma);
		node->flags = 0;
	} else {
		ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
		if (ret)
			goto err_ww;
		GEM_BUG_ON(!drm_mm_node_allocated(node));
		vma = NULL;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret) {
		if (drm_mm_node_allocated(node)) {
			ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
			remove_mappable_node(ggtt, node);
		} else {
			i915_vma_unpin(vma);
		}
	}

err_ww:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return ret ? ERR_PTR(ret) : vma;
}

static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
				 struct drm_mm_node *node,
				 struct i915_vma *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	i915_gem_object_unpin_pages(obj);
	if (drm_mm_node_allocated(node)) {
		ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
		remove_mappable_node(ggtt, node);
	} else {
		i915_vma_unpin(vma);
	}
}
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	unsigned long remain, offset;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	void __user *user_data;
	struct i915_vma *vma;
	int ret = 0;

	if (overflows_type(args->size, remain) ||
	    overflows_type(args->offset, offset))
		return -EINVAL;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	vma = i915_gem_gtt_prepare(obj, &node, false);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
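		/*
		 * Illustrative example (values not from the original source):
		 * with offset == 0x11830 and 0x2000 bytes remaining,
		 * page_offset is 0x830 and page_length is 0x1000 - 0x830 =
		 * 0x7d0, so this iteration copies up to the page boundary and
		 * the next one starts page-aligned.
		 */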
		if (drm_mm_node_allocated(&node)) {
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj,
									     offset >> PAGE_SHIFT),
					     node.start,
					     i915_gem_get_pat_index(i915,
								    I915_CACHE_NONE), 0);
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return ret;
}
/**
 * i915_gem_pread_ioctl - Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	/* PREAD is disallowed for all platforms after TGL-LP.  This also
	 * covers all platforms with local memory.
	 */
	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
		return -EOPNOTSUPP;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source.  */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pread)
		ret = obj->ops->pread(obj, args);
	if (ret != -ENODEV)
		goto out;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}
/**
 * i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	unsigned long remain, offset;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct i915_vma *vma;
	void __user *user_data;
	int ret = 0;

	if (overflows_type(args->size, remain) ||
	    overflows_type(args->offset, offset))
		return -EINVAL;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref)
			return -EFAULT;
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = i915_gem_gtt_prepare(obj, &node, true);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			/* flush the write before we modify the GGTT */
			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj,
									     offset >> PAGE_SHIFT),
					     node.start,
					     i915_gem_get_pat_index(i915,
								    I915_CACHE_NONE), 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	void __user *user_data;
	unsigned long offset;
	pgoff_t idx;
	u64 remain;
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		goto err_unpin;

	i915_gem_object_finish_access(obj);
	i915_gem_object_unlock(obj);

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
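	/*
	 * Illustrative example (values not from the original source): with a
	 * 64-byte cacheline, partial_cacheline_write is 63, so a copy below
	 * is treated as a partial cacheline write (and clflushed first)
	 * whenever (offset | length) has any of the low six bits set.
	 */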
	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_object_unpin_pages(obj);
	return ret;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}
/**
 * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	/* PWRITE is disallowed for all platforms after TGL-LP.  This also
	 * covers all platforms with local memory.
	 */
	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
		return -EOPNOTSUPP;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    i915_gem_cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (i915_gem_object_has_struct_page(obj))
			ret = i915_gem_shmem_pwrite(obj, args);
	}

err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}
void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &to_gt(i915)->ggtt->userfault_list, userfault_link)
		__i915_gem_object_release_mmap_gtt(obj);

	list_for_each_entry_safe(obj, on,
				 &i915->runtime_pm.lmem_userfault_list, userfault_link)
		i915_gem_object_runtime_pm_release_mmap_offset(obj);

	/*
	 * The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
		struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

static void discard_ggtt_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	spin_lock(&obj->vma.lock);
	if (!RB_EMPTY_NODE(&vma->obj_node)) {
		rb_erase(&vma->obj_node, &obj->vma.tree);
		RB_CLEAR_NODE(&vma->obj_node);
	}
	spin_unlock(&obj->vma.lock);
}
struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    const struct i915_gtt_view *view,
			    u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct i915_vma *vma;
	int ret;

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GTT_VIEW_NORMAL)) {
		/*
		 * If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > ggtt->mappable_end)
			return ERR_PTR(-E2BIG);

		/*
		 * If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to do a search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > ggtt->mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

new_vma:
	vma = i915_vma_instance(obj, &ggtt->vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			/*
			 * If this misplaced vma is too big (i.e. at least
			 * half the size of the aperture) or hasn't been pinned
			 * mappable before, we ignore the misplacement when
			 * PIN_NONBLOCK is set in order to avoid the ping-pong
			 * issue described above. In other words, we try to
			 * avoid the costly operation of unbinding this vma
			 * from the GGTT and rebinding it back because there
			 * may not be enough space for this vma in the aperture.
			 */
			if (flags & PIN_MAPPABLE &&
			    (vma->fence_size > ggtt->mappable_end / 2 ||
			     !i915_vma_is_map_and_fenceable(vma)))
				return ERR_PTR(-ENOSPC);
		}

		if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
			discard_ggtt_vma(vma);
			goto new_vma;
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
		mutex_lock(&ggtt->vm.mutex);
		i915_vma_revoke_fence(vma);
		mutex_unlock(&ggtt->vm.mutex);
	}

	ret = i915_vma_wait_for_bind(vma);
	if (ret) {
		i915_vma_unpin(vma);
		return ERR_PTR(ret);
	}

	return vma;
}
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_gtt_view *view,
			 u64 size, u64 alignment, u64 flags)
{
	struct i915_gem_ww_ctx ww;
	struct i915_vma *ret;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
						  alignment, flags);
		if (IS_ERR(ret))
			err = PTR_ERR(ret);
	}

	return err ? ERR_PTR(err) : ret;
}
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_clear_tiling_quirk(obj);
			i915_gem_object_make_shrinkable(obj);
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_make_unshrinkable(obj);
			i915_gem_object_set_tiling_quirk(obj);
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED) {
		obj->mm.madv = args->madv;
		if (obj->ops->adjust_lru)
			obj->ops->adjust_lru(obj);
	}

	if (i915_gem_object_has_pages(obj) ||
	    i915_gem_object_has_self_managed_shrink_list(obj)) {
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		if (!list_empty(&obj->mm.link)) {
			struct list_head *list;

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else
				list = &i915->mm.shrink_list;
			list_move_tail(&obj->mm.link, list);
		}
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;

	i915_gem_object_unlock(obj);
out:
	i915_gem_object_put(obj);
	return err;
}
/*
 * A single pass should suffice to release all the freed objects (along most
 * call paths), but be a little more paranoid in that freeing the objects does
 * take a little amount of time, during which the rcu callbacks could have added
 * new objects into the freed list, and armed the work again.
 */
void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
	while (atomic_read(&i915->mm.free_count)) {
		flush_work(&i915->mm.free_work);
		drain_workqueue(i915->bdev.wq);
		rcu_barrier();
	}
}

/*
 * Similar to objects above (see i915_gem_drain_freed_objects), in general we
 * have workers that are armed by RCU and then rearm themselves in their
 * callbacks. To be paranoid, we need to drain the workqueue a second time after
 * waiting for the RCU grace period so that we catch work queued via RCU from
 * the first pass. As neither drain_workqueue() nor flush_workqueue() report a
 * result, we assume that we don't require more than 3 passes to catch all
 * _recursive_ RCU delayed work.
 */
void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < 3; i++) {
		flush_workqueue(i915->wq);
		rcu_barrier();
		i915_gem_drain_freed_objects(i915);
	}

	drain_workqueue(i915->wq);
}
int i915_gem_init(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;
	int ret;

	/*
	 * In the process of replacing cache_level with pat_index a tricky
	 * dependency is created on the definition of the enum i915_cache_level.
	 * In case this enum is changed, PTE encode would be broken.
	 * Add a WARNING here. And remove when we completely quit using this
	 * enum.
	 */
	BUILD_BUG_ON(I915_CACHE_NONE != 0 ||
		     I915_CACHE_LLC != 1 ||
		     I915_CACHE_L3_LLC != 2 ||
		     I915_CACHE_WT != 3 ||
		     I915_MAX_CACHE_LEVEL != 4);

	/* We need to fallback to 4K pages if host doesn't support huge gtt. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;

	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

	for_each_gt(gt, dev_priv, i) {
		intel_uc_fetch_firmwares(&gt->uc);
		intel_wopcm_init(&gt->wopcm);
		if (GRAPHICS_VER(dev_priv) >= 8)
			setup_private_pat(gt);
	}

	ret = i915_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	/*
	 * Despite its name intel_clock_gating_init applies both display
	 * clock gating workarounds; GT mmio workarounds and the occasional
	 * GT power context workaround. Worse, sometimes it includes a context
	 * register workaround which we need to apply before we record the
	 * default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_clock_gating_init(dev_priv);

	for_each_gt(gt, dev_priv, i) {
		ret = intel_gt_init(gt);
		if (ret)
			goto err_unlock;
	}

	/*
	 * Register engines early to ensure the engine list is in its final
	 * rb-tree form, lowering the amount of code that has to deal with
	 * the intermediate llist state.
	 */
	intel_engines_driver_register(dev_priv);

	return 0;

	/*
	 * Unwinding is complicated by that we want to handle -EIO to mean
	 * disable GPU submission but keep KMS alive. We want to mark the
	 * HW as irreversibly wedged, but keep enough state around that the
	 * driver doesn't explode during runtime.
	 */
err_unlock:
	i915_gem_drain_workqueue(dev_priv);

	if (ret != -EIO) {
		for_each_gt(gt, dev_priv, i) {
			intel_gt_driver_remove(gt);
			intel_gt_driver_release(gt);
			intel_uc_cleanup_firmwares(&gt->uc);
		}
	}

	if (ret == -EIO) {
		/*
		 * Allow engines or uC initialisation to fail by marking the GPU
		 * as wedged. But we only want to do this when the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		for_each_gt(gt, dev_priv, i) {
			if (!intel_gt_is_wedged(gt)) {
				i915_probe_error(dev_priv,
						 "Failed to initialize GPU, declaring it wedged!\n");
				intel_gt_set_wedged(gt);
			}
		}

		/* Minimal basic recovery for KMS */
		ret = i915_ggtt_enable_hw(dev_priv);
		i915_ggtt_resume(to_gt(dev_priv)->ggtt);
		intel_clock_gating_init(dev_priv);
	}

	i915_gem_drain_freed_objects(dev_priv);

	return ret;
}
void i915_gem_driver_register(struct drm_i915_private *i915)
{
	i915_gem_driver_register__shrinker(i915);
}

void i915_gem_driver_unregister(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
}

void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	i915_gem_suspend_late(dev_priv);
	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_remove(gt);
	dev_priv->uabi_engines = RB_ROOT;

	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);
}

void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, dev_priv, i) {
		intel_gt_driver_release(gt);
		intel_uc_cleanup_firmwares(&gt->uc);
	}

	/* Flush any outstanding work, including i915_gem_context.release_work. */
	i915_gem_drain_workqueue(dev_priv);

	drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mm.obj_lock);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.purge_list);
	INIT_LIST_HEAD(&i915->mm.shrink_list);

	i915_gem_init__objects(i915);
}

void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	i915_gem_init__mm(dev_priv);
	i915_gem_init__contexts(dev_priv);
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_workqueue(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}
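
/*
 * i915_gem_open() sets up per-file GEM state: it allocates the
 * drm_i915_file_private structure and a DRM client, wires them into the
 * drm_file, and opens the GEM context machinery for the new file handle.
 */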
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	struct i915_drm_client *client;
	int ret = -ENOMEM;

	drm_dbg(&i915->drm, "\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		goto err_alloc;

	client = i915_drm_client_alloc();
	if (!client)
		goto err_client;

	file->driver_priv = file_priv;
	file_priv->i915 = i915;
	file_priv->file = file;
	file_priv->client = client;

	file_priv->bsd_engine = -1;
	file_priv->hang_timestamp = jiffies;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		goto err_context;

	return 0;

err_context:
	i915_drm_client_put(client);
err_client:
	kfree(file_priv);
err_alloc:
	return ret;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif