/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>

#include "display/intel_display.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object_frontbuffer.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"

#include "i915_file_private.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_clock_gating.h"
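
/*
 * Helpers below carve a temporary node out of the CPU-visible (mappable)
 * portion of the GGTT, under ggtt->vm.mutex. insert_mappable_node() can
 * return -EINTR if the caller is interrupted while waiting for that lock.
 */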
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
        int err;

        err = mutex_lock_interruptible(&ggtt->vm.mutex);
        if (err)
                return err;

        memset(node, 0, sizeof(*node));
        err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
                                          size, 0, I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);

        mutex_unlock(&ggtt->vm.mutex);

        return err;
}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
        mutex_lock(&ggtt->vm.mutex);
        drm_mm_remove_node(node);
        mutex_unlock(&ggtt->vm.mutex);
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct drm_i915_gem_get_aperture *args = data;
        struct i915_vma *vma;
        u64 pinned;

        if (mutex_lock_interruptible(&ggtt->vm.mutex))
                return -EINTR;

        pinned = ggtt->vm.reserved;
        list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;

        mutex_unlock(&ggtt->vm.mutex);

        args->aper_size = ggtt->vm.total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}
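
/*
 * i915_gem_object_unbind() below walks every vma attached to the object and
 * unbinds it, waking the device first because unbinding may touch the GGTT.
 * VMAs are moved onto a private list while being processed so concurrent
 * additions are not lost, and the whole walk is restarted after an RCU
 * barrier when a vm was being destroyed underneath us (-EAGAIN together
 * with I915_GEM_OBJECT_UNBIND_BARRIER).
 */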
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
                           unsigned long flags)
{
        struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
        bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
        LIST_HEAD(still_in_list);
        intel_wakeref_t wakeref;
        struct i915_vma *vma;
        int ret = 0;

        assert_object_held(obj);

        if (list_empty(&obj->vma.list))
                return 0;

        /*
         * As some machines use ACPI to handle runtime-resume callbacks, and
         * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
         * as they are required by the shrinker. Ergo, we wake the device up
         * first just in case.
         */
        wakeref = intel_runtime_pm_get(rpm);

try_again:
        ret = 0;
        spin_lock(&obj->vma.lock);
        while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
                                                       struct i915_vma,
                                                       obj_link))) {
                list_move_tail(&vma->obj_link, &still_in_list);
                if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
                        continue;

                if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
                        ret = -EBUSY;
                        break;
                }

                /*
                 * Requiring the vm destructor to take the object lock
                 * before destroying a vma would help us eliminate the
                 * i915_vm_tryget() here, AND thus also the barrier stuff
                 * at the end. That's an easy fix, but sleeping locks in
                 * a kthread should generally be avoided.
                 */
                ret = -EAGAIN;
                if (!i915_vm_tryget(vma->vm))
                        break;

                spin_unlock(&obj->vma.lock);

                /*
                 * Since i915_vma_parked() takes the object lock
                 * before vma destruction, it won't race us here,
                 * and destroy the vma from under us.
                 */

                ret = -EBUSY;
                if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
                        assert_object_held(vma->obj);
                        ret = i915_vma_unbind_async(vma, vm_trylock);
                }

                if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
                                      !i915_vma_is_active(vma))) {
                        if (vm_trylock) {
                                if (mutex_trylock(&vma->vm->mutex)) {
                                        ret = __i915_vma_unbind(vma);
                                        mutex_unlock(&vma->vm->mutex);
                                }
                        } else {
                                ret = i915_vma_unbind(vma);
                        }
                }

                i915_vm_put(vma->vm);
                spin_lock(&obj->vma.lock);
        }
        list_splice_init(&still_in_list, &obj->vma.list);
        spin_unlock(&obj->vma.lock);

        if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
                rcu_barrier(); /* flush the i915_vm_release() */
                goto try_again;
        }

        intel_runtime_pm_put(rpm, wakeref);

        return ret;
}
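
/* Per-page copy helper for the shmem pread fastpath: kmap the page, flush
 * stale cachelines first if needs_clflush is set, then copy to userspace.
 */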
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
            bool needs_clflush)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);

        if (needs_clflush)
                drm_clflush_virt_range(vaddr + offset, len);

        ret = __copy_to_user(user_data, vaddr + offset, len);

        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args)
{
        unsigned int needs_clflush;
        char __user *user_data;
        unsigned long offset;
        pgoff_t idx;
        u64 remain;
        int ret;

        ret = i915_gem_object_lock_interruptible(obj, NULL);
        if (ret)
                return ret;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err_unlock;

        ret = i915_gem_object_prepare_read(obj, &needs_clflush);
        if (ret)
                goto err_unpin;

        i915_gem_object_finish_access(obj);
        i915_gem_object_unlock(obj);

        remain = args->size;
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct page *page = i915_gem_object_get_page(obj, idx);
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

                ret = shmem_pread(page, offset, length, user_data,
                                  needs_clflush);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        i915_gem_object_unpin_pages(obj);
        return ret;

err_unpin:
        i915_gem_object_unpin_pages(obj);
err_unlock:
        i915_gem_object_unlock(obj);
        return ret;
}
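
/* GGTT read helper: try the atomic WC mapping first; if the user buffer
 * faults, fall back to a sleeping mapping and copy_to_user(). Returns
 * non-zero if the copy could not be completed.
 */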
static inline bool
gtt_user_read(struct io_mapping *mapping,
              loff_t base, int offset,
              char __user *user_data, int length)
{
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_to_user_inatomic(user_data,
                                            (void __force *)vaddr + offset,
                                            length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_to_user(user_data,
                                         (void __force *)vaddr + offset,
                                         length);
                io_mapping_unmap(vaddr);
        }

        return unwritten;
}

static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
                                             struct drm_mm_node *node,
                                             bool write)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct i915_vma *vma;
        struct i915_gem_ww_ctx ww;
        int ret;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        vma = ERR_PTR(-ENODEV);
        ret = i915_gem_object_lock(obj, &ww);
        if (ret)
                goto err_ww;

        ret = i915_gem_object_set_to_gtt_domain(obj, write);
        if (ret)
                goto err_ww;

        if (!i915_gem_object_is_tiled(obj))
                vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
                                                  PIN_MAPPABLE |
                                                  PIN_NONBLOCK /* NOWARN */ |
                                                  PIN_NOEVICT);
        if (vma == ERR_PTR(-EDEADLK)) {
                ret = -EDEADLK;
                goto err_ww;
        } else if (!IS_ERR(vma)) {
                node->start = i915_ggtt_offset(vma);
                node->flags = 0;
        } else {
                ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
                if (ret)
                        goto err_ww;
                GEM_BUG_ON(!drm_mm_node_allocated(node));
                vma = NULL;
        }

        ret = i915_gem_object_pin_pages(obj);
        if (ret) {
                if (drm_mm_node_allocated(node)) {
                        ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
                        remove_mappable_node(ggtt, node);
                } else {
                        i915_vma_unpin(vma);
                }
        }

err_ww:
        if (ret == -EDEADLK) {
                ret = i915_gem_ww_ctx_backoff(&ww);
                if (!ret)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);

        return ret ? ERR_PTR(ret) : vma;
}
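
/* Undo i915_gem_gtt_prepare(): unpin the object's pages and release either
 * the temporary GGTT node or the pinned vma, whichever was used.
 */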
static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
                                 struct drm_mm_node *node,
                                 struct i915_vma *vma)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

        i915_gem_object_unpin_pages(obj);
        if (drm_mm_node_allocated(node)) {
                ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
                remove_mappable_node(ggtt, node);
        } else {
                i915_vma_unpin(vma);
        }
}

static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                   const struct drm_i915_gem_pread *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        unsigned long remain, offset;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        void __user *user_data;
        struct i915_vma *vma;
        int ret = 0;

        if (overflows_type(args->size, remain) ||
            overflows_type(args->offset, offset))
                return -EINVAL;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        vma = i915_gem_gtt_prepare(obj, &node, false);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out_rpm;
        }

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned page_offset = offset_in_page(offset);
                unsigned page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (drm_mm_node_allocated(&node)) {
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj,
                                                                             offset >> PAGE_SHIFT),
                                             node.start,
                                             i915_gem_get_pat_index(i915,
                                                                    I915_CACHE_NONE), 0);
                } else {
                        page_base += offset & PAGE_MASK;
                }

                if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
                                  user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        return ret;
}

/**
 * i915_gem_pread_ioctl - Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        /* PREAD is disallowed for all platforms after TGL-LP. This also
         * covers all platforms with local memory.
         */
        if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
                return -EOPNOTSUPP;

        if (args->size == 0)
                return 0;

        if (!access_ok(u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check source. */
        if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pread(obj, args->offset, args->size);
        ret = -ENODEV;
        if (obj->ops->pread)
                ret = obj->ops->pread(obj, args);
        if (ret != -ENODEV)
                goto out;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto out;

        ret = i915_gem_shmem_pread(obj, args);
        if (ret == -EFAULT || ret == -ENODEV)
                ret = i915_gem_gtt_pread(obj, args);

out:
        i915_gem_object_put(obj);
        return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline bool
ggtt_write(struct io_mapping *mapping,
           loff_t base, int offset,
           char __user *user_data, int length)
{
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_from_user((void __force *)vaddr + offset,
                                           user_data, length);
                io_mapping_unmap(vaddr);
        }

        return unwritten;
}

/**
 * i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                         const struct drm_i915_gem_pwrite *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
        unsigned long remain, offset;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        struct i915_vma *vma;
        void __user *user_data;
        int ret = 0;

        if (overflows_type(args->size, remain) ||
            overflows_type(args->offset, offset))
                return -EINVAL;

        if (i915_gem_object_has_struct_page(obj)) {
                /*
                 * Avoid waking the device up if we can fallback, as
                 * waking/resuming is very slow (worst-case 10-100 ms
                 * depending on PCI sleeps and our own resume time).
                 * This easily dwarfs any performance advantage from
                 * using the cache bypass of indirect GGTT access.
                 */
                wakeref = intel_runtime_pm_get_if_in_use(rpm);
                if (!wakeref)
                        return -EFAULT;
        } else {
                /* No backing pages, no fallback, we must force GGTT access */
                wakeref = intel_runtime_pm_get(rpm);
        }

        vma = i915_gem_gtt_prepare(obj, &node, true);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out_rpm;
        }

        i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
        remain = args->size;
        while (remain) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned int page_offset = offset_in_page(offset);
                unsigned int page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (drm_mm_node_allocated(&node)) {
                        /* flush the write before we modify the GGTT */
                        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj,
                                                                             offset >> PAGE_SHIFT),
                                             node.start,
                                             i915_gem_get_pat_index(i915,
                                                                    I915_CACHE_NONE), 0);
                        wmb(); /* flush modifications to the GGTT (insert_page) */
                } else {
                        page_base += offset & PAGE_MASK;
                }
                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available. Return the error and we'll
                 * retry in the slow path.
                 * If the object is non-shmem backed, we retry again with the
                 * path that handles page fault.
                 */
                if (ggtt_write(&ggtt->iomap, page_base, page_offset,
                               user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

        i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
        intel_runtime_pm_put(rpm, wakeref);
        return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
             bool needs_clflush_before,
             bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);

        if (needs_clflush_before)
                drm_clflush_virt_range(vaddr + offset, len);

        ret = __copy_from_user(vaddr + offset, user_data, len);
        if (!ret && needs_clflush_after)
                drm_clflush_virt_range(vaddr + offset, len);

        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
                      const struct drm_i915_gem_pwrite *args)
{
        unsigned int partial_cacheline_write;
        unsigned int needs_clflush;
        void __user *user_data;
        unsigned long offset;
        pgoff_t idx;
        u64 remain;
        int ret;

        ret = i915_gem_object_lock_interruptible(obj, NULL);
        if (ret)
                return ret;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err_unlock;

        ret = i915_gem_object_prepare_write(obj, &needs_clflush);
        if (ret)
                goto err_unpin;

        i915_gem_object_finish_access(obj);
        i915_gem_object_unlock(obj);

        /* If we don't overwrite a cacheline completely we need to be
         * careful to have up-to-date data by first clflushing. Don't
         * overcomplicate things and flush the entire patch.
         */
        partial_cacheline_write = 0;
        if (needs_clflush & CLFLUSH_BEFORE)
                partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct page *page = i915_gem_object_get_page(obj, idx);
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

                ret = shmem_pwrite(page, offset, length, user_data,
                                   (offset | length) & partial_cacheline_write,
                                   needs_clflush & CLFLUSH_AFTER);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

        i915_gem_object_unpin_pages(obj);
        return ret;

err_unpin:
        i915_gem_object_unpin_pages(obj);
err_unlock:
        i915_gem_object_unlock(obj);
        return ret;
}

/**
 * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        /* PWRITE is disallowed for all platforms after TGL-LP. This also
         * covers all platforms with local memory.
         */
        if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
                return -EOPNOTSUPP;

        if (args->size == 0)
                return 0;

        if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check destination. */
        if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto err;
        }

        /* Writes not allowed into this read-only object */
        if (i915_gem_object_is_readonly(obj)) {
                ret = -EINVAL;
                goto err;
        }

        trace_i915_gem_object_pwrite(obj, args->offset, args->size);

        ret = -ENODEV;
        if (obj->ops->pwrite)
                ret = obj->ops->pwrite(obj, args);
        if (ret != -ENODEV)
                goto err;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_ALL,
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto err;

        ret = -EFAULT;
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (!i915_gem_object_has_struct_page(obj) ||
            i915_gem_cpu_write_needs_clflush(obj))
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fallback to the shmem path in that case.
                 */
                ret = i915_gem_gtt_pwrite_fast(obj, args);

        if (ret == -EFAULT || ret == -ENOSPC) {
                if (i915_gem_object_has_struct_page(obj))
                        ret = i915_gem_shmem_pwrite(obj, args);
        }

err:
        i915_gem_object_put(obj);
        return ret;
}

/**
 * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /*
         * Proxy objects are barred from CPU access, so there is no
         * need to ban sw_finish as it is a nop.
         */

        /* Pinned buffers may be scanout, so flush the cache */
        i915_gem_object_flush_if_display(obj);
        i915_gem_object_put(obj);

        return 0;
}

void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj, *on;
        int i;

        /*
         * Only called during RPM suspend. All users of the userfault_list
         * must be holding an RPM wakeref to ensure that this can not
         * run concurrently with themselves (and use the struct_mutex for
         * protection between themselves).
         */

        list_for_each_entry_safe(obj, on,
                                 &to_gt(i915)->ggtt->userfault_list, userfault_link)
                __i915_gem_object_release_mmap_gtt(obj);

        list_for_each_entry_safe(obj, on,
                                 &i915->runtime_pm.lmem_userfault_list, userfault_link)
                i915_gem_object_runtime_pm_release_mmap_offset(obj);

        /*
         * The fence will be lost when the device powers down. If any were
         * in use by hardware (i.e. they are pinned), we should not be powering
         * down! All other fences will be reacquired by the user upon waking.
         */
        for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
                struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];

                /*
                 * Ideally we want to assert that the fence register is not
                 * live at this point (i.e. that no piece of code will be
                 * trying to write through fence + GTT, as that both violates
                 * our tracking of activity and associated locking/barriers,
                 * but also is illegal given that the hw is powered down).
                 *
                 * Previously we used reg->pin_count as a "liveness" indicator.
                 * That is not sufficient, and we need a more fine-grained
                 * tool if we want to have a sanity check here.
                 */

                if (!reg->vma)
                        continue;

                GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
                reg->dirty = true;
        }
}
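
/* Hide a misplaced but still pinned/active GGTT vma from the object's vma
 * tree so that a fresh vma can be instantiated in its place.
 */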
static void discard_ggtt_vma(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        spin_lock(&obj->vma.lock);
        if (!RB_EMPTY_NODE(&vma->obj_node)) {
                rb_erase(&vma->obj_node, &obj->vma.tree);
                RB_CLEAR_NODE(&vma->obj_node);
        }
        spin_unlock(&obj->vma.lock);
}
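
/*
 * Pin an object into the global GTT under an existing ww context. For
 * mappable pins we bail out early (-E2BIG / -ENOSPC) rather than thrash the
 * aperture, and a misplaced vma is discarded, unbound or tolerated depending
 * on the PIN_* flags, as the code below shows.
 */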
struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
                            struct i915_gem_ww_ctx *ww,
                            const struct i915_gtt_view *view,
                            u64 size, u64 alignment, u64 flags)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct i915_vma *vma;
        int ret;

        if (flags & PIN_MAPPABLE &&
            (!view || view->type == I915_GTT_VIEW_NORMAL)) {
                /*
                 * If the required space is larger than the available
                 * aperture, we will not be able to find a slot for the
                 * object and unbinding the object now will be in
                 * vain. Worse, doing so may cause us to ping-pong
                 * the object in and out of the Global GTT and
                 * waste a lot of cycles under the mutex.
                 */
                if (obj->base.size > ggtt->mappable_end)
                        return ERR_PTR(-E2BIG);

                /*
                 * If NONBLOCK is set the caller is optimistically
                 * trying to cache the full object within the mappable
                 * aperture, and *must* have a fallback in place for
                 * situations where we cannot bind the object. We
                 * can be a little more lax here and use the fallback
                 * more often to avoid costly migrations of ourselves
                 * and other objects within the aperture.
                 *
                 * Half-the-aperture is used as a simple heuristic.
                 * More interesting would be to do a search for a free
                 * block prior to making the commitment to unbind.
                 * That caters for the self-harm case, and with a
                 * little more heuristics (e.g. NOFAULT, NOEVICT)
                 * we could try to minimise harm to others.
                 */
                if (flags & PIN_NONBLOCK &&
                    obj->base.size > ggtt->mappable_end / 2)
                        return ERR_PTR(-ENOSPC);
        }

new_vma:
        vma = i915_vma_instance(obj, &ggtt->vm, view);
        if (IS_ERR(vma))
                return vma;

        if (i915_vma_misplaced(vma, size, alignment, flags)) {
                if (flags & PIN_NONBLOCK) {
                        if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
                                return ERR_PTR(-ENOSPC);

                        /*
                         * If this misplaced vma is too big (i.e, at-least
                         * half the size of aperture) or hasn't been pinned
                         * mappable before, we ignore the misplacement when
                         * PIN_NONBLOCK is set in order to avoid the ping-pong
                         * issue described above. In other words, we try to
                         * avoid the costly operation of unbinding this vma
                         * from the GGTT and rebinding it back because there
                         * may not be enough space for this vma in the aperture.
                         */
                        if (flags & PIN_MAPPABLE &&
                            (vma->fence_size > ggtt->mappable_end / 2 ||
                             !i915_vma_is_map_and_fenceable(vma)))
                                return ERR_PTR(-ENOSPC);
                }

                if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
                        discard_ggtt_vma(vma);
                        goto new_vma;
                }

                ret = i915_vma_unbind(vma);
                if (ret)
                        return ERR_PTR(ret);
        }

        ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
        if (ret)
                return ERR_PTR(ret);

        if (vma->fence && !i915_gem_object_is_tiled(obj)) {
                mutex_lock(&ggtt->vm.mutex);
                i915_vma_revoke_fence(vma);
                mutex_unlock(&ggtt->vm.mutex);
        }

        ret = i915_vma_wait_for_bind(vma);
        if (ret) {
                i915_vma_unpin(vma);
                return ERR_PTR(ret);
        }

        return vma;
}

struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         const struct i915_gtt_view *view,
                         u64 size, u64 alignment, u64 flags)
{
        struct i915_gem_ww_ctx ww;
        struct i915_vma *ret;
        int err;

        for_i915_gem_ww(&ww, err, true) {
                err = i915_gem_object_lock(obj, &ww);
                if (err)
                        continue;

                ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
                                                  alignment, flags);
                if (IS_ERR(ret))
                        err = PTR_ERR(ret);
        }

        return err ? ERR_PTR(err) : ret;
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_madvise *args = data;
        struct drm_i915_gem_object *obj;
        int err;

        switch (args->madv) {
        case I915_MADV_DONTNEED:
        case I915_MADV_WILLNEED:
                break;
        default:
                return -EINVAL;
        }

        obj = i915_gem_object_lookup(file_priv, args->handle);
        if (!obj)
                return -ENOENT;

        err = i915_gem_object_lock_interruptible(obj, NULL);
        if (err)
                goto out;

        if (i915_gem_object_has_pages(obj) &&
            i915_gem_object_is_tiled(obj) &&
            i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
                if (obj->mm.madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
                        i915_gem_object_clear_tiling_quirk(obj);
                        i915_gem_object_make_shrinkable(obj);
                }
                if (args->madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
                        i915_gem_object_make_unshrinkable(obj);
                        i915_gem_object_set_tiling_quirk(obj);
                }
        }

        if (obj->mm.madv != __I915_MADV_PURGED) {
                obj->mm.madv = args->madv;
                if (obj->ops->adjust_lru)
                        obj->ops->adjust_lru(obj);
        }

        if (i915_gem_object_has_pages(obj) ||
            i915_gem_object_has_self_managed_shrink_list(obj)) {
                unsigned long flags;

                spin_lock_irqsave(&i915->mm.obj_lock, flags);
                if (!list_empty(&obj->mm.link)) {
                        struct list_head *list;

                        if (obj->mm.madv != I915_MADV_WILLNEED)
                                list = &i915->mm.purge_list;
                        else
                                list = &i915->mm.shrink_list;
                        list_move_tail(&obj->mm.link, list);
                }
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }

        /* if the object is no longer attached, discard its backing storage */
        if (obj->mm.madv == I915_MADV_DONTNEED &&
            !i915_gem_object_has_pages(obj))
                i915_gem_object_truncate(obj);

        args->retained = obj->mm.madv != __I915_MADV_PURGED;

        i915_gem_object_unlock(obj);
out:
        i915_gem_object_put(obj);
        return err;
}

/*
 * A single pass should suffice to release all the freed objects (along most
 * call paths), but be a little more paranoid in that freeing the objects does
 * take a little amount of time, during which the rcu callbacks could have added
 * new objects into the freed list, and armed the work again.
 */
void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
        while (atomic_read(&i915->mm.free_count)) {
                flush_work(&i915->mm.free_work);
                drain_workqueue(i915->bdev.wq);
        }
}

/*
 * Similar to objects above (see i915_gem_drain_freed_objects()), in general we
 * have workers that are armed by RCU and then rearm themselves in their
 * callbacks. To be paranoid, we need to drain the workqueue a second time after
 * waiting for the RCU grace period so that we catch work queued via RCU from
 * the first pass. As neither drain_workqueue() nor flush_workqueue() report a
 * result, we assume that no more than 3 passes are required to catch all
 * _recursive_ RCU delayed work.
 */
void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
        int i;

        for (i = 0; i < 3; i++) {
                flush_workqueue(i915->wq);
                rcu_barrier();
                i915_gem_drain_freed_objects(i915);
        }

        drain_workqueue(i915->wq);
}

int i915_gem_init(struct drm_i915_private *dev_priv)
{
        struct intel_gt *gt;
        unsigned int i;
        int ret;

        /*
         * In the process of replacing cache_level with pat_index a tricky
         * dependency is created on the definition of the enum i915_cache_level.
         * In case this enum is changed, PTE encode would be broken.
         * Add a WARNING here. And remove when we completely quit using this
         * enum.
         */
        BUILD_BUG_ON(I915_CACHE_NONE != 0 ||
                     I915_CACHE_LLC != 1 ||
                     I915_CACHE_L3_LLC != 2 ||
                     I915_CACHE_WT != 3 ||
                     I915_MAX_CACHE_LEVEL != 4);

        /* We need to fallback to 4K pages if host doesn't support huge gtt. */
        if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
                RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;

        for_each_gt(gt, dev_priv, i) {
                intel_uc_fetch_firmwares(&gt->uc);
                intel_wopcm_init(&gt->wopcm);
                if (GRAPHICS_VER(dev_priv) >= 8)
                        setup_private_pat(gt);
        }

        ret = i915_init_ggtt(dev_priv);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
                goto err_unlock;
        }

        /*
         * Despite its name intel_clock_gating_init applies both display
         * clock gating workarounds; GT mmio workarounds and the occasional
         * GT power context workaround. Worse, sometimes it includes a context
         * register workaround which we need to apply before we record the
         * default HW state for all contexts.
         *
         * FIXME: break up the workarounds and apply them at the right time!
         */
        intel_clock_gating_init(dev_priv);

        for_each_gt(gt, dev_priv, i) {
                ret = intel_gt_init(gt);
                if (ret)
                        goto err_unlock;
        }

        /*
         * Register engines early to ensure the engine list is in its final
         * rb-tree form, lowering the amount of code that has to deal with
         * the intermediate llist state.
         */
        intel_engines_driver_register(dev_priv);

        return 0;

        /*
         * Unwinding is complicated by that we want to handle -EIO to mean
         * disable GPU submission but keep KMS alive. We want to mark the
         * HW as irreversibly wedged, but keep enough state around that the
         * driver doesn't explode during runtime.
         */
err_unlock:
        i915_gem_drain_workqueue(dev_priv);

        if (ret != -EIO) {
                for_each_gt(gt, dev_priv, i) {
                        intel_gt_driver_remove(gt);
                        intel_gt_driver_release(gt);
                        intel_uc_cleanup_firmwares(&gt->uc);
                }
        }

        if (ret == -EIO) {
                /*
                 * Allow engines or uC initialisation to fail by marking the GPU
                 * as wedged. But we only want to do this when the GPU is angry,
                 * for all other failure, such as an allocation failure, bail.
                 */
                for_each_gt(gt, dev_priv, i) {
                        if (!intel_gt_is_wedged(gt)) {
                                i915_probe_error(dev_priv,
                                                 "Failed to initialize GPU, declaring it wedged!\n");
                                intel_gt_set_wedged(gt);
                        }
                }

                /* Minimal basic recovery for KMS */
                ret = i915_ggtt_enable_hw(dev_priv);
                i915_ggtt_resume(to_gt(dev_priv)->ggtt);
                intel_clock_gating_init(dev_priv);
        }

        i915_gem_drain_freed_objects(dev_priv);

        return ret;
}

void i915_gem_driver_register(struct drm_i915_private *i915)
{
        i915_gem_driver_register__shrinker(i915);
}

void i915_gem_driver_unregister(struct drm_i915_private *i915)
{
        i915_gem_driver_unregister__shrinker(i915);
}

void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
        struct intel_gt *gt;
        unsigned int i;

        i915_gem_suspend_late(dev_priv);
        for_each_gt(gt, dev_priv, i)
                intel_gt_driver_remove(gt);
        dev_priv->uabi_engines = RB_ROOT;

        /* Flush any outstanding unpin_work. */
        i915_gem_drain_workqueue(dev_priv);
}

void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
        struct intel_gt *gt;
        unsigned int i;

        for_each_gt(gt, dev_priv, i) {
                intel_gt_driver_release(gt);
                intel_uc_cleanup_firmwares(&gt->uc);
        }

        /* Flush any outstanding work, including i915_gem_context.release_work. */
        i915_gem_drain_workqueue(dev_priv);

        drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
        spin_lock_init(&i915->mm.obj_lock);

        init_llist_head(&i915->mm.free_list);

        INIT_LIST_HEAD(&i915->mm.purge_list);
        INIT_LIST_HEAD(&i915->mm.shrink_list);

        i915_gem_init__objects(i915);
}

void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
        i915_gem_init__mm(dev_priv);
        i915_gem_init__contexts(dev_priv);
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
        i915_gem_drain_workqueue(dev_priv);
        GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
        GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
        drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}
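
/* Per-file open hook: allocate the drm_i915_file_private, attach an
 * i915_drm_client and open the GEM context machinery for this file,
 * unwinding the allocations on failure.
 */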
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv;
        struct i915_drm_client *client;
        int ret = -ENOMEM;

        drm_dbg(&i915->drm, "\n");

        file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv)
                goto err_alloc;

        client = i915_drm_client_alloc();
        if (!client)
                goto err_client;

        file->driver_priv = file_priv;
        file_priv->i915 = i915;
        file_priv->file = file;
        file_priv->client = client;

        file_priv->bsd_engine = -1;
        file_priv->hang_timestamp = jiffies;

        ret = i915_gem_context_open(i915, file);
        if (ret)
                goto err_context;

        return 0;

err_context:
        i915_drm_client_put(client);
err_client:
        kfree(file_priv);
err_alloc:
        return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif