/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/highmem.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "display/intel_frontbuffer.h"
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_dmabuf.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_gem_ttm.h"
#include "i915_memcpy.h"
#include "i915_trace.h"

static struct kmem_cache *slab_objects;

static const struct drm_gem_object_funcs i915_gem_object_funcs;

struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
        struct drm_i915_gem_object *obj;

        obj = kmem_cache_zalloc(slab_objects, GFP_KERNEL);
        if (!obj)
                return NULL;
        obj->base.funcs = &i915_gem_object_funcs;

        return obj;
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
        return kmem_cache_free(slab_objects, obj);
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops,
                          struct lock_class_key *key, unsigned flags)
{
        /*
         * A gem object is embedded both in a struct ttm_buffer_object :/ and
         * in a drm_i915_gem_object. Make sure they are aliased.
         */
        BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
                     offsetof(typeof(*obj), __do_not_access.base));

        spin_lock_init(&obj->vma.lock);
        INIT_LIST_HEAD(&obj->vma.list);

        INIT_LIST_HEAD(&obj->mm.link);

        INIT_LIST_HEAD(&obj->lut_list);
        spin_lock_init(&obj->lut_lock);

        spin_lock_init(&obj->mmo.lock);
        obj->mmo.offsets = RB_ROOT;

        init_rcu_head(&obj->rcu);

        obj->ops = ops;
        GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
        obj->flags = flags;

        obj->mm.madv = I915_MADV_WILLNEED;
        INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->mm.get_page.lock);
        INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->mm.get_dma_page.lock);
}

/**
 * __i915_gem_object_fini - Clean up a GEM object initialization
 * @obj: The gem object to cleanup
 *
 * This function cleans up gem object fields that are set up by
 * drm_gem_private_object_init() and i915_gem_object_init().
 * It's primarily intended as a helper for backends that need to
 * clean up the gem object in separate steps.
 */
void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
{
        mutex_destroy(&obj->mm.get_page.lock);
        mutex_destroy(&obj->mm.get_dma_page.lock);
        dma_resv_fini(&obj->base._resv);
}

/**
 * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
 * for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
                                         unsigned int cache_level)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);

        obj->cache_level = cache_level;

        if (cache_level != I915_CACHE_NONE)
                obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
                                       I915_BO_CACHE_COHERENT_FOR_WRITE);
        else if (HAS_LLC(i915))
                obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
        else
                obj->cache_coherent = 0;

        obj->cache_dirty =
                !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
                !IS_DGFX(i915);
}
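
/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * that wants a CPU-uncached buffer would mark up its coherency right
 * after creation; "i915" and the shmem create helper here stand in for
 * whatever the caller actually has at hand.
 *
 *      obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
 *      if (IS_ERR(obj))
 *              return PTR_ERR(obj);
 *      i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
 */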

bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);

        /*
         * This is purely from a security perspective, so we simply don't care
         * about non-userspace objects being able to bypass the LLC.
         */
        if (!(obj->flags & I915_BO_ALLOC_USER))
                return false;

        /*
         * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
         * possible for userspace to bypass the GTT caching bits set by the
         * kernel, as per the given object cache_level. This is troublesome
         * since the heavy flush we apply when first gathering the pages is
         * skipped if the kernel thinks the object is coherent with the GPU. As
         * a result it might be possible to bypass the cache and read the
         * contents of the page directly, which could be stale data. If it's
         * just a case of userspace shooting themselves in the foot then so be
         * it, but since i915 takes the stance of always zeroing memory before
         * handing it to userspace, we need to prevent this.
         */
        return IS_JSL_EHL(i915);
}

static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem);
        struct drm_i915_file_private *fpriv = file->driver_priv;
        struct i915_lut_handle bookmark = {};
        struct i915_mmap_offset *mmo, *mn;
        struct i915_lut_handle *lut, *ln;
        LIST_HEAD(close);

        spin_lock(&obj->lut_lock);
        list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
                struct i915_gem_context *ctx = lut->ctx;

                if (ctx && ctx->file_priv == fpriv) {
                        i915_gem_context_get(ctx);
                        list_move(&lut->obj_link, &close);
                }

                /* Break long locks, and carefully continue on from this spot */
                if (&ln->obj_link != &obj->lut_list) {
                        list_add_tail(&bookmark.obj_link, &ln->obj_link);
                        if (cond_resched_lock(&obj->lut_lock))
                                list_safe_reset_next(&bookmark, ln, obj_link);
                        __list_del_entry(&bookmark.obj_link);
                }
        }
        spin_unlock(&obj->lut_lock);

        spin_lock(&obj->mmo.lock);
        rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
                drm_vma_node_revoke(&mmo->vma_node, file);
        spin_unlock(&obj->mmo.lock);

        list_for_each_entry_safe(lut, ln, &close, obj_link) {
                struct i915_gem_context *ctx = lut->ctx;
                struct i915_vma *vma;

                /*
                 * We allow the process to have multiple handles to the same
                 * vma, in the same fd namespace, by virtue of flink/open.
                 */

                mutex_lock(&ctx->lut_mutex);
                vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
                if (vma) {
                        GEM_BUG_ON(vma->obj != obj);
                        GEM_BUG_ON(!atomic_read(&vma->open_count));
                        i915_vma_close(vma);
                }
                mutex_unlock(&ctx->lut_mutex);

                i915_gem_context_put(lut->ctx);
                i915_lut_handle_free(lut);
                i915_gem_object_put(obj);
        }
}

void __i915_gem_free_object_rcu(struct rcu_head *head)
{
        struct drm_i915_gem_object *obj =
                container_of(head, typeof(*obj), rcu);
        struct drm_i915_private *i915 = to_i915(obj->base.dev);

        i915_gem_object_free(obj);

        GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
        atomic_dec(&i915->mm.free_count);
}

static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
        /* Skip serialisation and waking the device if known to be not used. */

        if (obj->userfault_count)
                i915_gem_object_release_mmap_gtt(obj);

        if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
                struct i915_mmap_offset *mmo, *mn;

                i915_gem_object_release_mmap_offset(obj);

                rbtree_postorder_for_each_entry_safe(mmo, mn,
                                                     &obj->mmo.offsets,
                                                     offset) {
                        drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
                                              &mmo->vma_node);
                        kfree(mmo);
                }
                obj->mmo.offsets = RB_ROOT;
        }
}

/**
 * __i915_gem_object_pages_fini - Clean up pages use of a gem object
 * @obj: The gem object to clean up
 *
 * This function cleans up usage of the object mm.pages member. It
 * is intended for backends that need to clean up a gem object in
 * separate steps and needs to be called when the object is idle before
 * the object's backing memory is freed.
 */
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
        assert_object_held_shared(obj);

        if (!list_empty(&obj->vma.list)) {
                struct i915_vma *vma;

                spin_lock(&obj->vma.lock);
                while ((vma = list_first_entry_or_null(&obj->vma.list,
                                                       struct i915_vma,
                                                       obj_link))) {
                        GEM_BUG_ON(vma->obj != obj);
                        spin_unlock(&obj->vma.lock);

                        i915_vma_destroy(vma);

                        spin_lock(&obj->vma.lock);
                }
                spin_unlock(&obj->vma.lock);
        }

        __i915_gem_object_free_mmaps(obj);

        atomic_set(&obj->mm.pages_pin_count, 0);
        __i915_gem_object_put_pages(obj);
        GEM_BUG_ON(i915_gem_object_has_pages(obj));
}

void __i915_gem_free_object(struct drm_i915_gem_object *obj)
{
        trace_i915_gem_object_destroy(obj);

        GEM_BUG_ON(!list_empty(&obj->lut_list));

        bitmap_free(obj->bit_17);

        if (obj->base.import_attach)
                drm_prime_gem_destroy(&obj->base, NULL);

        drm_gem_free_mmap_offset(&obj->base);

        if (obj->ops->release)
                obj->ops->release(obj);

        if (obj->mm.n_placements > 1)
                kfree(obj->mm.placements);

        if (obj->shares_resv_from)
                i915_vm_resv_put(obj->shares_resv_from);

        __i915_gem_object_fini(obj);
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
                                    struct llist_node *freed)
{
        struct drm_i915_gem_object *obj, *on;

        llist_for_each_entry_safe(obj, on, freed, freed) {
                might_sleep();
                if (obj->ops->delayed_free) {
                        obj->ops->delayed_free(obj);
                        continue;
                }

                __i915_gem_object_pages_fini(obj);
                __i915_gem_free_object(obj);

                /* But keep the pointer alive for RCU-protected lookups */
                call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
                cond_resched();
        }
}

void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
        struct llist_node *freed = llist_del_all(&i915->mm.free_list);

        if (unlikely(freed))
                __i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_work(struct work_struct *work)
{
        struct drm_i915_private *i915 =
                container_of(work, struct drm_i915_private, mm.free_work);

        i915_gem_flush_free_objects(i915);
}

static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_i915_private *i915 = to_i915(obj->base.dev);

        GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));

        /*
         * Before we free the object, make sure any pure RCU-only
         * read-side critical sections are complete, e.g.
         * i915_gem_busy_ioctl(). For the corresponding synchronized
         * lookup see i915_gem_object_lookup_rcu().
         */
        atomic_inc(&i915->mm.free_count);

        /*
         * Since we require blocking on struct_mutex to unbind the freed
         * object from the GPU before releasing resources back to the
         * system, we can not do that directly from the RCU callback (which may
         * be a softirq context), but must instead then defer that work onto a
         * kthread. We use the RCU callback rather than move the freed object
         * directly onto the work queue so that we can mix between using the
         * worker and performing frees directly from subsequent allocations for
         * crude but effective memory throttling.
         */

        if (llist_add(&obj->freed, &i915->mm.free_list))
                queue_work(i915->wq, &i915->mm.free_work);
}

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
                                         enum fb_op_origin origin)
{
        struct intel_frontbuffer *front;

        front = __intel_frontbuffer_get(obj);
        if (front) {
                intel_frontbuffer_flush(front, origin);
                intel_frontbuffer_put(front);
        }
}

void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
                                              enum fb_op_origin origin)
{
        struct intel_frontbuffer *front;

        front = __intel_frontbuffer_get(obj);
        if (front) {
                intel_frontbuffer_invalidate(front, origin);
                intel_frontbuffer_put(front);
        }
}

static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
        void *src_map;
        void *src_ptr;

        src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));

        src_ptr = src_map + offset_in_page(offset);
        if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
                drm_clflush_virt_range(src_ptr, size);
        memcpy(dst, src_ptr, size);

        kunmap_atomic(src_map);
}

static void
i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
        void __iomem *src_map;
        void __iomem *src_ptr;
        dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);

        src_map = io_mapping_map_wc(&obj->mm.region->iomap,
                                    dma - obj->mm.region->region.start,
                                    PAGE_SIZE);

        src_ptr = src_map + offset_in_page(offset);
        if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
                memcpy_fromio(dst, src_ptr, size);

        io_mapping_unmap(src_map);
}

/**
 * i915_gem_object_read_from_page - read data from the page of a GEM object
 * @obj: GEM object to read from
 * @offset: offset within the object
 * @dst: buffer to store the read data
 * @size: size to read
 *
 * Reads data from @obj at the specified offset. The requested region to read
 * from can't cross a page boundary. The caller must ensure that @obj pages
 * are pinned and that @obj is synced wrt. any related writes.
 *
 * Return: %0 on success or -ENODEV if the type of @obj's backing store is
 * unsupported.
 */
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
        GEM_BUG_ON(offset >= obj->base.size);
        GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        if (i915_gem_object_has_struct_page(obj))
                i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
        else if (i915_gem_object_has_iomem(obj))
                i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
        else
                return -ENODEV;

        return 0;
}
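
/*
 * Usage sketch (illustrative only): reading the first dword of an
 * object. The pin/unpin helpers around the call satisfy the pinned-pages
 * requirement documented above; "obj" is a hypothetical object.
 *
 *      u32 dword;
 *      int err;
 *
 *      err = i915_gem_object_pin_pages_unlocked(obj);
 *      if (err)
 *              return err;
 *      err = i915_gem_object_read_from_page(obj, 0, &dword, sizeof(dword));
 *      i915_gem_object_unpin_pages(obj);
 */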

/**
 * i915_gem_object_evictable - Whether object is likely evictable after unbind.
 * @obj: The object to check
 *
 * This function checks whether the object is likely evictable after unbind.
 * If the object is not locked when checking, the result is only advisory.
 * If the object is locked when checking, and the function returns true,
 * then an eviction should indeed be possible. But since unlocked vma
 * unpinning and unbinding is currently possible, the object can actually
 * become evictable even if this function returns false.
 *
 * Return: true if the object may be evictable. False otherwise.
 */
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;
        int pin_count = atomic_read(&obj->mm.pages_pin_count);

        if (!pin_count)
                return true;

        spin_lock(&obj->vma.lock);
        list_for_each_entry(vma, &obj->vma.list, obj_link) {
                if (i915_vma_is_pinned(vma)) {
                        spin_unlock(&obj->vma.lock);
                        return false;
                }
                if (atomic_read(&vma->pages_count))
                        pin_count--;
        }
        spin_unlock(&obj->vma.lock);
        GEM_WARN_ON(pin_count < 0);

        return pin_count == 0;
}
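
/*
 * Illustrative sketch: since the unlocked result is only advisory, a
 * caller that intends to act on a positive answer would hold the object
 * lock across the check ("ww" is a hypothetical ww context):
 *
 *      err = i915_gem_object_lock(obj, ww);
 *      if (err)
 *              return err;
 *      if (i915_gem_object_evictable(obj))
 *              ... unbind and evict the object ...
 *      i915_gem_object_unlock(obj);
 */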

/**
 * i915_gem_object_migratable - Whether the object is migratable out of the
 * current region.
 * @obj: Pointer to the object.
 *
 * Return: Whether the object is allowed to be resident in other
 * regions than the current while pages are present.
 */
bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
{
        struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

        if (!mr)
                return false;

        return obj->mm.n_placements > 1;
}

/**
 * i915_gem_object_has_struct_page - Whether the object is page-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the page backing may change under the caller.
 *
 * Return: True if page-backed, false otherwise.
 */
bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
        if (IS_DGFX(to_i915(obj->base.dev)) &&
            i915_gem_object_evictable((void __force *)obj))
                assert_object_held_shared(obj);
#endif
        return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
}

/**
 * i915_gem_object_has_iomem - Whether the object is iomem-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the iomem backing may change under the caller.
 *
 * Return: True if iomem-backed, false otherwise.
 */
bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
        if (IS_DGFX(to_i915(obj->base.dev)) &&
            i915_gem_object_evictable((void __force *)obj))
                assert_object_held_shared(obj);
#endif
        return obj->mem_flags & I915_BO_FLAG_IOMEM;
}

/**
 * i915_gem_object_can_migrate - Whether an object likely can be migrated
 *
 * @obj: The object to migrate
 * @id: The region intended to migrate to
 *
 * Check whether the object backend supports migration to the
 * given region. Note that pinning may affect the ability to migrate as
 * returned by this function.
 *
 * This function is primarily intended as a helper for checking the
 * possibility to migrate objects and might be slightly less permissive
 * than i915_gem_object_migrate() when it comes to objects with the
 * I915_BO_ALLOC_USER flag set.
 *
 * Return: true if migration is possible, false otherwise.
 */
bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
                                 enum intel_region_id id)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        unsigned int num_allowed = obj->mm.n_placements;
        struct intel_memory_region *mr;
        unsigned int i;

        GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
        GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);

        mr = i915->mm.regions[id];
        if (!mr)
                return false;

        if (!IS_ALIGNED(obj->base.size, mr->min_page_size))
                return false;

        if (obj->mm.region == mr)
                return true;

        if (!i915_gem_object_evictable(obj))
                return false;

        if (!obj->ops->migrate)
                return false;

        if (!(obj->flags & I915_BO_ALLOC_USER))
                return true;

        if (num_allowed == 0)
                return false;

        for (i = 0; i < num_allowed; ++i) {
                if (mr == obj->mm.placements[i])
                        return true;
        }

        return false;
}

/**
 * i915_gem_object_migrate - Migrate an object to the desired region id
 * @obj: The object to migrate.
 * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
 * not be successful in evicting other objects to make room for this object.
 * @id: The region id to migrate to.
 *
 * Attempt to migrate the object to the desired memory region. The
 * object backend must support migration and the object may not be
 * pinned, (explicitly pinned pages or pinned vmas). The object must
 * be locked.
 * On successful completion, the object will have pages pointing to
 * memory in the new region, but an async migration task may not have
 * completed yet, and to accomplish that, i915_gem_object_wait_migration()
 * must be called.
 *
 * Note: the @ww parameter is not used yet, but included to make sure
 * callers put some effort into obtaining a valid ww ctx if one is
 * available.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
 * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
 * -EBUSY if the object is pinned.
 */
int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
                            struct i915_gem_ww_ctx *ww,
                            enum intel_region_id id)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct intel_memory_region *mr;

        GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
        GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
        assert_object_held(obj);

        mr = i915->mm.regions[id];
        GEM_BUG_ON(!mr);

        if (!i915_gem_object_can_migrate(obj, id))
                return -EINVAL;

        if (!obj->ops->migrate) {
                if (GEM_WARN_ON(obj->mm.region != mr))
                        return -EINVAL;
                return 0;
        }

        return obj->ops->migrate(obj, mr);
}
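
/*
 * Illustrative sketch of the flow described above: under the object
 * lock, check, migrate, then wait for the async copy. Migrating to
 * INTEL_REGION_SMEM is just an example choice; "ww" is a hypothetical
 * ww context.
 *
 *      err = i915_gem_object_lock(obj, ww);
 *      if (err)
 *              return err;
 *      if (i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
 *              err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
 *      if (!err)
 *              err = i915_gem_object_wait_migration(obj, 0);
 *      i915_gem_object_unlock(obj);
 */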

/**
 * i915_gem_object_placement_possible - Check whether the object can be
 * placed at certain memory type
 * @obj: Pointer to the object
 * @type: The memory type to check
 *
 * Return: True if the object can be placed in @type. False otherwise.
 */
bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
                                        enum intel_memory_type type)
{
        unsigned int i;

        if (!obj->mm.n_placements) {
                switch (type) {
                case INTEL_MEMORY_LOCAL:
                        return i915_gem_object_has_iomem(obj);
                case INTEL_MEMORY_SYSTEM:
                        return i915_gem_object_has_pages(obj);
                default:
                        /* Ignore stolen for now */
                        GEM_BUG_ON(1);
                        return false;
                }
        }

        for (i = 0; i < obj->mm.n_placements; i++) {
                if (obj->mm.placements[i]->type == type)
                        return true;
        }

        return false;
}
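
/*
 * Illustrative sketch: probing whether an object could ever live in
 * system memory before attempting a migration there ("ww" is a
 * hypothetical ww context held by the caller):
 *
 *      if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_SYSTEM))
 *              err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
 */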

/**
 * i915_gem_object_needs_ccs_pages - Check whether the object requires extra
 * pages when placed in system-memory, in order to save and later restore the
 * flat-CCS aux state when the object is moved between local-memory and
 * system-memory
 * @obj: Pointer to the object
 *
 * Return: True if the object needs extra ccs pages. False otherwise.
 */
bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
{
        bool lmem_placement = false;
        int i;

        if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
                return false;

        for (i = 0; i < obj->mm.n_placements; i++) {
                /* Compression is not allowed for the objects with smem placement */
                if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
                        return false;
                if (!lmem_placement &&
                    obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
                        lmem_placement = true;
        }

        return lmem_placement;
}

void i915_gem_init__objects(struct drm_i915_private *i915)
{
        INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

void i915_objects_module_exit(void)
{
        kmem_cache_destroy(slab_objects);
}

int __init i915_objects_module_init(void)
{
        slab_objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
        if (!slab_objects)
                return -ENOMEM;

        return 0;
}

static const struct drm_gem_object_funcs i915_gem_object_funcs = {
        .free = i915_gem_free_object,
        .close = i915_gem_close_object,
        .export = i915_gem_prime_export,
};

/**
 * i915_gem_object_get_moving_fence - Get the object's moving fence if any
 * @obj: The object whose moving fence to get.
 * @fence: The resulting fence
 *
 * A non-signaled moving fence means that there is an async operation
 * pending on the object that needs to be waited on before setting up
 * any GPU- or CPU PTEs to the object's pages.
 *
 * Return: Negative error code or 0 for success.
 */
int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
                                     struct dma_fence **fence)
{
        return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
                                      fence);
}

/**
 * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
 * @obj: The object whose moving fence to wait for.
 * @intr: Whether to wait interruptible.
 *
 * If the moving fence signaled without an error, it is detached from the
 * object and put.
 *
 * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted,
 * negative error code if the async operation represented by the
 * moving fence failed.
 */
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
                                      bool intr)
{
        long ret;

        assert_object_held(obj);

        ret = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
                                    intr, MAX_SCHEDULE_TIMEOUT);
        if (!ret)
                ret = -ETIME;
        else if (ret > 0 && i915_gem_object_has_unknown_state(obj))
                ret = -EIO;

        return ret < 0 ? ret : 0;
}
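
/*
 * Usage sketch (illustrative only): before setting up CPU PTEs to the
 * object's pages, wait under the object lock for any pending move:
 *
 *      assert_object_held(obj);
 *      err = i915_gem_object_wait_moving_fence(obj, true);
 *      if (err)
 *              return err;
 */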

/**
 * i915_gem_object_has_unknown_state - Return true if the object backing pages are
 * in an unknown_state. This means that userspace must NEVER be allowed to touch
 * the pages, with either the GPU or CPU.
 * @obj: The object to query.
 *
 * ONLY valid to be called after ensuring that all kernel fences have signalled
 * (in particular the fence for moving/clearing the object).
 */
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj)
{
        /*
         * The below barrier pairs with the dma_fence_signal() in
         * __memcpy_work(). We should only sample the unknown_state after all
         * the kernel fences have signalled.
         */
        smp_rmb();
        return obj->mm.unknown_state;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_migrate.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif