/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <linux/reservation.h>

#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include <drm/i915_drm.h>

#include "i915_request.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radix tree for that mapping, in order to
 * remove the entries as the object or context is closed, we need a
 * secondary list and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
        struct list_head obj_link;
        struct list_head ctx_link;
        struct i915_gem_context *ctx;
        u32 handle;
};
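
/*
 * Illustrative sketch only (the real teardown lives in i915_gem.c): on
 * close, each lut entry lets us find and drop the per-context fast-lookup
 * state again, roughly:
 *
 *      struct i915_lut_handle *lut, *ln;
 *
 *      list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
 *              radix_tree_delete(&lut->ctx->handles_vma, lut->handle);
 *              list_del(&lut->ctx_link);
 *              kfree(lut);
 *      }
 */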

struct drm_i915_gem_object_ops {
        unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_IS_PROXY BIT(2)
#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)

        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
         * of pages and before binding them into the GTT, and put_pages() is
         * called after we no longer need them. As we expect there to be an
         * associated cost with migrating pages between the backing storage
         * and making them available for the GPU (e.g. clflush), we may hold
         * onto the pages after they are no longer referenced by the GPU
         * in case they may be used again shortly (for example migrating the
         * pages to a different memory domain within the GTT). put_pages()
         * will therefore most likely be called when the object itself is
         * being released or under memory pressure (where we attempt to
         * reap pages for the shrinker).
         */
        int (*get_pages)(struct drm_i915_gem_object *);
        void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

        int (*pwrite)(struct drm_i915_gem_object *,
                      const struct drm_i915_gem_pwrite *);

        int (*dmabuf_export)(struct drm_i915_gem_object *);
        void (*release)(struct drm_i915_gem_object *);
};
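
/*
 * A minimal backing-store implementation might look like the sketch below.
 * This is illustrative only: the "example" names are hypothetical, and
 * __i915_gem_object_set_pages()/i915_sg_page_sizes() are declared elsewhere
 * in the driver, not in this header.
 *
 *      static int example_get_pages(struct drm_i915_gem_object *obj)
 *      {
 *              struct sg_table *pages;
 *
 *              pages = example_alloc_sg(obj); // hypothetical helper
 *              if (IS_ERR(pages))
 *                      return PTR_ERR(pages);
 *
 *              __i915_gem_object_set_pages(obj, pages,
 *                                          i915_sg_page_sizes(pages->sgl));
 *              return 0;
 *      }
 *
 *      static void example_put_pages(struct drm_i915_gem_object *obj,
 *                                    struct sg_table *pages)
 *      {
 *              sg_free_table(pages);
 *              kfree(pages);
 *      }
 *
 *      static const struct drm_i915_gem_object_ops example_ops = {
 *              .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
 *              .get_pages = example_get_pages,
 *              .put_pages = example_put_pages,
 *      };
 */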

struct drm_i915_gem_object {
        struct drm_gem_object base;

        const struct drm_i915_gem_object_ops *ops;

        struct {
                /**
                 * @vma.lock: protect the list/tree of vmas
                 */
                spinlock_t lock;

                /**
                 * @vma.list: List of VMAs backed by this object
                 *
                 * The VMAs on this list are ordered by type: all GGTT vma
                 * are placed at the head and all ppGTT vma are placed at the
                 * tail. The different types of GGTT vma are unordered
                 * between themselves; use @vma.tree (which has a defined
                 * order between all VMAs) to quickly find an exact match.
                 */
                struct list_head list;

                /**
                 * @vma.tree: Ordered tree of VMAs backed by this object
                 *
                 * All VMAs created for this object are placed in @vma.tree
                 * for fast retrieval via a binary search in
                 * i915_vma_instance(). They are also added to @vma.list for
                 * easy iteration.
                 */
                struct rb_root tree;
        } vma;

        /**
         * @lut_list: List of vma lookup entries in use for this object.
         *
         * If this object is closed, we need to remove all of its VMAs from
         * the fast lookup index in associated contexts; @lut_list provides
         * this translation from object to context->handles_vma.
         */
        struct list_head lut_list;

        /** Stolen memory for this object, instead of being backed by shmem. */
        struct drm_mm_node *stolen;
        union {
                struct rcu_head rcu;
                struct llist_node freed;
        };

        /**
         * Whether the object is currently in the GGTT mmap: non-zero while
         * user mappings are live (and may need to be revoked).
         */
        unsigned int userfault_count;
        struct list_head userfault_link;

        struct list_head batch_pool_link;
        I915_SELFTEST_DECLARE(struct list_head st_link);

        unsigned long flags;

        /**
         * Have we taken a reference on the object for incomplete GPU
         * activity?
         */
#define I915_BO_ACTIVE_REF 0

        /*
         * Is the object to be mapped as read-only to the GPU?
         * Only honoured if the hardware has the relevant pte bit.
         * (That read-only flag lives in base.vma_node.readonly; see
         * i915_gem_object_set_readonly() below.)
         */
        unsigned int cache_level:3;
        unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
        unsigned int cache_dirty:1;

        /**
         * @read_domains: Read memory domains.
         *
         * These monitor which caches contain read/write data related to the
         * object. When transitioning from one set of domains to another,
         * the driver is called to ensure that caches are suitably flushed and
         * invalidated.
         */
        u16 read_domains;

        /**
         * @write_domain: Corresponding unique write memory domain.
         */
        u16 write_domain;

        atomic_t frontbuffer_bits;
        unsigned int frontbuffer_ggtt_origin; /* write once */
        struct i915_active_request frontbuffer_write;

        /** Current tiling mode and stride for the object, if it's tiled. */
        unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
#define STRIDE_MASK (~TILING_MASK)

        /** Count of VMA actually bound by this object */
        unsigned int bind_count;
        unsigned int active_count;
        /** Count of how many global VMA are currently pinned for use by HW */
        unsigned int pin_global;

        struct {
                struct mutex lock; /* protects the pages and their use */
                atomic_t pages_pin_count;

                struct sg_table *pages;
                void *mapping;

                /* TODO: whack some of this into the error state */
                struct i915_page_sizes {
                        /**
                         * The sg mask of the pages sg_table, i.e. the mask
                         * of the lengths for each sg entry.
                         */
                        unsigned int phys;

                        /**
                         * The gtt page sizes we are allowed to use given the
                         * sg mask and the supported page sizes. This will
                         * express the smallest unit we can use for the whole
                         * object, as well as the larger sizes we may be able
                         * to use opportunistically.
                         */
                        unsigned int sg;

                        /**
                         * The actual gtt page size usage. Since we can have
                         * multiple vma associated with this object we need to
                         * prevent any trampling of state, hence a copy of this
                         * struct also lives in each vma, therefore the gtt
                         * value here should only be read/written through the
                         * vma.
                         */
                        unsigned int gtt;
                } page_sizes;
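
                /*
                 * Illustrative example (not from the code): if every sg
                 * chunk is 2M (phys == SZ_2M) and the platform supports 4K,
                 * 64K and 2M GTT pages, then sg ends up as 4K | 64K | 2M:
                 * the whole object can always fall back to 4K pages, while
                 * 64K and 2M mappings may be used opportunistically.
                 */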

                I915_SELFTEST_DECLARE(unsigned int page_mask);

                struct i915_gem_object_page_iter {
                        struct scatterlist *sg_pos;
                        unsigned int sg_idx; /* in pages, but 32bit eek! */

                        struct radix_tree_root radix;
                        struct mutex lock; /* protects this cache */
                } get_page;

                /**
                 * Element within i915->mm.unbound_list or i915->mm.bound_list,
                 * locked by i915->mm.obj_lock.
                 */
                struct list_head link;

                /**
                 * Advice: are the backing pages purgeable?
                 */
                unsigned int madv:2;

                /**
                 * This is set if the object has been written to since the
                 * pages were last acquired.
                 */
                bool dirty:1;

                /**
                 * This is set if the object has been pinned due to unknown
                 * swizzling.
                 */
                bool quirked:1;
        } mm;

        /** Breadcrumb of last rendering to the buffer.
         * There can only be one writer, but we allow for multiple readers.
         * If there is a writer, that necessarily implies that all other
         * read requests are complete - but we may only be lazily clearing
         * the read requests. A read request is naturally the most recent
         * request on a ring, so we may have two different write and read
         * requests on one ring where the write request is older than the
         * read request. This allows the CPU to read from an active
         * buffer by only waiting for the write to complete.
         */
        struct reservation_object *resv;

        /** References from framebuffers, locks out tiling changes. */
        unsigned int framebuffer_references;

        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;

        union {
                struct i915_gem_userptr {
                        uintptr_t ptr;

                        struct i915_mm_struct *mm;
                        struct i915_mmu_object *mmu_object;
                        struct work_struct *work;
                } userptr;

                unsigned long scratch;

                void *gvt_info;
        };

        /** for phys allocated objects */
        struct drm_dma_handle *phys_handle;

        struct reservation_object __builtin_resv;
};
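
/*
 * Sketch of how @vma.tree is consumed (illustrative; i915_vma_instance()
 * is declared in i915_vma.h, and "ggtt" here stands for whatever
 * struct i915_ggtt the caller already holds):
 *
 *      struct i915_vma *vma;
 *
 *      vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *      if (IS_ERR(vma))
 *              return PTR_ERR(vma);
 */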

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
        /* Assert that to_intel_bo(NULL) == NULL */
        BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

        return container_of(gem, struct drm_i915_gem_object, base);
}

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
        WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
        return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
        struct drm_i915_gem_object *obj;

        rcu_read_lock();
        obj = i915_gem_object_lookup_rcu(file, handle);
        if (obj && !kref_get_unless_zero(&obj->base.refcount))
                obj = NULL;
        rcu_read_unlock();

        return obj;
}
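
/*
 * Typical caller pattern (sketch; "args" stands for whatever ioctl
 * argument struct the caller has): i915_gem_object_lookup() returns its
 * own reference, which must be dropped with i915_gem_object_put():
 *
 *      obj = i915_gem_object_lookup(file, args->handle);
 *      if (!obj)
 *              return -ENOENT;
 *
 *      ... use obj ...
 *
 *      i915_gem_object_put(obj);
 */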

__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
        drm_gem_object_get(&obj->base);
        return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
        __drm_gem_object_put(&obj->base);
}

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
        reservation_object_lock(obj->resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
        reservation_object_unlock(obj->resv);
}
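
/*
 * Usage sketch: these wrap the object's reservation object, e.g.
 *
 *      i915_gem_object_lock(obj);
 *      ... touch state protected by obj->resv ...
 *      i915_gem_object_unlock(obj);
 */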

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
        obj->base.vma_node.readonly = true;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
        return obj->base.vma_node.readonly;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
        return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
        return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
        return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
        return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
}

static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
        return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
        return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
        lockdep_assert_held(&obj->base.dev->struct_mutex);
        __set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
        lockdep_assert_held(&obj->base.dev->struct_mutex);
        __clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);

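/*
 * Sketch (roughly what __i915_gem_object_release_unless_active() does): a
 * caller that is done with an object but knows the GPU may still be using
 * it can hand its reference over to the active tracker instead of
 * dropping it immediately:
 *
 *      lockdep_assert_held(&obj->base.dev->struct_mutex);
 *      if (!i915_gem_object_has_active_reference(obj) &&
 *          i915_gem_object_is_active(obj))
 *              i915_gem_object_set_active_reference(obj);
 *      else
 *              i915_gem_object_put(obj);
 */
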
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
        return READ_ONCE(obj->framebuffer_references);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
        GEM_BUG_ON(!tiling);
        return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
        return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
        return (i915_gem_object_get_stride(obj) *
                i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
                               unsigned int tiling, unsigned int stride);

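/*
 * Example (sketch): tiling mode and stride share the one tiling_and_stride
 * word. Strides are a multiple of FENCE_MINIMUM_STRIDE (128), so the low
 * bits are free to hold the tiling mode. After e.g.
 *
 *      err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512);
 *
 * and assuming success, i915_gem_object_get_tiling() reports I915_TILING_X
 * and i915_gem_object_get_stride() reports 512.
 */
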
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
        struct intel_engine_cs *engine = NULL;
        struct dma_fence *fence;

        rcu_read_lock();
        fence = reservation_object_get_excl_rcu(obj->resv);
        rcu_read_unlock();

        if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
                engine = to_request(fence)->engine;
        dma_fence_put(fence);

        return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
                                         unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                     struct sg_table *pages,
                                     bool needs_clflush);

#endif