drivers/gpu/drm/i915/i915_gem_object.h
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <linux/reservation.h>

#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include <drm/i915_drm.h>

#include "i915_request.h"
#include "i915_selftest.h"

struct drm_i915_gem_object;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct list_head ctx_link;
	struct i915_gem_context *ctx;
	u32 handle;
};
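
/*
 * Example: how these translation entries are consumed when an object is
 * closed. This is an illustrative sketch only, loosely modelled on the
 * object-close path; locking, per-context filtering, vma refcounting and
 * the dedicated slab cache used for freeing are all simplified away.
 *
 *	struct i915_lut_handle *lut, *ln;
 *
 *	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
 *		struct i915_gem_context *ctx = lut->ctx;
 *
 *		// Drop the handle -> vma translation from the context.
 *		radix_tree_delete(&ctx->handles_vma, lut->handle);
 *
 *		list_del(&lut->obj_link);
 *		list_del(&lut->ctx_link);
 *		kfree(lut);
 *	}
 */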

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_IS_PROXY BIT(2)

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*pwrite)(struct drm_i915_gem_object *,
		      const struct drm_i915_gem_pwrite *);

	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};
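
/*
 * Example: a minimal, illustrative backend wiring up the get_pages()/
 * put_pages() interface described above. This is a sketch only: the
 * example_* names are hypothetical, error handling is trimmed, and it
 * assumes the __i915_gem_object_set_pages() helper used by the in-tree
 * backends of this era.
 *
 *	static int example_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		unsigned int sg_page_sizes;
 *		struct sg_table *pages;
 *
 *		pages = example_backend_alloc(obj, &sg_page_sizes);
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
 *		return 0;
 *	}
 *
 *	static void example_put_pages(struct drm_i915_gem_object *obj,
 *				      struct sg_table *pages)
 *	{
 *		example_backend_release(obj, pages);
 *		sg_free_table(pages);
 *		kfree(pages);
 *	}
 *
 *	static const struct drm_i915_gem_object_ops example_ops = {
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 *			 I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = example_get_pages,
 *		.put_pages = example_put_pages,
 *	};
 */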

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/**
	 * @vma_list: List of VMAs backed by this object
	 *
	 * The VMAs on this list are ordered by type: all GGTT VMAs are
	 * placed at the head and all ppGTT VMAs at the tail. The different
	 * types of GGTT VMAs are unordered between themselves; use the
	 * @vma_tree (which has a defined order between all VMAs) to find an
	 * exact match (see the illustrative lookup example after this
	 * structure definition).
	 */
	struct list_head vma_list;
	/**
	 * @vma_tree: Ordered tree of VMAs backed by this object
	 *
	 * All VMAs created for this object are placed in the @vma_tree for
	 * fast retrieval via a binary search in i915_vma_instance().
	 * They are also added to @vma_list for easy iteration.
	 */
	struct rb_root vma_tree;

	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMA from
	 * the fast lookup index in associated contexts; @lut_list provides
	 * this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	unsigned int userfault_count;
	struct list_head userfault_link;

	struct list_head batch_pool_link;
	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;

	/**
	 * Have we taken a reference for the object for incomplete GPU
	 * activity?
	 */
#define I915_BO_ACTIVE_REF 0

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant PTE bit.
	 */
	unsigned int cache_level:3;
	unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_dirty:1;

	/**
	 * @read_domains: Read memory domains.
	 *
	 * These monitor which caches contain read/write data related to the
	 * object. When transitioning from one set of domains to another,
	 * the driver is called to ensure that caches are suitably flushed and
	 * invalidated.
	 */
	u16 read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	u16 write_domain;

	atomic_t frontbuffer_bits;
	unsigned int frontbuffer_ggtt_origin; /* write once */
	struct i915_gem_active frontbuffer_write;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
#define STRIDE_MASK (~TILING_MASK)

	/** Count of VMA actually bound by this object */
	unsigned int bind_count;
	unsigned int active_count;
	/** Count of how many global VMA are currently pinned for use by HW */
	unsigned int pin_global;

	struct {
		struct mutex lock; /* protects the pages and their use */
		atomic_t pages_pin_count;

		struct sg_table *pages;
		void *mapping;

		/* TODO: whack some of this into the error state */
		struct i915_page_sizes {
			/**
			 * The sg mask of the pages sg_table, i.e. the mask
			 * of the lengths for each sg entry.
			 */
			unsigned int phys;

			/**
			 * The gtt page sizes we are allowed to use given the
			 * sg mask and the supported page sizes. This will
			 * express the smallest unit we can use for the whole
			 * object, as well as the larger sizes we may be able
			 * to use opportunistically.
			 */
			unsigned int sg;

			/**
			 * The actual gtt page size usage. Since we can have
			 * multiple vma associated with this object we need to
			 * prevent any trampling of state, hence a copy of this
			 * struct also lives in each vma, therefore the gtt
			 * value here should only be read/written through the
			 * vma.
			 */
			unsigned int gtt;
		} page_sizes;

		I915_SELFTEST_DECLARE(unsigned int page_mask);

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;

		/**
		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
		 * locked by i915->mm.obj_lock.
		 */
		struct list_head link;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;

	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
	 * If there is a writer, that necessarily implies that all other
	 * read requests are complete - but we may only be lazily clearing
	 * the read requests. A read request is naturally the most recent
	 * request on a ring, so we may have two different write and read
	 * requests on one ring where the write request is older than the
	 * read request. This allows for the CPU to read from an active
	 * buffer by only waiting for the write to complete.
	 */
	struct reservation_object *resv;

	/** References from framebuffers, locks out tiling changes. */
	unsigned int framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;

		unsigned long scratch;

		void *gvt_info;
	};

	/** for phys allocated objects */
	struct drm_dma_handle *phys_handle;

	struct reservation_object __builtin_resv;
};
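
/*
 * Example: finding a VMA for this object, as referenced by the @vma_list
 * and @vma_tree comments above. Illustrative sketch only: it assumes the
 * i915_vma_instance() helper and the i915_vma obj_link member of this era,
 * examine() is a placeholder, and all locking is skipped.
 *
 *	struct i915_vma *vma;
 *
 *	// Exact (obj, vm, view) match via the ordered @vma_tree.
 *	vma = i915_vma_instance(obj, vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	// Or walk every VMA backed by this object via @vma_list.
 *	list_for_each_entry(vma, &obj->vma_list, obj_link)
 *		examine(vma);
 */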

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}
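
/*
 * Example: typical use of the lookup helpers above from an ioctl handler.
 * Illustrative only; the example_* names and the args structure are
 * hypothetical.
 *
 *	static int example_ioctl(struct drm_device *dev, void *data,
 *				 struct drm_file *file)
 *	{
 *		struct drm_i915_gem_example *args = data;
 *		struct drm_i915_gem_object *obj;
 *		int err = 0;
 *
 *		// i915_gem_object_lookup() takes a reference; balance it
 *		// with i915_gem_object_put() when done.
 *		obj = i915_gem_object_lookup(file, args->handle);
 *		if (!obj)
 *			return -ENOENT;
 *
 *		// ... operate on obj ...
 *
 *		i915_gem_object_put(obj);
 *		return err;
 *	}
 */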

__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	reservation_object_lock(obj->resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	reservation_object_unlock(obj->resv);
}

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->base.vma_node.readonly = true;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->base.vma_node.readonly;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
}

static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->framebuffer_references);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

#endif