/* Source: drivers/gpu/drm/vmwgfx/vmwgfx_bo.h (Linux kernel, vmwgfx driver) */
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
3 *
4 * Copyright 2023 VMware, Inc., Palo Alto, CA., USA
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #ifndef VMWGFX_BO_H
29 #define VMWGFX_BO_H
30
31 #include "device_include/svga_reg.h"
32
33 #include <drm/ttm/ttm_bo.h>
34 #include <drm/ttm/ttm_placement.h>
35
36 #include <linux/rbtree_types.h>
37 #include <linux/types.h>
38
39 struct vmw_bo_dirty;
40 struct vmw_fence_obj;
41 struct vmw_private;
42 struct vmw_resource;
43
/**
 * enum vmw_bo_domain - Memory domains a vmwgfx buffer object may be placed in.
 * @VMW_BO_DOMAIN_SYS: System memory.
 * @VMW_BO_DOMAIN_WAITABLE_SYS: System memory placement that can be waited on.
 * @VMW_BO_DOMAIN_GMR: Guest Memory Region.
 * @VMW_BO_DOMAIN_MOB: Memory OBject (backing store for resources).
 *
 * Bit flags; a placement is expressed as a mask of these domains
 * (see struct vmw_bo_params and vmw_bo_placement_set()).
 */
enum vmw_bo_domain {
	VMW_BO_DOMAIN_SYS           = BIT(0),
	VMW_BO_DOMAIN_WAITABLE_SYS  = BIT(1),
	VMW_BO_DOMAIN_VRAM          = BIT(2),
	VMW_BO_DOMAIN_GMR           = BIT(3),
	VMW_BO_DOMAIN_MOB           = BIT(4),
};
51
/**
 * struct vmw_bo_params - Creation parameters for vmw_bo_create().
 * @domain: Preferred placement domains (mask of enum vmw_bo_domain).
 * @busy_domain: Placement domains used as fallback; presumably tried when
 * the preferred @domain cannot be satisfied — confirm against
 * vmw_bo_placement_set().
 * @bo_type: The TTM buffer object type.
 * @size: Size of the buffer object in bytes.
 * @pin: Whether the buffer should be created pinned.
 */
struct vmw_bo_params {
	u32 domain;
	u32 busy_domain;
	enum ttm_bo_type bo_type;
	size_t size;
	bool pin;
};
59
/**
 * struct vmw_bo - TTM buffer object with vmwgfx additions
 * @tbo: The TTM buffer object
 * @placement: The preferred placement for this buffer object
 * @places: The chosen places for the preferred placement. Sized to hold
 * one entry per vmw_bo_domain.
 * @busy_places: Chosen busy places for the preferred placement
 * @map: Kmap object for semi-persistent mappings. Protected by the
 * buffer object's reservation.
 * @res_tree: RB tree of resources using this buffer object as a backing MOB
 * @res_prios: Eviction priority counts for attached resources; indexed by
 * resource priority (see vmw_bo_prio_add()/vmw_bo_prio_del()).
 * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
 * increased. May be decreased without reservation.
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB.
 * Not ref-counted; protected by binding_mutex.
 * @dirty: structure for user-space dirty-tracking
 */
struct vmw_bo {
	struct ttm_buffer_object tbo;

	struct ttm_placement placement;
	struct ttm_place places[5];
	struct ttm_place busy_places[5];

	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;

	struct rb_root res_tree;
	u32 res_prios[TTM_MAX_BO_PRIORITY];

	atomic_t cpu_writers;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	struct vmw_bo_dirty *dirty;
};
92
/* Placement setup (implemented in vmwgfx_bo.c). */
void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo);

/* Creation and ioctl entry points. */
int vmw_bo_create(struct vmw_private *dev_priv,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo);

int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);

/* Pinning helpers; the "interruptible" flag applies to the reservation wait. */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_bo *buf,
		       bool interruptible);
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_bo *buf,
			      bool interruptible);
int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_bo *bo,
				bool interruptible);
void vmw_bo_pin_reserved(struct vmw_bo *bo, bool pin);
int vmw_bo_unpin(struct vmw_private *vmw_priv,
		 struct vmw_bo *bo,
		 bool interruptible);

/* Device address, CPU sync and fencing. */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
			  SVGAGuestPtr *ptr);
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence);

/* Kernel mappings. */
void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
void vmw_bo_unmap(struct vmw_bo *vbo);

/* TTM move/swap notification callbacks. */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);

/* Handle lookup for user-space supplied buffer handles. */
int vmw_user_bo_lookup(struct drm_file *filp,
		       u32 handle,
		       struct vmw_bo **out);
134 /**
135 * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
136 * according to attached resources
137 * @vbo: The struct vmw_bo
138 */
139 static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo)
140 {
141 int i = ARRAY_SIZE(vbo->res_prios);
142
143 while (i--) {
144 if (vbo->res_prios[i]) {
145 vbo->tbo.priority = i;
146 return;
147 }
148 }
149
150 vbo->tbo.priority = 3;
151 }
152
153 /**
154 * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
155 * eviction priority
156 * @vbo: The struct vmw_bo
157 * @prio: The resource priority
158 *
159 * After being notified, the code assigns the highest resource eviction priority
160 * to the backing buffer object (mob).
161 */
162 static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio)
163 {
164 if (vbo->res_prios[prio]++ == 0)
165 vmw_bo_prio_adjust(vbo);
166 }
167
168 /**
169 * vmw_bo_used_prio_del - Notify a buffer object of a resource with a certain
170 * priority being removed
171 * @vbo: The struct vmw_bo
172 * @prio: The resource priority
173 *
174 * After being notified, the code assigns the highest resource eviction priority
175 * to the backing buffer object (mob).
176 */
177 static inline void vmw_bo_prio_del(struct vmw_bo *vbo, int prio)
178 {
179 if (--vbo->res_prios[prio] == 0)
180 vmw_bo_prio_adjust(vbo);
181 }
182
183 static inline void vmw_bo_unreference(struct vmw_bo **buf)
184 {
185 struct vmw_bo *tmp_buf = *buf;
186
187 *buf = NULL;
188 if (tmp_buf)
189 ttm_bo_put(&tmp_buf->tbo);
190 }
191
/**
 * vmw_bo_reference - Take a TTM reference on a buffer object
 * @buf: The struct vmw_bo; must not be NULL.
 *
 * Return: @buf, for call-chaining convenience.
 */
static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
	ttm_bo_get(&buf->tbo);
	return buf;
}
197
/**
 * vmw_user_bo_ref - Take a GEM reference on a buffer object
 * @vbo: The struct vmw_bo; must not be NULL.
 *
 * Takes a reference on the embedded GEM object (as opposed to the TTM
 * reference taken by vmw_bo_reference()).
 *
 * Return: @vbo, for call-chaining convenience.
 */
static inline struct vmw_bo *vmw_user_bo_ref(struct vmw_bo *vbo)
{
	drm_gem_object_get(&vbo->tbo.base);
	return vbo;
}
203
204 static inline void vmw_user_bo_unref(struct vmw_bo **buf)
205 {
206 struct vmw_bo *tmp_buf = *buf;
207
208 *buf = NULL;
209 if (tmp_buf)
210 drm_gem_object_put(&tmp_buf->tbo.base);
211 }
212
/**
 * to_vmw_bo - Convert an embedded GEM object pointer to its struct vmw_bo
 * @gobj: The struct drm_gem_object embedded at tbo.base.
 *
 * Return: The containing struct vmw_bo.
 */
static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
	return container_of((gobj), struct vmw_bo, tbo.base);
}
217
218 #endif // VMWGFX_BO_H