]>
Commit | Line | Data |
---|---|---|
8038d2a9 | 1 | // SPDX-License-Identifier: GPL-2.0 OR MIT |
d991ef03 JB |
2 | /************************************************************************** |
3 | * | |
09881d29 | 4 | * Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA |
d991ef03 JB |
5 | * All Rights Reserved. |
6 | * | |
7 | * Permission is hereby granted, free of charge, to any person obtaining a | |
8 | * copy of this software and associated documentation files (the | |
9 | * "Software"), to deal in the Software without restriction, including | |
10 | * without limitation the rights to use, copy, modify, merge, publish, | |
11 | * distribute, sub license, and/or sell copies of the Software, and to | |
12 | * permit persons to whom the Software is furnished to do so, subject to | |
13 | * the following conditions: | |
14 | * | |
15 | * The above copyright notice and this permission notice (including the | |
16 | * next paragraph) shall be included in all copies or substantial portions | |
17 | * of the Software. | |
18 | * | |
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
22 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
23 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
24 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
25 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
26 | * | |
27 | **************************************************************************/ | |
28 | ||
09881d29 | 29 | #include "vmwgfx_bo.h" |
d991ef03 | 30 | #include "vmwgfx_drv.h" |
e9431ea5 TH |
31 | |
32 | ||
09881d29 | 33 | #include <drm/ttm/ttm_placement.h> |
e9431ea5 | 34 | |
668b2066 ZR |
/*
 * vmw_bo_release - Tear down a vmw_bo's GEM state and cached kernel map.
 *
 * Called on final destruction; the embedded GEM object must not be
 * referenced anymore at this point.
 */
static void vmw_bo_release(struct vmw_bo *vbo)
{
	/* Funcs set => object was fully GEM-initialized; refcount must be 0. */
	WARN_ON(vbo->tbo.base.funcs &&
		kref_read(&vbo->tbo.base.refcount) != 0);
	/* Drop any cached kernel map before releasing the GEM object. */
	vmw_bo_unmap(vbo);
	drm_gem_object_release(&vbo->tbo.base);
}
42 | ||
/**
 * vmw_bo_free - vmw_bo destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 *
 * Used as the TTM destroy callback; frees the containing struct vmw_bo.
 */
static void vmw_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	/* Dirty tracking and all resources must be detached before free. */
	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_release(vbo);
	kfree(vbo);
}
57 | ||
d991ef03 | 58 | /** |
f1d34bfd | 59 | * vmw_bo_pin_in_placement - Validate a buffer to placement. |
d991ef03 | 60 | * |
b37a6b9a TH |
61 | * @dev_priv: Driver private. |
62 | * @buf: DMA buffer to move. | |
459d0fa7 | 63 | * @placement: The placement to pin it. |
b37a6b9a | 64 | * @interruptible: Use interruptible wait. |
e9431ea5 TH |
65 | * Return: Zero on success, Negative error code on failure. In particular |
66 | * -ERESTARTSYS if interrupted by a signal | |
d991ef03 | 67 | */ |
6703e28f ZR |
68 | static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv, |
69 | struct vmw_bo *buf, | |
70 | struct ttm_placement *placement, | |
71 | bool interruptible) | |
d991ef03 | 72 | { |
19be5570 | 73 | struct ttm_operation_ctx ctx = {interruptible, false }; |
668b2066 | 74 | struct ttm_buffer_object *bo = &buf->tbo; |
d991ef03 JB |
75 | int ret; |
76 | ||
c0951b79 | 77 | vmw_execbuf_release_pinned_bo(dev_priv); |
e2fa3a76 | 78 | |
dfd5e50e | 79 | ret = ttm_bo_reserve(bo, interruptible, false, NULL); |
d991ef03 JB |
80 | if (unlikely(ret != 0)) |
81 | goto err; | |
82 | ||
f87c1f0b | 83 | ret = ttm_bo_validate(bo, placement, &ctx); |
459d0fa7 TH |
84 | if (!ret) |
85 | vmw_bo_pin_reserved(buf, true); | |
d991ef03 JB |
86 | |
87 | ttm_bo_unreserve(bo); | |
d991ef03 | 88 | err: |
d991ef03 JB |
89 | return ret; |
90 | } | |
91 | ||
e9431ea5 | 92 | |
/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_bo *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	/* First preference: GMR, allowing VRAM while the bo is busy. */
	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
	/* Stop on success, and don't retry after a signal interruption. */
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	/* Fallback: VRAM only. */
	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_VRAM);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);

out_unreserve:
	/* Pin only after successful validation, while still reserved. */
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	return ret;
}
139 | ||
e9431ea5 | 140 | |
d991ef03 | 141 | /** |
f1d34bfd | 142 | * vmw_bo_pin_in_vram - Move a buffer to vram. |
d991ef03 | 143 | * |
459d0fa7 TH |
144 | * This function takes the reservation_sem in write mode. |
145 | * Flushes and unpins the query bo to avoid failures. | |
d991ef03 JB |
146 | * |
147 | * @dev_priv: Driver private. | |
148 | * @buf: DMA buffer to move. | |
d991ef03 | 149 | * @interruptible: Use interruptible wait. |
e9431ea5 TH |
150 | * Return: Zero on success, Negative error code on failure. In particular |
151 | * -ERESTARTSYS if interrupted by a signal | |
d991ef03 | 152 | */ |
f1d34bfd | 153 | int vmw_bo_pin_in_vram(struct vmw_private *dev_priv, |
09881d29 | 154 | struct vmw_bo *buf, |
f1d34bfd | 155 | bool interruptible) |
d991ef03 | 156 | { |
f1d34bfd TH |
157 | return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement, |
158 | interruptible); | |
d991ef03 JB |
159 | } |
160 | ||
e9431ea5 | 161 | |
/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_bo *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret = 0;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    bo->resource->start < PFN_UP(bo->resource->size) &&
	    bo->resource->start > 0 &&
	    buf->tbo.pin_count == 0) {
		/* Eviction must not be interrupted halfway. */
		ctx.interruptible = false;
		/* Result ignored: the VRAM validate below reports failure. */
		vmw_bo_placement_set(buf,
				     VMW_BO_DOMAIN_SYS,
				     VMW_BO_DOMAIN_SYS);
		(void)ttm_bo_validate(bo, &buf->placement, &ctx);
	}

	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_VRAM);
	/*
	 * Restrict the placement window to the bo's own size from the start
	 * of VRAM, which forces the allocation to begin at offset 0.
	 */
	buf->places[0].lpfn = PFN_UP(bo->resource->size);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->resource->start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:

	return ret;
}
219 | ||
e9431ea5 | 220 | |
d991ef03 | 221 | /** |
f1d34bfd | 222 | * vmw_bo_unpin - Unpin the buffer given buffer, does not move the buffer. |
d991ef03 | 223 | * |
459d0fa7 | 224 | * This function takes the reservation_sem in write mode. |
d991ef03 JB |
225 | * |
226 | * @dev_priv: Driver private. | |
227 | * @buf: DMA buffer to unpin. | |
228 | * @interruptible: Use interruptible wait. | |
e9431ea5 TH |
229 | * Return: Zero on success, Negative error code on failure. In particular |
230 | * -ERESTARTSYS if interrupted by a signal | |
d991ef03 | 231 | */ |
f1d34bfd | 232 | int vmw_bo_unpin(struct vmw_private *dev_priv, |
09881d29 | 233 | struct vmw_bo *buf, |
f1d34bfd | 234 | bool interruptible) |
d991ef03 | 235 | { |
668b2066 | 236 | struct ttm_buffer_object *bo = &buf->tbo; |
459d0fa7 | 237 | int ret; |
d991ef03 | 238 | |
dfd5e50e | 239 | ret = ttm_bo_reserve(bo, interruptible, false, NULL); |
459d0fa7 TH |
240 | if (unlikely(ret != 0)) |
241 | goto err; | |
242 | ||
243 | vmw_bo_pin_reserved(buf, false); | |
244 | ||
245 | ttm_bo_unreserve(bo); | |
246 | ||
247 | err: | |
459d0fa7 TH |
248 | return ret; |
249 | } | |
b37a6b9a | 250 | |
d991ef03 | 251 | /** |
b37a6b9a TH |
252 | * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement |
253 | * of a buffer. | |
d991ef03 | 254 | * |
b37a6b9a TH |
255 | * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved. |
256 | * @ptr: SVGAGuestPtr returning the result. | |
d991ef03 | 257 | */ |
b37a6b9a TH |
258 | void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, |
259 | SVGAGuestPtr *ptr) | |
d991ef03 | 260 | { |
d3116756 | 261 | if (bo->resource->mem_type == TTM_PL_VRAM) { |
d991ef03 | 262 | ptr->gmrId = SVGA_GMR_FRAMEBUFFER; |
d3116756 | 263 | ptr->offset = bo->resource->start << PAGE_SHIFT; |
d991ef03 | 264 | } else { |
d3116756 | 265 | ptr->gmrId = bo->resource->start; |
d991ef03 JB |
266 | ptr->offset = 0; |
267 | } | |
268 | } | |
e2fa3a76 TH |
269 | |
270 | ||
/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
{
	/* no_wait_gpu = true; validation into the current placement must not block. */
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->tbo;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/* Nothing to do if the requested pin state already matches. */
	if (pin == !!bo->pin_count)
		return;

	/* Build a single-entry placement equal to the bo's current location. */
	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->resource->mem_type;
	pl.flags = bo->resource->placement;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	/* Validating into the current placement must never move the bo. */
	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

	if (pin)
		ttm_bo_pin(bo);
	else
		ttm_bo_unpin(bo);
}
bf833fd3 | 310 | |
e9431ea5 TH |
311 | /** |
312 | * vmw_bo_map_and_cache - Map a buffer object and cache the map | |
bf833fd3 TH |
313 | * |
314 | * @vbo: The buffer object to map | |
315 | * Return: A kernel virtual address or NULL if mapping failed. | |
316 | * | |
317 | * This function maps a buffer object into the kernel address space, or | |
318 | * returns the virtual kernel address of an already existing map. The virtual | |
319 | * address remains valid as long as the buffer object is pinned or reserved. | |
320 | * The cached map is torn down on either | |
321 | * 1) Buffer object move | |
322 | * 2) Buffer object swapout | |
323 | * 3) Buffer object destruction | |
324 | * | |
325 | */ | |
09881d29 | 326 | void *vmw_bo_map_and_cache(struct vmw_bo *vbo) |
bf833fd3 | 327 | { |
668b2066 | 328 | struct ttm_buffer_object *bo = &vbo->tbo; |
bf833fd3 TH |
329 | bool not_used; |
330 | void *virtual; | |
331 | int ret; | |
332 | ||
333 | virtual = ttm_kmap_obj_virtual(&vbo->map, ¬_used); | |
334 | if (virtual) | |
335 | return virtual; | |
336 | ||
e3c92eb4 | 337 | ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map); |
bf833fd3 TH |
338 | if (ret) |
339 | DRM_ERROR("Buffer object map failed: %d.\n", ret); | |
340 | ||
341 | return ttm_kmap_obj_virtual(&vbo->map, ¬_used); | |
342 | } | |
e9431ea5 TH |
343 | |
344 | ||
345 | /** | |
346 | * vmw_bo_unmap - Tear down a cached buffer object map. | |
347 | * | |
348 | * @vbo: The buffer object whose map we are tearing down. | |
349 | * | |
350 | * This function tears down a cached map set up using | |
09881d29 | 351 | * vmw_bo_map_and_cache(). |
e9431ea5 | 352 | */ |
09881d29 | 353 | void vmw_bo_unmap(struct vmw_bo *vbo) |
e9431ea5 TH |
354 | { |
355 | if (vbo->map.bo == NULL) | |
356 | return; | |
357 | ||
358 | ttm_bo_kunmap(&vbo->map); | |
668b2066 | 359 | vbo->map.bo = NULL; |
e9431ea5 TH |
360 | } |
361 | ||
35079323 | 362 | |
/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Buffer object to initialize
 * @params: Parameters used to initialize the buffer object
 * @destroy: The function used to delete the buffer object
 * Returns: Zero on success, negative error code on error.
 *
 */
static int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_bo *vmw_bo,
		       struct vmw_bo_params *params,
		       void (*destroy)(struct ttm_buffer_object *))
{
	/* Kernel-type bos are initialized with non-interruptible waits. */
	struct ttm_operation_ctx ctx = {
		.interruptible = params->bo_type != ttm_bo_type_kernel,
		.no_wait_gpu = false
	};
	struct ttm_device *bdev = &dev_priv->bdev;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	/* Priority 3 must exist; see TTM_MAX_BO_PRIORITY. */
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->tbo.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	params->size = ALIGN(params->size, PAGE_SIZE);
	drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);

	vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
	/* On failure ttm_bo_init_reserved() invokes @destroy, freeing the bo. */
	ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
				   &vmw_bo->placement, 0, &ctx, NULL,
				   NULL, destroy);
	if (unlikely(ret))
		return ret;

	if (params->pin)
		ttm_bo_pin(&vmw_bo->tbo);
	ttm_bo_unreserve(&vmw_bo->tbo);

	return 0;
}
e9431ea5 | 408 | |
8afa13a0 | 409 | int vmw_bo_create(struct vmw_private *vmw, |
668b2066 | 410 | struct vmw_bo_params *params, |
09881d29 | 411 | struct vmw_bo **p_bo) |
8afa13a0 ZR |
412 | { |
413 | int ret; | |
414 | ||
415 | *p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL); | |
416 | if (unlikely(!*p_bo)) { | |
417 | DRM_ERROR("Failed to allocate a buffer.\n"); | |
418 | return -ENOMEM; | |
419 | } | |
420 | ||
36d421e6 ZR |
421 | /* |
422 | * vmw_bo_init will delete the *p_bo object if it fails | |
423 | */ | |
668b2066 | 424 | ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free); |
8afa13a0 ZR |
425 | if (unlikely(ret != 0)) |
426 | goto out_error; | |
427 | ||
428 | return ret; | |
429 | out_error: | |
8afa13a0 ZR |
430 | *p_bo = NULL; |
431 | return ret; | |
432 | } | |
433 | ||
e9431ea5 | 434 | /** |
09881d29 | 435 | * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu |
e9431ea5 TH |
436 | * access, idling previous GPU operations on the buffer and optionally |
437 | * blocking it for further command submissions. | |
438 | * | |
8afa13a0 | 439 | * @vmw_bo: Pointer to the buffer object being grabbed for CPU access |
e9431ea5 TH |
440 | * @flags: Flags indicating how the grab should be performed. |
441 | * Return: Zero on success, Negative error code on error. In particular, | |
442 | * -EBUSY will be returned if a dontblock operation is requested and the | |
443 | * buffer object is busy, and -ERESTARTSYS will be returned if a wait is | |
444 | * interrupted by a signal. | |
445 | * | |
446 | * A blocking grab will be automatically released when @tfile is closed. | |
447 | */ | |
09881d29 | 448 | static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo, |
e9431ea5 TH |
449 | uint32_t flags) |
450 | { | |
7fb03cc3 | 451 | bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); |
668b2066 | 452 | struct ttm_buffer_object *bo = &vmw_bo->tbo; |
e9431ea5 TH |
453 | int ret; |
454 | ||
455 | if (flags & drm_vmw_synccpu_allow_cs) { | |
e9431ea5 TH |
456 | long lret; |
457 | ||
7bc80a54 CK |
458 | lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ, |
459 | true, nonblock ? 0 : | |
d3fae3b3 | 460 | MAX_SCHEDULE_TIMEOUT); |
e9431ea5 TH |
461 | if (!lret) |
462 | return -EBUSY; | |
463 | else if (lret < 0) | |
464 | return lret; | |
465 | return 0; | |
466 | } | |
467 | ||
7fb03cc3 CK |
468 | ret = ttm_bo_reserve(bo, true, nonblock, NULL); |
469 | if (unlikely(ret != 0)) | |
470 | return ret; | |
471 | ||
472 | ret = ttm_bo_wait(bo, true, nonblock); | |
473 | if (likely(ret == 0)) | |
8afa13a0 | 474 | atomic_inc(&vmw_bo->cpu_writers); |
7fb03cc3 CK |
475 | |
476 | ttm_bo_unreserve(bo); | |
e9431ea5 TH |
477 | if (unlikely(ret != 0)) |
478 | return ret; | |
479 | ||
e9431ea5 TH |
480 | return ret; |
481 | } | |
482 | ||
483 | /** | |
484 | * vmw_user_bo_synccpu_release - Release a previous grab for CPU access, | |
485 | * and unblock command submission on the buffer if blocked. | |
486 | * | |
8afa13a0 | 487 | * @filp: Identifying the caller. |
e9431ea5 | 488 | * @handle: Handle identifying the buffer object. |
e9431ea5 TH |
489 | * @flags: Flags indicating the type of release. |
490 | */ | |
8afa13a0 ZR |
491 | static int vmw_user_bo_synccpu_release(struct drm_file *filp, |
492 | uint32_t handle, | |
493 | uint32_t flags) | |
e9431ea5 | 494 | { |
09881d29 | 495 | struct vmw_bo *vmw_bo; |
8afa13a0 | 496 | int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo); |
e9431ea5 | 497 | |
60c9ecd7 ZR |
498 | if (!ret) { |
499 | if (!(flags & drm_vmw_synccpu_allow_cs)) { | |
500 | atomic_dec(&vmw_bo->cpu_writers); | |
501 | } | |
91398b41 | 502 | vmw_user_bo_unref(&vmw_bo); |
8afa13a0 | 503 | } |
8afa13a0 ZR |
504 | |
505 | return ret; | |
e9431ea5 TH |
506 | } |
507 | ||
508 | ||
509 | /** | |
510 | * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu | |
511 | * functionality. | |
512 | * | |
513 | * @dev: Identifies the drm device. | |
514 | * @data: Pointer to the ioctl argument. | |
515 | * @file_priv: Identifies the caller. | |
516 | * Return: Zero on success, negative error code on error. | |
517 | * | |
518 | * This function checks the ioctl arguments for validity and calls the | |
519 | * relevant synccpu functions. | |
520 | */ | |
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_bo *vbo;
	int ret;

	/* At least one of read/write must be set, and no unknown flag bits. */
	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
		if (unlikely(ret != 0))
			return ret;

		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
		vmw_user_bo_unref(&vbo);
		if (unlikely(ret != 0)) {
			/* Interruption is reported to userspace as -EBUSY. */
			if (ret == -ERESTARTSYS || ret == -EBUSY)
				return -EBUSY;
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(file_priv,
						  arg->handle,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
570 | ||
e9431ea5 TH |
571 | /** |
572 | * vmw_bo_unref_ioctl - Generic handle close ioctl. | |
573 | * | |
574 | * @dev: Identifies the drm device. | |
575 | * @data: Pointer to the ioctl argument. | |
576 | * @file_priv: Identifies the caller. | |
577 | * Return: Zero on success, negative error code on error. | |
578 | * | |
579 | * This function checks the ioctl arguments for validity and closes a | |
580 | * handle to a TTM base object, optionally freeing the object. | |
581 | */ | |
582 | int vmw_bo_unref_ioctl(struct drm_device *dev, void *data, | |
583 | struct drm_file *file_priv) | |
584 | { | |
585 | struct drm_vmw_unref_dmabuf_arg *arg = | |
586 | (struct drm_vmw_unref_dmabuf_arg *)data; | |
587 | ||
668b2066 | 588 | return drm_gem_handle_delete(file_priv, arg->handle); |
e9431ea5 TH |
589 | } |
590 | ||
591 | ||
592 | /** | |
593 | * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle. | |
594 | * | |
8afa13a0 | 595 | * @filp: The file the handle is registered with. |
e9431ea5 TH |
596 | * @handle: The user buffer object handle |
597 | * @out: Pointer to a where a pointer to the embedded | |
09881d29 | 598 | * struct vmw_bo should be placed. |
e9431ea5 TH |
599 | * Return: Zero on success, Negative error code on error. |
600 | * | |
9ef8d83e | 601 | * The vmw buffer object pointer will be refcounted (both ttm and gem) |
e9431ea5 | 602 | */ |
8afa13a0 | 603 | int vmw_user_bo_lookup(struct drm_file *filp, |
668b2066 | 604 | u32 handle, |
09881d29 | 605 | struct vmw_bo **out) |
e9431ea5 | 606 | { |
8afa13a0 | 607 | struct drm_gem_object *gobj; |
e9431ea5 | 608 | |
8afa13a0 ZR |
609 | gobj = drm_gem_object_lookup(filp, handle); |
610 | if (!gobj) { | |
e9431ea5 TH |
611 | DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", |
612 | (unsigned long)handle); | |
613 | return -ESRCH; | |
614 | } | |
615 | ||
09881d29 | 616 | *out = to_vmw_bo(gobj); |
e9431ea5 TH |
617 | |
618 | return 0; | |
619 | } | |
620 | ||
e9431ea5 TH |
/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream..
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
	int ret;

	/*
	 * Either create a new fence by fencing the command stream, or take
	 * a reference on the caller's fence; both paths are balanced by the
	 * dma_fence_put() below.
	 */
	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		dma_fence_get(&fence->base);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (!ret)
		dma_resv_add_fence(bo->base.resv, &fence->base,
				   DMA_RESV_USAGE_KERNEL);
	else
		/* Last resort fallback when we are OOM */
		dma_fence_wait(&fence->base, false);
	dma_fence_put(&fence->base);
}
654 | ||
655 | ||
656 | /** | |
657 | * vmw_dumb_create - Create a dumb kms buffer | |
658 | * | |
659 | * @file_priv: Pointer to a struct drm_file identifying the caller. | |
660 | * @dev: Pointer to the drm device. | |
661 | * @args: Pointer to a struct drm_mode_create_dumb structure | |
662 | * Return: Zero on success, negative error code on failure. | |
663 | * | |
664 | * This is a driver callback for the core drm create_dumb functionality. | |
665 | * Note that this is very similar to the vmw_bo_alloc ioctl, except | |
666 | * that the arguments have a different format. | |
667 | */ | |
668 | int vmw_dumb_create(struct drm_file *file_priv, | |
669 | struct drm_device *dev, | |
670 | struct drm_mode_create_dumb *args) | |
671 | { | |
672 | struct vmw_private *dev_priv = vmw_priv(dev); | |
09881d29 | 673 | struct vmw_bo *vbo; |
1c8d537b | 674 | int cpp = DIV_ROUND_UP(args->bpp, 8); |
e9431ea5 TH |
675 | int ret; |
676 | ||
1c8d537b ZR |
677 | switch (cpp) { |
678 | case 1: /* DRM_FORMAT_C8 */ | |
679 | case 2: /* DRM_FORMAT_RGB565 */ | |
680 | case 4: /* DRM_FORMAT_XRGB8888 */ | |
681 | break; | |
682 | default: | |
683 | /* | |
684 | * Dumb buffers don't allow anything else. | |
685 | * This is tested via IGT's dumb_buffers | |
686 | */ | |
687 | return -EINVAL; | |
688 | } | |
689 | ||
690 | args->pitch = args->width * cpp; | |
8afa13a0 | 691 | args->size = ALIGN(args->pitch * args->height, PAGE_SIZE); |
e9431ea5 | 692 | |
8afa13a0 ZR |
693 | ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, |
694 | args->size, &args->handle, | |
695 | &vbo); | |
9ef8d83e ZR |
696 | /* drop reference from allocate - handle holds it now */ |
697 | drm_gem_object_put(&vbo->tbo.base); | |
e9431ea5 TH |
698 | return ret; |
699 | } | |
700 | ||
e9431ea5 TH |
701 | /** |
702 | * vmw_bo_swap_notify - swapout notify callback. | |
703 | * | |
704 | * @bo: The buffer object to be swapped out. | |
705 | */ | |
706 | void vmw_bo_swap_notify(struct ttm_buffer_object *bo) | |
707 | { | |
e9431ea5 | 708 | /* Kill any cached kernel maps before swapout */ |
09881d29 | 709 | vmw_bo_unmap(to_vmw_bo(&bo->base)); |
e9431ea5 TH |
710 | } |
711 | ||
712 | ||
713 | /** | |
714 | * vmw_bo_move_notify - TTM move_notify_callback | |
715 | * | |
716 | * @bo: The TTM buffer object about to move. | |
2966141a | 717 | * @mem: The struct ttm_resource indicating to what memory |
e9431ea5 TH |
718 | * region the move is taking place. |
719 | * | |
720 | * Detaches cached maps and device bindings that require that the | |
721 | * buffer doesn't move. | |
722 | */ | |
723 | void vmw_bo_move_notify(struct ttm_buffer_object *bo, | |
2966141a | 724 | struct ttm_resource *mem) |
e9431ea5 | 725 | { |
668b2066 | 726 | struct vmw_bo *vbo = to_vmw_bo(&bo->base); |
e9431ea5 TH |
727 | |
728 | /* | |
098d7d53 TH |
729 | * Kill any cached kernel maps before move to or from VRAM. |
730 | * With other types of moves, the underlying pages stay the same, | |
731 | * and the map can be kept. | |
e9431ea5 | 732 | */ |
d3116756 | 733 | if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM) |
098d7d53 | 734 | vmw_bo_unmap(vbo); |
e9431ea5 TH |
735 | |
736 | /* | |
737 | * If we're moving a backup MOB out of MOB placement, then make sure we | |
738 | * read back all resource content first, and unbind the MOB from | |
739 | * the resource. | |
740 | */ | |
d3116756 | 741 | if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB) |
e9431ea5 TH |
742 | vmw_resource_unbind_list(vbo); |
743 | } | |
39985eea ZR |
744 | |
745 | static u32 | |
746 | set_placement_list(struct ttm_place *pl, u32 domain) | |
747 | { | |
748 | u32 n = 0; | |
749 | ||
750 | /* | |
751 | * The placements are ordered according to our preferences | |
752 | */ | |
753 | if (domain & VMW_BO_DOMAIN_MOB) { | |
754 | pl[n].mem_type = VMW_PL_MOB; | |
755 | pl[n].flags = 0; | |
756 | pl[n].fpfn = 0; | |
757 | pl[n].lpfn = 0; | |
758 | n++; | |
759 | } | |
760 | if (domain & VMW_BO_DOMAIN_GMR) { | |
761 | pl[n].mem_type = VMW_PL_GMR; | |
762 | pl[n].flags = 0; | |
763 | pl[n].fpfn = 0; | |
764 | pl[n].lpfn = 0; | |
765 | n++; | |
766 | } | |
767 | if (domain & VMW_BO_DOMAIN_VRAM) { | |
768 | pl[n].mem_type = TTM_PL_VRAM; | |
769 | pl[n].flags = 0; | |
770 | pl[n].fpfn = 0; | |
771 | pl[n].lpfn = 0; | |
772 | n++; | |
773 | } | |
39985eea ZR |
774 | if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) { |
775 | pl[n].mem_type = VMW_PL_SYSTEM; | |
776 | pl[n].flags = 0; | |
777 | pl[n].fpfn = 0; | |
778 | pl[n].lpfn = 0; | |
779 | n++; | |
780 | } | |
781 | if (domain & VMW_BO_DOMAIN_SYS) { | |
782 | pl[n].mem_type = TTM_PL_SYSTEM; | |
783 | pl[n].flags = 0; | |
784 | pl[n].fpfn = 0; | |
785 | pl[n].lpfn = 0; | |
786 | n++; | |
787 | } | |
788 | ||
789 | WARN_ON(!n); | |
790 | if (!n) { | |
791 | pl[n].mem_type = TTM_PL_SYSTEM; | |
792 | pl[n].flags = 0; | |
793 | pl[n].fpfn = 0; | |
794 | pl[n].lpfn = 0; | |
795 | n++; | |
796 | } | |
797 | return n; | |
798 | } | |
799 | ||
/*
 * vmw_bo_placement_set - Set a buffer object's placement lists.
 *
 * @bo: The buffer object.
 * @domain: Bitmask of VMW_BO_DOMAIN_* preferred placements.
 * @busy_domain: Bitmask of VMW_BO_DOMAIN_* placements used while busy.
 */
void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
{
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	struct ttm_placement *pl = &bo->placement;
	bool mem_compatible = false;
	u32 i;

	pl->placement = bo->places;
	pl->num_placement = set_placement_list(bo->places, domain);

	/*
	 * With driver debugging enabled, warn about transitions where the
	 * current memory type matches neither system nor any new placement.
	 */
	if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
		for (i = 0; i < pl->num_placement; ++i) {
			if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
			    bo->tbo.resource->mem_type == pl->placement[i].mem_type)
				mem_compatible = true;
		}
		if (!mem_compatible)
			drm_warn(&vmw->drm,
				 "%s: Incompatible transition from "
				 "bo->base.resource->mem_type = %u to domain = %u\n",
				 __func__, bo->tbo.resource->mem_type, domain);
	}

	pl->busy_placement = bo->busy_places;
	pl->num_busy_placement = set_placement_list(bo->busy_places, busy_domain);
}
827 | ||
828 | void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo) | |
829 | { | |
668b2066 ZR |
830 | struct ttm_device *bdev = bo->tbo.bdev; |
831 | struct vmw_private *vmw = vmw_priv_from_ttm(bdev); | |
39985eea ZR |
832 | u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM; |
833 | ||
834 | if (vmw->has_mob) | |
835 | domain = VMW_BO_DOMAIN_MOB; | |
836 | ||
837 | vmw_bo_placement_set(bo, domain, domain); | |
838 | } |