// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob.
 * @res: The resource.
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_bo *gbo = res->guest_memory_bo;
	struct rb_node **new = &gbo->res_tree.rb_node, *parent = NULL;

	dma_resv_assert_held(gbo->tbo.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->guest_memory_offset < this->guest_memory_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &gbo->res_tree);

	vmw_bo_prio_add(gbo, res->used_prio);
}
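
/*
 * Note: the res_tree built above is ordered by guest_memory_offset, which is
 * what allows vmw_resources_clean() further down in this file to visit the
 * resources backed by one mob in increasing offset order. Attach and detach
 * both require the buffer object to be reserved, hence the
 * dma_resv_assert_held() checks.
 */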

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob.
 * @res: The resource.
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_bo *gbo = res->guest_memory_bo;

	dma_resv_assert_held(gbo->tbo.base.resv);
	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &gbo->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(gbo, res->used_prio);
	}
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}
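
/*
 * Illustrative refcounting pattern (a sketch of typical caller usage, not
 * code from this file): references taken with vmw_resource_reference() are
 * dropped with vmw_resource_unreference(), which also clears the caller's
 * pointer:
 *
 *	struct vmw_resource *tmp = vmw_resource_reference(res);
 *	...
 *	vmw_resource_unreference(&tmp);	// tmp is NULL afterwards
 */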

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->guest_memory_bo) {
		struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;

		ret = ttm_bo_reserve(bo, false, false, NULL);
		BUG_ON(ret);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->guest_memory_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->guest_memory_bo);
		ttm_bo_unreserve(bo);
		vmw_user_bo_unref(&res->guest_memory_bo);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
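
/*
 * The idr_preload()/GFP_NOWAIT pairing above lets the id be allocated while
 * holding the resource_lock spinlock: idr_preload() fills the per-cpu idr
 * cache in a context that may still sleep, so the idr_alloc() call itself
 * never has to.
 */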

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->guest_memory_bo = NULL;
	res->guest_memory_offset = 0;
	res->guest_memory_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
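
/*
 * Usage sketch (hypothetical resource type, illustrative only): an
 * implementation embeds struct vmw_resource and calls vmw_resource_init()
 * from its own constructor, deferring the device id until first validation:
 *
 *	ret = vmw_resource_init(dev_priv, &ctx->res, true,
 *				vmw_ctx_res_free, &vmw_ctx_func);
 *
 * vmw_ctx_res_free and vmw_ctx_func above are assumed names for the example
 * type's destructor and function table, not symbols defined in this file.
 */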

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(!base))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/*
 * Helper function that looks up either a surface or a buffer object.
 *
 * The pointers pointed to by out_surf and out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct drm_file *filp,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_bo **out_buf)
{
	struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(filp, handle, out_buf);
	return ret;
}
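
/*
 * Note the lookup order above: the handle is first tried as a surface via
 * user_surface_converter and only on failure as a buffer object via
 * vmw_user_bo_lookup(), so at most one of *out_surf and *out_buf is set on
 * return.
 */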

/**
 * vmw_resource_buf_alloc - Allocate a guest memory buffer for a resource.
 *
 * @res:            The resource for which to allocate a gbo buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size = PFN_ALIGN(res->guest_memory_size);
	struct vmw_bo *gbo;
	struct vmw_bo_params bo_params = {
		.domain = res->func->domain,
		.busy_domain = res->func->busy_domain,
		.bo_type = ttm_bo_type_device,
		.size = res->guest_memory_size,
		.pin = false
	};
	int ret;

	if (likely(res->guest_memory_bo)) {
		BUG_ON(res->guest_memory_bo->tbo.base.size < size);
		return 0;
	}

	ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->guest_memory_bo = gbo;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:        The resource to make visible to the device.
 * @val_buf:    Information about a buffer possibly
 *              containing backup data if a bind operation is needed.
 * @dirtying:   Transfer dirty regions.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_guest_memory && !vmw_resource_mob_attached(res) &&
	      val_buf->bo) ||
	     (!func->needs_guest_memory && val_buf->bo))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_guest_memory)
			vmw_resource_mob_attach(res);
	}

	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->guest_memory_bo->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->guest_memory_bo->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->guest_memory_offset + res->guest_memory_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->guest_memory_bo, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}
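
/*
 * In short, validation performs up to three steps: create the hardware
 * resource if it has no id yet, bind it to the validated buffer when the
 * resource type needs that, and finally reconcile dirty tracking by
 * transferring dirty regions from the mob to the resource and syncing them
 * to the device.
 */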

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:                     Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:               Change dirty status of the resource.
 * @dirty:                   When changing dirty status indicates the new status.
 * @switch_guest_memory:     Guest memory buffer has been switched.
 * @new_guest_memory_bo:     Pointer to the new guest memory buffer if command
 *                           submission switched buffers. May be NULL.
 * @new_guest_memory_offset: New gbo offset if @switch_guest_memory is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_guest_memory,
			    struct vmw_bo *new_guest_memory_bo,
			    unsigned long new_guest_memory_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) {
		if (res->guest_memory_bo) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->guest_memory_bo);
			vmw_user_bo_unref(&res->guest_memory_bo);
		}

		if (new_guest_memory_bo) {
			res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);

			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */
			WARN_ON(res->coherent && !new_guest_memory_bo->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->guest_memory_bo = NULL;
		}
	} else if (switch_guest_memory && res->coherent) {
		vmw_bo_dirty_release(res->guest_memory_bo);
	}

	if (switch_guest_memory)
		res->guest_memory_offset = new_guest_memory_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool guest_memory_dirty = false;
	int ret;

	if (unlikely(!res->guest_memory_bo)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->guest_memory_bo->tbo);
	val_buf->bo = &res->guest_memory_bo->tbo;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_guest_memory && !vmw_resource_mob_attached(res))
		return 0;

	guest_memory_dirty = res->guest_memory_dirty;
	vmw_bo_placement_set(res->guest_memory_bo, res->func->domain,
			     res->func->busy_domain);
	ret = ttm_bo_validate(&res->guest_memory_bo->tbo,
			      &res->guest_memory_bo->placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (guest_memory_dirty)
		vmw_user_bo_unref(&res->guest_memory_bo);

	return ret;
}
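
/*
 * The error unwind above mirrors the setup order: a failed validate first
 * backs off the ttm_eu reservation, and both error paths then drop the
 * reference taken with ttm_bo_get() and clear val_buf->bo so that a later
 * backoff becomes a no-op.
 */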

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:             The resource to reserve.
 * @interruptible:   Whether any sleeps should be performed interruptible.
 * @no_guest_memory: Do not allocate a guest memory buffer.
 *
 * This function takes the resource off the LRU list and makes sure
 * a guest memory buffer is present for guest-backed resources.
 * However, the buffer may not be bound to the resource at this
 * point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_guest_memory)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_guest_memory && !res->guest_memory_bo &&
	    !no_guest_memory) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a guest memory buffer "
				  "of size %lu bytes\n",
				  (unsigned long) res->guest_memory_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    guest memory buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Guest memory buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_guest_memory || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->guest_memory_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}
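
/*
 * Eviction thus runs in three steps: make sure a reserved and validated
 * backup buffer exists, unbind the resource so its contents land in that
 * buffer, then destroy the hardware resource. guest_memory_dirty is set so
 * that a later validation knows the backup buffer holds the authoritative
 * copy of the data.
 */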

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->guest_memory_bo)
		val_buf.bo = &res->guest_memory_bo->tbo;
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_user_bo_unref(&res->guest_memory_bo);
	}

	return 0;

out_no_validate:
	return ret;
}
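
/*
 * The -EBUSY retry loop above evicts least-recently-used resources of the
 * same type one at a time. A failed eviction puts the victim back on the
 * LRU list, and the loop gives up on -ERESTARTSYS or after
 * VMW_RES_EVICT_ERR_COUNT failed evictions.
 */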

/**
 * vmw_resource_unbind_list - Unbind all resources attached to a backing mob
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_bo *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->tbo,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->tbo.base.resv);
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->guest_memory_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->tbo, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_bo *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The memory region @bo is moving from.
 * @new_mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_resource *old_mem,
			   struct ttm_resource *new_mem)
{
	struct vmw_bo *dx_query_mob;
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);

	mutex_lock(&dev_priv->binding_mutex);

	/* If BO is being moved from MOB to system memory */
	if (old_mem &&
	    new_mem->mem_type == TTM_PL_SYSTEM &&
	    old_mem->mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		dx_query_mob = to_vmw_bo(&bo->base);
		if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
			mutex_unlock(&dev_priv->binding_mutex);
			return;
		}

		/* Read back cached states */
		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_guest_memory;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to wait interruptible.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_bo *vbo = NULL;

		if (res->guest_memory_bo) {
			vbo = res->guest_memory_bo;

			ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL);
			if (ret)
				goto out_no_validate;
			if (!vbo->tbo.pin_count) {
				vmw_bo_placement_set(vbo,
						     res->func->domain,
						     res->func->busy_domain);
				ret = ttm_bo_validate
					(&vbo->tbo,
					 &vbo->placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->tbo);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->tbo);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	return ret;
}
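
/*
 * Usage sketch (illustrative only): a caller that needs a resource to stay
 * resident, for example while it is being scanned out, brackets the access
 * with a pin reference:
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	...the resource id and backing mob now stay stable...
 *	vmw_resource_unpin(res);
 */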

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->guest_memory_bo) {
		struct vmw_bo *vbo = res->guest_memory_bo;

		(void) ttm_bo_reserve(&vbo->tbo, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->tbo);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;

	/*
	 * Find the resource with lowest guest_memory_offset that intersects
	 * the range.
	 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->guest_memory_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->guest_memory_offset + cur_res->guest_memory_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
			/* Continue to look for resources with lower offsets */
		}
	}

	/*
	 * In order of increasing guest_memory_offset, clean dirty resources
	 * intersecting the range.
	 */
	while (found) {
		if (found->res_dirty) {
			int ret;

			if (!found->func->clean)
				return -EINVAL;

			ret = found->func->clean(found);
			if (ret)
				return ret;

			found->res_dirty = false;
		}
		last_cleaned = found->guest_memory_offset + found->guest_memory_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->guest_memory_offset >= res_end)
			break;
	}

	/*
	 * Set number of pages allowed prefaulting and fence the buffer object
	 */
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->tbo;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
	}

	return 0;
}
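
/*
 * On return, *num_prefault starts at 1 and is raised to cover the contiguous
 * clean range from @start, so a fault handler may safely prefault that many
 * pages. Passing a NULL fence to vmw_bo_fence_single() makes it create and
 * attach a new fence covering the cleaning commands issued above.
 */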