// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
 * after validation is -1, the command is replaced with a NOP. Otherwise no
 * action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the
 * id that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

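/*
 * Note: the 29-bit @offset and 3-bit @rel_type bitfields above pack into a
 * single 32-bit word; the BUILD_BUG_ON()s in vmw_resource_relocations_apply()
 * check that SVGA_CB_MAX_SIZE and vmw_res_rel_max actually fit those widths.
 */
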
/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),		\
				       (_gb_disable), (_gb_enable), #_cmd}

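/*
 * Illustrative sketch (mirroring how the command table later in this file is
 * populated, not an addition to it): an entry built with VMW_CMD_DEF,
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 *
 * expands to a designated initializer indexed by the command id, recording
 * the verifier function, the three permission flags and the command name.
 */
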
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

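/*
 * The command verifiers below use vmw_ptr_diff() to record where in the
 * submission an id lives, e.g.
 *
 *	vmw_ptr_diff(sw_context->buf_start, id_loc)
 *
 * which yields the byte offset stored in struct vmw_resource_relocation.
 */
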
/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state
 * changes should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);
		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private struct.
 * @sw_context: The command submission context
 * @res: Pointer to the context resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

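/*
 * Note on the allocation strategy above: the first context validated in a
 * command batch borrows the pre-allocated sw_context->staged_bindings and
 * marks it in use; any further context in the same batch gets its own
 * vmw_binding_state_alloc() allocation, which is freed again by
 * vmw_execbuf_bindings_commit().
 */
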
/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation
 * node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store
 * execbuf private information in the validation node. Typically the
 * binding manager associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource
 * validation node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an
 * unreferenced rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error
 * codes are %-EINVAL on inconsistency and %-ESRCH if the resource was
 * doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  (void **)&ctx_info, &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret)
			return ret;
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

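/*
 * The early return above is the common fast path: if the per-type cache
 * entry already points at @res, the resource is known to be on the
 * validation list, so only the rcu reference obtained by the caller needs
 * to be released.
 */
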
/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res))
		return 0;

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return ERR_PTR(-EINVAL);
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add(sw_context,
							    entry->res);
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

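/*
 * Worked example (a sketch, not driver code): for a vmw_res_rel_normal
 * relocation recorded at byte offset 24, the patch amounts to
 *
 *	u32 *addr = (u32 *)((unsigned long) cb + 24);
 *	*addr = rel->res->id;	// device id known only after validation
 *
 * A vmw_res_rel_cond_nop instead rewrites the command id to SVGA_3D_CMD_NOP
 * when the resource never received a valid id (id == -1), leaving the
 * command harmless for the device.
 */
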
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (unlikely(IS_ERR(res))) {
			DRM_ERROR("Could not find or use resource 0x%08x.\n",
				  (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (ret)
		return ret;

	if (p_res)
		*p_res = res;

	return 0;
}

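/*
 * Every id handled by vmw_cmd_res_check() thus gets a vmw_res_rel_normal
 * relocation: the user-space handle validated here is replaced with the
 * device-visible id by vmw_resource_relocations_apply() just before
 * submission.
 */
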
/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;
	} *cmd;

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL) {
		DRM_ERROR("Failed to rebind queries.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	u32 i;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command
 * submission context's resource cache and hence the last resource of that
 * type to be processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL
 * if it wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		DRM_ERROR("Illegal render target type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->body.target.sid,
				&res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBufferCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXPredCopyRegion body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new - and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

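/*
 * Usage sketch (this is the pattern vmw_cmd_dx_bind_query() below follows):
 * callers pass the address of the mobid field inside the command itself,
 *
 *	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
 *				    &vmw_bo);
 *
 * so that reloc->mob_loc points into the command stream and the real MOB id
 * can be patched in after the buffer has been validated.
 */
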
/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a non-reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_dx_define_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineQuery q;
	} *cmd;

	int ret;
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *cotable_res;

	if (ctx_node == NULL) {
		DRM_ERROR("DX Context not set for query.\n");
		return -EINVAL;
	}

	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

	if (cmd->q.type <  SVGA3D_QUERYTYPE_MIN ||
	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dx_bind_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindQuery q;
	} *cmd;

	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
				    &vmw_bo);
	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;

	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));

		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

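/*
 * The rewrite above is the general pattern for legacy query commands on
 * guest-backed hardware: the BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)) check
 * guarantees that the guest-backed variant can be memcpy()d over the legacy
 * command in place, after which the corresponding *_gb_query verifier runs.
 */
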
/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));

		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));

		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

	return 0;
}

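/*
 * Clamp arithmetic above, worked through with illustrative numbers
 * (assuming 4 KiB pages): for a 4-page BO, bo_size = 16384; with
 * cmd->dma.guest.ptr.offset = 4096, any suffix->maximumOffset larger than
 * 12288 is clamped to 12288, so the DMA can never reach past the end of
 * the buffer object.
 */
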
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

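/*
 * Layout assumed by the checks above: the draw command body is followed
 * first by numVertexDecls SVGA3dVertexDecl entries and then by numRanges
 * SVGA3dPrimitiveRange entries, so each maxnum bound is just the remaining
 * command size divided by the element size.
 */
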
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context,
				       &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: The resource to switch the backup buffer for.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 buf_id, backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_shader_define_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineShader body;
	} *cmd;
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, struct vmw_shader_define_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv,
				    vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1,
				    cmd->body.type, size,
				    &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context,
					   NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx),
				cmd->body.shid,
				cmd->body.type,
				&sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context,
					   NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi,
			binding.shader_slot, 0);

	return 0;
}

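/*
 * NOTE (illustrative, not driver code): the legacy and DX SET_* validators
 * share one pattern: validate the referenced resource, then stage a binding
 * on the context instead of applying it immediately:
 *
 *	binding.bi.ctx = ctx;                    (owning context)
 *	binding.bi.res = res;                    (validated resource)
 *	binding.bi.bt  = vmw_ctx_binding_shader; (binding point type)
 *	vmw_binding_add(staged, &binding.bi, shader_slot, 0);
 *
 * The staged state is committed to the context's tracked bindings only
 * once the entire batch has validated and been submitted.
 */
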
/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSingleConstantBuffer body;
	} *cmd;
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
			  (unsigned) cmd->body.type,
			  (unsigned) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi,
			binding.shader_slot, binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate an
 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

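/*
 * NOTE (illustrative): commands carrying a trailing array derive the
 * element count from the header size, and the range check is done in
 * 64-bit arithmetic so that a large start index cannot wrap around
 * 32 bits:
 *
 *	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(element);
 *	if ((u64) start + (u64) num > (u64) MAX_ELEMENTS)
 *		reject the command;
 *
 * The vertex-buffer, render-target and stream-output validators below
 * use the same pattern.
 */
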
/**
 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShader body;
	} *cmd;
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		DRM_ERROR("Illegal shader type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			DRM_ERROR("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi,
			binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validate an
 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		DRM_ERROR("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = res;
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetIndexBuffer body;
	} *cmd;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate an
 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		DRM_ERROR("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
				    vmw_ctx_binding_ds, 0,
				    &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0,
				     (void *)&cmd[1], num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearRenderTargetView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
					   cmd->body.renderTargetViewId));
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate an
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearDepthStencilView body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
					   cmd->body.depthStencilViewId));
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man,
			    ctx_node->ctx,
			    srf,
			    view_type,
			    cmd->defined_id,
			    header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate an
 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		DRM_ERROR("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi,
				0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate an
 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->sid, NULL);
}

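/*
 * NOTE (illustrative): the BUILD_BUG_ON()s above are compile-time layout
 * guards. All three subresource command bodies start with the surface id,
 * so the anonymous union lets a single sid member alias them, and the
 * offsetof() comparisons break the build should the device headers ever
 * diverge from that layout.
 */
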
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;

	if (unlikely(ctx_node == NULL)) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - Validate a view remove command and
 * schedule the view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this
 * command batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_view_remove(sw_context->man,
			      cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res,
			      &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * invalid command errors.
	 */
	return vmw_resource_relocation_add(sw_context,
					   view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}

/**
 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);
	if (ret)
		DRM_ERROR("Could not find shader to remove.\n");

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					user_context_converter,
					&cmd->body.cid, &ctx);
		if (ret)
			return ret;
	} else {
		if (!sw_context->dx_ctx_node) {
			DRM_ERROR("DX Context not set.\n");
			return -EINVAL;
		}
		ctx = sw_context->dx_ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx),
				cmd->body.shid, 0);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

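/*
 * NOTE (illustrative): "bind" commands such as the one above do not create
 * resources; they switch the MOB backing an already-defined resource.
 * vmw_cmd_res_switch_backup() records the new backing buffer so that the
 * relocation pass can patch cmd->body.mobid once the buffer has been
 * validated to its final placement.
 */
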
/**
 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXGenMips body;
	} *cmd = container_of(header, typeof(*cmd), header);

	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
					   cmd->body.shaderResourceViewId));
}

/**
 * vmw_cmd_dx_transfer_from_buffer -
 * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXTransferFromBuffer body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.destSid, NULL);
}

/**
 * vmw_cmd_intra_surface_copy -
 * Validate an SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdIntraSurfaceCopy body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/*
	 * DX commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
		    true, false, true),
};

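/*
 * NOTE (illustrative sketch, not driver code): each VMW_CMD_DEF() entry
 * pairs a command id with a verifier and three gates (user_allow,
 * gb_disable, gb_enable). A lookup mirroring what vmw_cmd_check() below
 * actually does would read:
 *
 *	const struct vmw_cmd_entry *entry =
 *		&vmw_cmd_entries[cmd_id - SVGA_3D_CMD_BASE];
 *
 *	if (!entry->user_allow && !sw_context->kernel)
 *		return -EPERM;      (privileged command)
 *	if (entry->gb_disable && gb)
 *		return -EINVAL;     (deprecated on guest-backed hardware)
 *	if (entry->gb_enable && !gb)
 *		return -EINVAL;     (needs guest-backed hardware)
 *	return entry->func(dev_priv, sw_context, header);
 */
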
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}

static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);


	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;
out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

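/*
 * NOTE (illustrative): vmw_cmd_check() returns the verified command's size
 * through *size, which lets the loop above advance header by header until
 * cur_size reaches exactly zero. A non-zero remainder means some command
 * claimed more bytes than the batch contains, which is treated as batch
 * corruption rather than silently truncated.
 */
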
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */

	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

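/*
 * NOTE (illustrative): a buffer object's final placement decides how its
 * relocation is patched: VRAM placements become offsets into the special
 * SVGA_GMR_FRAMEBUFFER "GMR", GMR placements store the GMR id, and MOB
 * placements write the 32-bit mob id through the mob_loc pointer recorded
 * in struct vmw_relocation.
 */
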
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

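/*
 * NOTE (worked example): the bounce buffer grows by roughly 1.5x per step,
 * page-aligned. Assuming a 32 KiB buffer and 4 KiB pages, a 100 KiB batch
 * would resize it 32 -> 48 -> 72 -> 108 KiB before a single vmalloc() of
 * the final size replaces the old buffer.
 */
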
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * @file_priv: Pointer to the drm file, or NULL. Must be non-NULL if
 * @p_handle is non-NULL.
 * @dev_priv: Pointer to a device private structure.
 * @p_fence: Location in which to return the created fence object.
 * @p_handle: If non-NULL, location in which to return a userspace handle.
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
 * @sync_file: Only used to clean up in case of an error in this function.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle,
			    int32_t out_fence_fd,
			    struct sync_file *sync_file)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		if (sync_file)
			fput(sync_file->file);

		if (fence_rep.fd != -1) {
			put_unused_fd(fence_rep.fd);
			fence_rep.fd = -1;
		}

		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to a kernel copy of the commands, or NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that case,
 * the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

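/*
 * NOTE (illustrative): callers distinguish three outcomes of
 * vmw_execbuf_cmdbuf(): an IS_ERR() pointer is a hard failure; a non-NULL
 * *header means a command buffer was allocated and must be either
 * submitted or released with vmw_cmdbuf_header_free(); otherwise the
 * original kernel_commands value (possibly NULL) is passed through and
 * the fifo path is used instead.
 */
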
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	res = vmw_user_resource_noref_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter);
	if (unlikely(IS_ERR(res))) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noref_val_add(sw_context, res);
	if (unlikely(ret != 0))
		return ret;

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	return 0;
}

3818 int vmw_execbuf_process(struct drm_file
*file_priv
,
3819 struct vmw_private
*dev_priv
,
3820 void __user
*user_commands
,
3821 void *kernel_commands
,
3822 uint32_t command_size
,
3823 uint64_t throttle_us
,
3824 uint32_t dx_context_handle
,
3825 struct drm_vmw_fence_rep __user
*user_fence_rep
,
3826 struct vmw_fence_obj
**out_fence
,
3829 struct vmw_sw_context
*sw_context
= &dev_priv
->ctx
;
3830 struct vmw_fence_obj
*fence
= NULL
;
3831 struct vmw_cmdbuf_header
*header
;
3834 int32_t out_fence_fd
= -1;
3835 struct sync_file
*sync_file
= NULL
;
3836 DECLARE_VAL_CONTEXT(val_ctx
, &sw_context
->res_ht
, 1);
3838 if (flags
& DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD
) {
3839 out_fence_fd
= get_unused_fd_flags(O_CLOEXEC
);
3840 if (out_fence_fd
< 0) {
3841 DRM_ERROR("Failed to get a fence file descriptor.\n");
3842 return out_fence_fd
;
3847 ret
= vmw_wait_lag(dev_priv
, &dev_priv
->fifo
.marker_queue
,
3851 goto out_free_fence_fd
;
3854 kernel_commands
= vmw_execbuf_cmdbuf(dev_priv
, user_commands
,
3855 kernel_commands
, command_size
,
3857 if (IS_ERR(kernel_commands
)) {
3858 ret
= PTR_ERR(kernel_commands
);
3859 goto out_free_fence_fd
;
3862 ret
= mutex_lock_interruptible(&dev_priv
->cmdbuf_mutex
);
3865 goto out_free_header
;
3868 sw_context
->kernel
= false;
3869 if (kernel_commands
== NULL
) {
3870 ret
= vmw_resize_cmd_bounce(sw_context
, command_size
);
3871 if (unlikely(ret
!= 0))
3875 ret
= copy_from_user(sw_context
->cmd_bounce
,
3876 user_commands
, command_size
);
3878 if (unlikely(ret
!= 0)) {
3880 DRM_ERROR("Failed copying commands.\n");
3883 kernel_commands
= sw_context
->cmd_bounce
;
3885 sw_context
->kernel
= true;
3887 sw_context
->fp
= vmw_fpriv(file_priv
);
3888 INIT_LIST_HEAD(&sw_context
->ctx_list
);
3889 sw_context
->cur_query_bo
= dev_priv
->pinned_bo
;
3890 sw_context
->last_query_ctx
= NULL
;
3891 sw_context
->needs_post_query_barrier
= false;
3892 sw_context
->dx_ctx_node
= NULL
;
3893 sw_context
->dx_query_mob
= NULL
;
3894 sw_context
->dx_query_ctx
= NULL
;
3895 memset(sw_context
->res_cache
, 0, sizeof(sw_context
->res_cache
));
3896 INIT_LIST_HEAD(&sw_context
->res_relocations
);
3897 INIT_LIST_HEAD(&sw_context
->bo_relocations
);
3898 if (sw_context
->staged_bindings
)
3899 vmw_binding_state_reset(sw_context
->staged_bindings
);
3901 if (!sw_context
->res_ht_initialized
) {
3902 ret
= drm_ht_create(&sw_context
->res_ht
, VMW_RES_HT_ORDER
);
3903 if (unlikely(ret
!= 0))
3905 sw_context
->res_ht_initialized
= true;
3907 INIT_LIST_HEAD(&sw_context
->staged_cmd_res
);
3908 sw_context
->ctx
= &val_ctx
;
3909 ret
= vmw_execbuf_tie_context(dev_priv
, sw_context
, dx_context_handle
);
3910 if (unlikely(ret
!= 0))
3913 ret
= vmw_cmd_check_all(dev_priv
, sw_context
, kernel_commands
,
3915 if (unlikely(ret
!= 0))
3918 ret
= vmw_resources_reserve(sw_context
);
3919 if (unlikely(ret
!= 0))
3922 ret
= vmw_validation_bo_reserve(&val_ctx
, true);
3923 if (unlikely(ret
!= 0))
3926 ret
= vmw_validation_bo_validate(&val_ctx
, true);
3927 if (unlikely(ret
!= 0))
3930 ret
= vmw_validation_res_validate(&val_ctx
, true);
3931 if (unlikely(ret
!= 0))
3933 vmw_validation_drop_ht(&val_ctx
);
3935 ret
= mutex_lock_interruptible(&dev_priv
->binding_mutex
);
3936 if (unlikely(ret
!= 0)) {
3941 if (dev_priv
->has_mob
) {
3942 ret
= vmw_rebind_contexts(sw_context
);
3943 if (unlikely(ret
!= 0))
3944 goto out_unlock_binding
;
3948 ret
= vmw_execbuf_submit_fifo(dev_priv
, kernel_commands
,
3949 command_size
, sw_context
);
3951 ret
= vmw_execbuf_submit_cmdbuf(dev_priv
, header
, command_size
,
3955 mutex_unlock(&dev_priv
->binding_mutex
);
	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep.
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
	/*
	 * If anything fails here, give up trying to export the fence and do a
	 * sync since the user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			DRM_ERROR("Unable to create sync file for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle,
				    out_fence_fd, sync_file);
	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid
	 * deadlocks in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return 0;
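
	/*
	 * Error unwind: each label below undoes strictly less state than the
	 * one above it - unlock the binding mutex, back off reserved buffer
	 * objects, unreserve resources and free relocations, revert staged
	 * command buffer resources, and finally release the command buffer
	 * header and the preallocated fence fd.
	 */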
out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid
	 * deadlocks in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}
/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer if the
 * normal way to do this hits an error, which should typically be extremely
 * rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}
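
/*
 * A note on the recovery above: the (void) vmw_fallback_wait() with a 10*HZ
 * timeout is a best-effort, uninterruptible attempt to idle the device; its
 * return value is deliberately ignored since this path only runs when the
 * normal unpin sequence has already failed.
 */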
/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
 * query barrier that flushes all queries touching the current buffer pointed
 * to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		/*
		 * The dummy query acts as the query barrier: by the time it
		 * executes, all earlier queries on @query_cid have been
		 * flushed, making the unpin below safe.
		 */
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;

out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}
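
/*
 * Illustrative usage sketch, not a call site in this file: a caller that only
 * needs the query-barrier side effect, and has no fence of its own, passes a
 * NULL fence while holding the cmdbuf_mutex:
 *
 *	mutex_lock(&dev_priv->cmdbuf_mutex);
 *	__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 *	mutex_unlock(&dev_priv->cmdbuf_mutex);
 *
 * which is essentially what vmw_execbuf_release_pinned_bo() below does,
 * guarded by @query_cid_valid.
 */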
/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};
	struct dma_fence *in_fence = NULL;

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while maintaining backwards
	 * compatibility: We take different code paths depending on the
	 * value of arg.version.
	 */
	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	default:
		break;
	}
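
	/*
	 * Argument layout, roughly: a version 1 caller passes only the fields
	 * up to but not including context_handle (copy_offset[0] bytes), so
	 * context_handle is faked to -1 above. Version 2 and later callers
	 * pass the full struct drm_vmw_execbuf_arg; the second
	 * copy_from_user() above pulled in the remaining
	 * copy_offset[arg.version - 1] - copy_offset[0] bytes.
	 */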
	/* If imported a fence FD from elsewhere, then wait on it */
	if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg.imported_fence_fd);

		if (!in_fence) {
			DRM_ERROR("Cannot get imported fence\n");
			return -EINVAL;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL,
				  arg.flags);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);