// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

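/*
 * Log2 of the number of hash buckets used to look up resources
 * during command stream validation.
 */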
#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
 * after validation is -1, the command is replaced with a NOP. Otherwise no
 * action.
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Byte offset into the command buffer where the id that needs
 * fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back used to validate the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: The command's name, used for error reporting.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}
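
/*
 * Illustrative VMW_CMD_DEF entry, as used in the command table further
 * down in this file (assumes the SVGA_3D_CMD_SURFACE_COPY entry keeps
 * its usual flags):
 * VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *	       true, false, false),
 */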

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state
 * changes should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);
		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the context resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;
out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation
 * node
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store
 * execbuf private information in the validation node. Typically the
 * binding manager associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource
 * validation node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an
 * unreferenced rcu-protected pointer to the validation list.
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error
 * codes are %-EINVAL on inconsistency and %-ESRCH if the resource was
 * doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  (void **)&ctx_info, &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret)
			return ret;
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res))
		return 0;

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to,
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return ERR_PTR(-EINVAL);
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add(sw_context,
							    entry->res);
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */

	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

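/*
 * vmw_cmd_invalid - Entry point for commands that are disallowed in the
 * current configuration. Always fails validation with -EINVAL.
 */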
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

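/*
 * vmw_cmd_ok - Entry point for commands that are always safe to submit
 * and need no further checking.
 */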
static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to the resource. Populated on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (unlikely(IS_ERR(res))) {
			DRM_ERROR("Could not find or use resource 0x%08x.\n",
				  (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (unlikely(ret != 0))
		return ret;

	if (p_res)
		*p_res = res;

	return 0;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;
	} *cmd;

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

	if (cmd == NULL) {
		DRM_ERROR("Failed to rebind queries.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	u32 i;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command
 * submission context's resource cache, and hence must be the last resource
 * of that type to have been processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL
 * if it wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

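/*
 * vmw_cmd_set_render_target_check - Validate an SVGA_3D_CMD_SETRENDERTARGET
 * command: checks the render target type and the context and surface
 * handles, and registers the render target binding when MOBs are in use.
 */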
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		DRM_ERROR("Illegal render target type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->body.target.sid,
				&res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

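/*
 * vmw_cmd_surface_copy_check - Validate an SVGA_3D_CMD_SURFACE_COPY command
 * by checking the source and destination surface handles.
 */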
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

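/*
 * vmw_cmd_buffer_copy_check - Validate an SVGA_3D_CMD_DX_BUFFER_COPY command
 * by checking the source and destination surface handles.
 */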
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBufferCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest, NULL);
}

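/*
 * vmw_cmd_pred_copy_check - Validate an SVGA_3D_CMD_DX_PRED_COPY_REGION
 * command by checking the source and destination surface handles.
 */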
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXPredCopyRegion body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

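/*
 * vmw_cmd_stretch_blt_check - Validate an SVGA_3D_CMD_SURFACE_STRETCHBLT
 * command by checking the source and destination surface handles.
 */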
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

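/*
 * vmw_cmd_blt_surf_screen_check - Validate an
 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN command by checking the source image
 * surface handle.
 */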
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

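/*
 * vmw_cmd_present_check - Validate an SVGA_3D_CMD_PRESENT command by
 * checking the handle of the surface to be presented.
 */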
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a non-reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_dx_define_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineQuery q;
	} *cmd;
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *cotable_res;

	if (ctx_node == NULL) {
		DRM_ERROR("DX Context not set for query.\n");
		return -EINVAL;
	}

	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

	if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);

	return vmw_cotable_notify(cotable_res, cmd->q.queryId);
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dx_bind_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindQuery q;
	} *cmd;
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
				    &vmw_bo);
	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;

	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	return vmw_translate_mob_ptr(dev_priv, sw_context,
				     &cmd->q.mobid,
				     &vmw_bo);
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	return vmw_translate_guest_ptr(dev_priv, sw_context,
				       &cmd->q.guestResult,
				       &vmw_bo);
}

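/*
 * vmw_cmd_dma - Validate an SVGA_3D_CMD_SURFACE_DMA command: verifies the
 * DMA suffix, translates the guest pointer, clamps the transfer so it stays
 * within the backing buffer object, and snoops cursor surface DMA.
 */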
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

	return 0;
}

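/*
 * vmw_cmd_draw - Validate an SVGA_3D_CMD_DRAW_PRIMITIVES command:
 * bounds-checks the vertex declaration and index range arrays against the
 * command size and validates each referenced surface.
 */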
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

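/*
 * vmw_cmd_tex_state - Validate an SVGA_3D_CMD_SETTEXTURESTATE command:
 * checks each SVGA3D_TS_BIND_TEXTURE state entry and registers the texture
 * bindings when MOBs are in use.
 */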
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

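/*
 * vmw_cmd_check_define_gmrfb - Validate an SVGA_CMD_DEFINE_GMRFB command by
 * translating the guest pointer of the GMR framebuffer.
 */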
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context,
				       &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource whose backup buffer is switched.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res,
				     uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 buf_id, backup_offset);
}

1829 /**
1830 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1831 * command
1832 *
1833 * @dev_priv: Pointer to a device private struct.
1834 * @sw_context: The software context being used for this batch.
1835 * @header: Pointer to the command header in the command stream.
1836 */
1837 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1838 struct vmw_sw_context *sw_context,
1839 SVGA3dCmdHeader *header)
1840 {
1841 struct vmw_bind_gb_surface_cmd {
1842 SVGA3dCmdHeader header;
1843 SVGA3dCmdBindGBSurface body;
1844 } *cmd;
1845
1846 cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1847
1848 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1849 user_surface_converter,
1850 &cmd->body.sid, &cmd->body.mobid,
1851 0);
1852 }
1853
1854 /**
1855 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1856 * command
1857 *
1858 * @dev_priv: Pointer to a device private struct.
1859 * @sw_context: The software context being used for this batch.
1860 * @header: Pointer to the command header in the command stream.
1861 */
1862 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1863 struct vmw_sw_context *sw_context,
1864 SVGA3dCmdHeader *header)
1865 {
1866 struct vmw_gb_surface_cmd {
1867 SVGA3dCmdHeader header;
1868 SVGA3dCmdUpdateGBImage body;
1869 } *cmd;
1870
1871 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1872
1873 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1874 user_surface_converter,
1875 &cmd->body.image.sid, NULL);
1876 }
1877
1878 /**
1879 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1880 * command
1881 *
1882 * @dev_priv: Pointer to a device private struct.
1883 * @sw_context: The software context being used for this batch.
1884 * @header: Pointer to the command header in the command stream.
1885 */
1886 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1887 struct vmw_sw_context *sw_context,
1888 SVGA3dCmdHeader *header)
1889 {
1890 struct vmw_gb_surface_cmd {
1891 SVGA3dCmdHeader header;
1892 SVGA3dCmdUpdateGBSurface body;
1893 } *cmd;
1894
1895 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1896
1897 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1898 user_surface_converter,
1899 &cmd->body.sid, NULL);
1900 }
1901
1902 /**
1903 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1904 * command
1905 *
1906 * @dev_priv: Pointer to a device private struct.
1907 * @sw_context: The software context being used for this batch.
1908 * @header: Pointer to the command header in the command stream.
1909 */
1910 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1911 struct vmw_sw_context *sw_context,
1912 SVGA3dCmdHeader *header)
1913 {
1914 struct vmw_gb_surface_cmd {
1915 SVGA3dCmdHeader header;
1916 SVGA3dCmdReadbackGBImage body;
1917 } *cmd;
1918
1919 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1920
1921 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1922 user_surface_converter,
1923 &cmd->body.image.sid, NULL);
1924 }
1925
1926 /**
1927 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1928 * command
1929 *
1930 * @dev_priv: Pointer to a device private struct.
1931 * @sw_context: The software context being used for this batch.
1932 * @header: Pointer to the command header in the command stream.
1933 */
1934 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1935 struct vmw_sw_context *sw_context,
1936 SVGA3dCmdHeader *header)
1937 {
1938 struct vmw_gb_surface_cmd {
1939 SVGA3dCmdHeader header;
1940 SVGA3dCmdReadbackGBSurface body;
1941 } *cmd;
1942
1943 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1944
1945 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1946 user_surface_converter,
1947 &cmd->body.sid, NULL);
1948 }
1949
1950 /**
1951 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1952 * command
1953 *
1954 * @dev_priv: Pointer to a device private struct.
1955 * @sw_context: The software context being used for this batch.
1956 * @header: Pointer to the command header in the command stream.
1957 */
1958 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1959 struct vmw_sw_context *sw_context,
1960 SVGA3dCmdHeader *header)
1961 {
1962 struct vmw_gb_surface_cmd {
1963 SVGA3dCmdHeader header;
1964 SVGA3dCmdInvalidateGBImage body;
1965 } *cmd;
1966
1967 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1968
1969 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1970 user_surface_converter,
1971 &cmd->body.image.sid, NULL);
1972 }
1973
1974 /**
1975 * vmw_cmd_invalidate_gb_surface - Validate an
1976 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1977 *
1978 * @dev_priv: Pointer to a device private struct.
1979 * @sw_context: The software context being used for this batch.
1980 * @header: Pointer to the command header in the command stream.
1981 */
1982 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1983 struct vmw_sw_context *sw_context,
1984 SVGA3dCmdHeader *header)
1985 {
1986 struct vmw_gb_surface_cmd {
1987 SVGA3dCmdHeader header;
1988 SVGA3dCmdInvalidateGBSurface body;
1989 } *cmd;
1990
1991 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1992
1993 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1994 user_surface_converter,
1995 &cmd->body.sid, NULL);
1996 }
1997
1999 /**
2000 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2001 * command
2002 *
2003 * @dev_priv: Pointer to a device private struct.
2004 * @sw_context: The software context being used for this batch.
2005 * @header: Pointer to the command header in the command stream.
2006 */
2007 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2008 struct vmw_sw_context *sw_context,
2009 SVGA3dCmdHeader *header)
2010 {
2011 struct vmw_shader_define_cmd {
2012 SVGA3dCmdHeader header;
2013 SVGA3dCmdDefineShader body;
2014 } *cmd;
2015 int ret;
2016 size_t size;
2017 struct vmw_resource *ctx;
2018
2019 cmd = container_of(header, struct vmw_shader_define_cmd,
2020 header);
2021
2022 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2023 user_context_converter, &cmd->body.cid,
2024 &ctx);
2025 if (unlikely(ret != 0))
2026 return ret;
2027
2028 if (unlikely(!dev_priv->has_mob))
2029 return 0;
2030
2031 size = cmd->header.size - sizeof(cmd->body);
2032 ret = vmw_compat_shader_add(dev_priv,
2033 vmw_context_res_man(ctx),
2034 cmd->body.shid, cmd + 1,
2035 cmd->body.type, size,
2036 &sw_context->staged_cmd_res);
2037 if (unlikely(ret != 0))
2038 return ret;
2039
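	/*
	 * The shader has been made resident through the kernel-managed
	 * compat shader path above, so the legacy SHADER_DEFINE command
	 * itself is patched into a NOP before device submission.
	 */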
2040 return vmw_resource_relocation_add(sw_context,
2041 NULL,
2042 vmw_ptr_diff(sw_context->buf_start,
2043 &cmd->header.id),
2044 vmw_res_rel_nop);
2045 }
2046
2047 /**
2048 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2049 * command
2050 *
2051 * @dev_priv: Pointer to a device private struct.
2052 * @sw_context: The software context being used for this batch.
2053 * @header: Pointer to the command header in the command stream.
2054 */
2055 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2056 struct vmw_sw_context *sw_context,
2057 SVGA3dCmdHeader *header)
2058 {
2059 struct vmw_shader_destroy_cmd {
2060 SVGA3dCmdHeader header;
2061 SVGA3dCmdDestroyShader body;
2062 } *cmd;
2063 int ret;
2064 struct vmw_resource *ctx;
2065
2066 cmd = container_of(header, struct vmw_shader_destroy_cmd,
2067 header);
2068
2069 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2070 user_context_converter, &cmd->body.cid,
2071 &ctx);
2072 if (unlikely(ret != 0))
2073 return ret;
2074
2075 if (unlikely(!dev_priv->has_mob))
2076 return 0;
2077
2078 ret = vmw_shader_remove(vmw_context_res_man(ctx),
2079 cmd->body.shid,
2080 cmd->body.type,
2081 &sw_context->staged_cmd_res);
2082 if (unlikely(ret != 0))
2083 return ret;
2084
2085 return vmw_resource_relocation_add(sw_context,
2086 NULL,
2087 vmw_ptr_diff(sw_context->buf_start,
2088 &cmd->header.id),
2089 vmw_res_rel_nop);
2090 }
2091
2092 /**
2093 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2094 * command
2095 *
2096 * @dev_priv: Pointer to a device private struct.
2097 * @sw_context: The software context being used for this batch.
2098 * @header: Pointer to the command header in the command stream.
2099 */
2100 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2101 struct vmw_sw_context *sw_context,
2102 SVGA3dCmdHeader *header)
2103 {
2104 struct vmw_set_shader_cmd {
2105 SVGA3dCmdHeader header;
2106 SVGA3dCmdSetShader body;
2107 } *cmd;
2108 struct vmw_ctx_bindinfo_shader binding;
2109 struct vmw_resource *ctx, *res = NULL;
2110 struct vmw_ctx_validation_info *ctx_info;
2111 int ret;
2112
2113 cmd = container_of(header, struct vmw_set_shader_cmd,
2114 header);
2115
2116 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2117 DRM_ERROR("Illegal shader type %u.\n",
2118 (unsigned) cmd->body.type);
2119 return -EINVAL;
2120 }
2121
2122 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2123 user_context_converter, &cmd->body.cid,
2124 &ctx);
2125 if (unlikely(ret != 0))
2126 return ret;
2127
2128 if (!dev_priv->has_mob)
2129 return 0;
2130
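	/*
	 * Prefer a kernel-managed (compat) shader registered with the
	 * context; fall back to a user-space shader resource lookup if
	 * none is found.
	 */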
2131 if (cmd->body.shid != SVGA3D_INVALID_ID) {
2132 res = vmw_shader_lookup(vmw_context_res_man(ctx),
2133 cmd->body.shid,
2134 cmd->body.type);
2135
2136 if (!IS_ERR(res)) {
2137 ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
2138 if (unlikely(ret != 0))
2139 return ret;
2140 }
2141 }
2142
2143 if (IS_ERR_OR_NULL(res)) {
2144 ret = vmw_cmd_res_check(dev_priv, sw_context,
2145 vmw_res_shader,
2146 user_shader_converter,
2147 &cmd->body.shid, &res);
2148 if (unlikely(ret != 0))
2149 return ret;
2150 }
2151
2152 ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2153 if (!ctx_info)
2154 return -EINVAL;
2155
2156 binding.bi.ctx = ctx;
2157 binding.bi.res = res;
2158 binding.bi.bt = vmw_ctx_binding_shader;
2159 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2160 vmw_binding_add(ctx_info->staged, &binding.bi,
2161 binding.shader_slot, 0);
2162 return 0;
2163 }
2164
2165 /**
2166 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2167 * command
2168 *
2169 * @dev_priv: Pointer to a device private struct.
2170 * @sw_context: The software context being used for this batch.
2171 * @header: Pointer to the command header in the command stream.
2172 */
2173 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2174 struct vmw_sw_context *sw_context,
2175 SVGA3dCmdHeader *header)
2176 {
2177 struct vmw_set_shader_const_cmd {
2178 SVGA3dCmdHeader header;
2179 SVGA3dCmdSetShaderConst body;
2180 } *cmd;
2181 int ret;
2182
2183 cmd = container_of(header, struct vmw_set_shader_const_cmd,
2184 header);
2185
2186 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2187 user_context_converter, &cmd->body.cid,
2188 NULL);
2189 if (unlikely(ret != 0))
2190 return ret;
2191
2192 if (dev_priv->has_mob)
2193 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2194
2195 return 0;
2196 }
2197
2198 /**
2199 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2200 * command
2201 *
2202 * @dev_priv: Pointer to a device private struct.
2203 * @sw_context: The software context being used for this batch.
2204 * @header: Pointer to the command header in the command stream.
2205 */
2206 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2207 struct vmw_sw_context *sw_context,
2208 SVGA3dCmdHeader *header)
2209 {
2210 struct vmw_bind_gb_shader_cmd {
2211 SVGA3dCmdHeader header;
2212 SVGA3dCmdBindGBShader body;
2213 } *cmd;
2214
2215 cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2216 header);
2217
2218 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2219 user_shader_converter,
2220 &cmd->body.shid, &cmd->body.mobid,
2221 cmd->body.offsetInBytes);
2222 }
2223
2224 /**
2225 * vmw_cmd_dx_set_single_constant_buffer - Validate an
2226 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2227 *
2228 * @dev_priv: Pointer to a device private struct.
2229 * @sw_context: The software context being used for this batch.
2230 * @header: Pointer to the command header in the command stream.
2231 */
2232 static int
2233 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2234 struct vmw_sw_context *sw_context,
2235 SVGA3dCmdHeader *header)
2236 {
2237 struct {
2238 SVGA3dCmdHeader header;
2239 SVGA3dCmdDXSetSingleConstantBuffer body;
2240 } *cmd;
2241 struct vmw_resource *res = NULL;
2242 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2243 struct vmw_ctx_bindinfo_cb binding;
2244 int ret;
2245
2246 if (unlikely(ctx_node == NULL)) {
2247 DRM_ERROR("DX Context not set.\n");
2248 return -EINVAL;
2249 }
2250
2251 cmd = container_of(header, typeof(*cmd), header);
2252 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2253 user_surface_converter,
2254 &cmd->body.sid, &res);
2255 if (unlikely(ret != 0))
2256 return ret;
2257
2258 binding.bi.ctx = ctx_node->ctx;
2259 binding.bi.res = res;
2260 binding.bi.bt = vmw_ctx_binding_cb;
2261 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2262 binding.offset = cmd->body.offsetInBytes;
2263 binding.size = cmd->body.sizeInBytes;
2264 binding.slot = cmd->body.slot;
2265
2266 if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2267 binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2268 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2269 (unsigned) cmd->body.type,
2270 (unsigned) binding.slot);
2271 return -EINVAL;
2272 }
2273
2274 vmw_binding_add(ctx_node->staged, &binding.bi,
2275 binding.shader_slot, binding.slot);
2276
2277 return 0;
2278 }
2279
2280 /**
2281 * vmw_cmd_dx_set_shader_res - Validate an
2282 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2283 *
2284 * @dev_priv: Pointer to a device private struct.
2285 * @sw_context: The software context being used for this batch.
2286 * @header: Pointer to the command header in the command stream.
2287 */
2288 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2289 struct vmw_sw_context *sw_context,
2290 SVGA3dCmdHeader *header)
2291 {
2292 struct {
2293 SVGA3dCmdHeader header;
2294 SVGA3dCmdDXSetShaderResources body;
2295 } *cmd = container_of(header, typeof(*cmd), header);
2296 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2297 sizeof(SVGA3dShaderResourceViewId);
2298
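	/* Promote to u64 so a large startView cannot wrap the range check. */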
2299 if ((u64) cmd->body.startView + (u64) num_sr_view >
2300 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2301 cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2302 DRM_ERROR("Invalid shader binding.\n");
2303 return -EINVAL;
2304 }
2305
2306 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2307 vmw_ctx_binding_sr,
2308 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2309 (void *) &cmd[1], num_sr_view,
2310 cmd->body.startView);
2311 }
2312
2313 /**
2314 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2315 * command
2316 *
2317 * @dev_priv: Pointer to a device private struct.
2318 * @sw_context: The software context being used for this batch.
2319 * @header: Pointer to the command header in the command stream.
2320 */
2321 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2322 struct vmw_sw_context *sw_context,
2323 SVGA3dCmdHeader *header)
2324 {
2325 struct {
2326 SVGA3dCmdHeader header;
2327 SVGA3dCmdDXSetShader body;
2328 } *cmd;
2329 struct vmw_resource *res = NULL;
2330 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2331 struct vmw_ctx_bindinfo_shader binding;
2332 int ret = 0;
2333
2334 if (unlikely(ctx_node == NULL)) {
2335 DRM_ERROR("DX Context not set.\n");
2336 return -EINVAL;
2337 }
2338
2339 cmd = container_of(header, typeof(*cmd), header);
2340
2341 if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2342 DRM_ERROR("Illegal shader type %u.\n",
2343 (unsigned) cmd->body.type);
2344 return -EINVAL;
2345 }
2346
2347 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2348 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2349 if (IS_ERR(res)) {
2350 DRM_ERROR("Could not find shader for binding.\n");
2351 return PTR_ERR(res);
2352 }
2353
2354 ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
2355 if (ret)
2356 return ret;
2357 }
2358
2359 binding.bi.ctx = ctx_node->ctx;
2360 binding.bi.res = res;
2361 binding.bi.bt = vmw_ctx_binding_dx_shader;
2362 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2363
2364 vmw_binding_add(ctx_node->staged, &binding.bi,
2365 binding.shader_slot, 0);
2366
2367 return 0;
2368 }
2369
2370 /**
2371 * vmw_cmd_dx_set_vertex_buffers - Validate an
2372 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2373 *
2374 * @dev_priv: Pointer to a device private struct.
2375 * @sw_context: The software context being used for this batch.
2376 * @header: Pointer to the command header in the command stream.
2377 */
2378 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2379 struct vmw_sw_context *sw_context,
2380 SVGA3dCmdHeader *header)
2381 {
2382 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2383 struct vmw_ctx_bindinfo_vb binding;
2384 struct vmw_resource *res;
2385 struct {
2386 SVGA3dCmdHeader header;
2387 SVGA3dCmdDXSetVertexBuffers body;
2388 SVGA3dVertexBuffer buf[];
2389 } *cmd;
2390 int i, ret, num;
2391
2392 if (unlikely(ctx_node == NULL)) {
2393 DRM_ERROR("DX Context not set.\n");
2394 return -EINVAL;
2395 }
2396
2397 cmd = container_of(header, typeof(*cmd), header);
2398 num = (cmd->header.size - sizeof(cmd->body)) /
2399 sizeof(SVGA3dVertexBuffer);
2400 if ((u64)num + (u64)cmd->body.startBuffer >
2401 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2402 DRM_ERROR("Invalid number of vertex buffers.\n");
2403 return -EINVAL;
2404 }
2405
2406 for (i = 0; i < num; i++) {
2407 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2408 user_surface_converter,
2409 &cmd->buf[i].sid, &res);
2410 if (unlikely(ret != 0))
2411 return ret;
2412
2413 binding.bi.ctx = ctx_node->ctx;
2414 binding.bi.bt = vmw_ctx_binding_vb;
2415 binding.bi.res = res;
2416 binding.offset = cmd->buf[i].offset;
2417 binding.stride = cmd->buf[i].stride;
2418 binding.slot = i + cmd->body.startBuffer;
2419
2420 vmw_binding_add(ctx_node->staged, &binding.bi,
2421 0, binding.slot);
2422 }
2423
2424 return 0;
2425 }
2426
2427 /**
2428 * vmw_cmd_dx_set_index_buffer - Validate an
2429 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2430 *
2431 * @dev_priv: Pointer to a device private struct.
2432 * @sw_context: The software context being used for this batch.
2433 * @header: Pointer to the command header in the command stream.
2434 */
2435 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2436 struct vmw_sw_context *sw_context,
2437 SVGA3dCmdHeader *header)
2438 {
2439 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2440 struct vmw_ctx_bindinfo_ib binding;
2441 struct vmw_resource *res;
2442 struct {
2443 SVGA3dCmdHeader header;
2444 SVGA3dCmdDXSetIndexBuffer body;
2445 } *cmd;
2446 int ret;
2447
2448 if (unlikely(ctx_node == NULL)) {
2449 DRM_ERROR("DX Context not set.\n");
2450 return -EINVAL;
2451 }
2452
2453 cmd = container_of(header, typeof(*cmd), header);
2454 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2455 user_surface_converter,
2456 &cmd->body.sid, &res);
2457 if (unlikely(ret != 0))
2458 return ret;
2459
2460 binding.bi.ctx = ctx_node->ctx;
2461 binding.bi.res = res;
2462 binding.bi.bt = vmw_ctx_binding_ib;
2463 binding.offset = cmd->body.offset;
2464 binding.format = cmd->body.format;
2465
2466 vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2467
2468 return 0;
2469 }
2470
2471 /**
2472 * vmw_cmd_dx_set_rendertargets - Validate an
2473 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2474 *
2475 * @dev_priv: Pointer to a device private struct.
2476 * @sw_context: The software context being used for this batch.
2477 * @header: Pointer to the command header in the command stream.
2478 */
2479 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2480 struct vmw_sw_context *sw_context,
2481 SVGA3dCmdHeader *header)
2482 {
2483 struct {
2484 SVGA3dCmdHeader header;
2485 SVGA3dCmdDXSetRenderTargets body;
2486 } *cmd = container_of(header, typeof(*cmd), header);
2487 int ret;
2488 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2489 sizeof(SVGA3dRenderTargetViewId);
2490
2491 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2492 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2493 return -EINVAL;
2494 }
2495
2496 ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2497 vmw_ctx_binding_ds, 0,
2498 &cmd->body.depthStencilViewId, 1, 0);
2499 if (ret)
2500 return ret;
2501
2502 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2503 vmw_ctx_binding_dx_rt, 0,
2504 (void *)&cmd[1], num_rt_view, 0);
2505 }
2506
2507 /**
2508 * vmw_cmd_dx_clear_rendertarget_view - Validate an
2509 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2510 *
2511 * @dev_priv: Pointer to a device private struct.
2512 * @sw_context: The software context being used for this batch.
2513 * @header: Pointer to the command header in the command stream.
2514 */
2515 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2516 struct vmw_sw_context *sw_context,
2517 SVGA3dCmdHeader *header)
2518 {
2519 struct {
2520 SVGA3dCmdHeader header;
2521 SVGA3dCmdDXClearRenderTargetView body;
2522 } *cmd = container_of(header, typeof(*cmd), header);
2523
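	/* PTR_RET() maps an ERR_PTR() to its error code and a valid pointer to 0. */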
2524 return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
2525 cmd->body.renderTargetViewId));
2526 }
2527
2528 /**
2529 * vmw_cmd_dx_clear_depthstencil_view - Validate an
2530 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2531 *
2532 * @dev_priv: Pointer to a device private struct.
2533 * @sw_context: The software context being used for this batch.
2534 * @header: Pointer to the command header in the command stream.
2535 */
2536 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2537 struct vmw_sw_context *sw_context,
2538 SVGA3dCmdHeader *header)
2539 {
2540 struct {
2541 SVGA3dCmdHeader header;
2542 SVGA3dCmdDXClearDepthStencilView body;
2543 } *cmd = container_of(header, typeof(*cmd), header);
2544
2545 return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
2546 cmd->body.depthStencilViewId));
2547 }
2548
2549 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2550 struct vmw_sw_context *sw_context,
2551 SVGA3dCmdHeader *header)
2552 {
2553 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2554 struct vmw_resource *srf;
2555 struct vmw_resource *res;
2556 enum vmw_view_type view_type;
2557 int ret;
2558 /*
2559 * This is based on the fact that all affected define commands have
2560 * the same initial command body layout.
2561 */
2562 struct {
2563 SVGA3dCmdHeader header;
2564 uint32 defined_id;
2565 uint32 sid;
2566 } *cmd;
2567
2568 if (unlikely(ctx_node == NULL)) {
2569 DRM_ERROR("DX Context not set.\n");
2570 return -EINVAL;
2571 }
2572
2573 view_type = vmw_view_cmd_to_type(header->id);
2574 if (view_type == vmw_view_max)
2575 return -EINVAL;
2576 cmd = container_of(header, typeof(*cmd), header);
2577 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2578 user_surface_converter,
2579 &cmd->sid, &srf);
2580 if (unlikely(ret != 0))
2581 return ret;
2582
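	/*
	 * Let the view cotable know about the new id so that it can be
	 * resized to accommodate the entry before the view is created.
	 */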
2583 res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2584 ret = vmw_cotable_notify(res, cmd->defined_id);
2585 if (unlikely(ret != 0))
2586 return ret;
2587
2588 return vmw_view_add(sw_context->man,
2589 ctx_node->ctx,
2590 srf,
2591 view_type,
2592 cmd->defined_id,
2593 header,
2594 header->size + sizeof(*header),
2595 &sw_context->staged_cmd_res);
2596 }
2597
2598 /**
2599 * vmw_cmd_dx_set_so_targets - Validate an
2600 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2601 *
2602 * @dev_priv: Pointer to a device private struct.
2603 * @sw_context: The software context being used for this batch.
2604 * @header: Pointer to the command header in the command stream.
2605 */
2606 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2607 struct vmw_sw_context *sw_context,
2608 SVGA3dCmdHeader *header)
2609 {
2610 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2611 struct vmw_ctx_bindinfo_so binding;
2612 struct vmw_resource *res;
2613 struct {
2614 SVGA3dCmdHeader header;
2615 SVGA3dCmdDXSetSOTargets body;
2616 SVGA3dSoTarget targets[];
2617 } *cmd;
2618 int i, ret, num;
2619
2620 if (unlikely(ctx_node == NULL)) {
2621 DRM_ERROR("DX Context not set.\n");
2622 return -EINVAL;
2623 }
2624
2625 cmd = container_of(header, typeof(*cmd), header);
2626 num = (cmd->header.size - sizeof(cmd->body)) /
2627 sizeof(SVGA3dSoTarget);
2628
2629 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2630 DRM_ERROR("Invalid DX SO binding.\n");
2631 return -EINVAL;
2632 }
2633
2634 for (i = 0; i < num; i++) {
2635 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2636 user_surface_converter,
2637 &cmd->targets[i].sid, &res);
2638 if (unlikely(ret != 0))
2639 return ret;
2640
2641 binding.bi.ctx = ctx_node->ctx;
2642 binding.bi.res = res;
2643 binding.bi.bt = vmw_ctx_binding_so;
2644 binding.offset = cmd->targets[i].offset;
2645 binding.size = cmd->targets[i].sizeInBytes;
2646 binding.slot = i;
2647
2648 vmw_binding_add(ctx_node->staged, &binding.bi,
2649 0, binding.slot);
2650 }
2651
2652 return 0;
2653 }
2654
2655 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2656 struct vmw_sw_context *sw_context,
2657 SVGA3dCmdHeader *header)
2658 {
2659 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2660 struct vmw_resource *res;
2661 /*
2662 * This is based on the fact that all affected define commands have
2663 * the same initial command body layout.
2664 */
2665 struct {
2666 SVGA3dCmdHeader header;
2667 uint32 defined_id;
2668 } *cmd;
2669 enum vmw_so_type so_type;
2671
2672 if (unlikely(ctx_node == NULL)) {
2673 DRM_ERROR("DX Context not set.\n");
2674 return -EINVAL;
2675 }
2676
2677 so_type = vmw_so_cmd_to_type(header->id);
2678 res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2679 cmd = container_of(header, typeof(*cmd), header);
2680 return vmw_cotable_notify(res, cmd->defined_id);
2683 }
2684
2685 /**
2686 * vmw_cmd_dx_check_subresource - Validate an
2687 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2688 *
2689 * @dev_priv: Pointer to a device private struct.
2690 * @sw_context: The software context being used for this batch.
2691 * @header: Pointer to the command header in the command stream.
2692 */
2693 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2694 struct vmw_sw_context *sw_context,
2695 SVGA3dCmdHeader *header)
2696 {
2697 struct {
2698 SVGA3dCmdHeader header;
2699 union {
2700 SVGA3dCmdDXReadbackSubResource r_body;
2701 SVGA3dCmdDXInvalidateSubResource i_body;
2702 SVGA3dCmdDXUpdateSubResource u_body;
2703 SVGA3dSurfaceId sid;
2704 };
2705 } *cmd;
2706
2707 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2708 offsetof(typeof(*cmd), sid));
2709 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2710 offsetof(typeof(*cmd), sid));
2711 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2712 offsetof(typeof(*cmd), sid));
2713
2714 cmd = container_of(header, typeof(*cmd), header);
2715
2716 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2717 user_surface_converter,
2718 &cmd->sid, NULL);
2719 }
2720
2721 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2722 struct vmw_sw_context *sw_context,
2723 SVGA3dCmdHeader *header)
2724 {
2725 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2726
2727 if (unlikely(ctx_node == NULL)) {
2728 DRM_ERROR("DX Context not set.\n");
2729 return -EINVAL;
2730 }
2731
2732 return 0;
2733 }
2734
2735 /**
2736 * vmw_cmd_dx_view_remove - Validate a view remove command and
2737 * schedule the view resource for removal.
2738 *
2739 * @dev_priv: Pointer to a device private struct.
2740 * @sw_context: The software context being used for this batch.
2741 * @header: Pointer to the command header in the command stream.
2742 *
2743 * Check that the view exists, and if it was not created using this
2744 * command batch, conditionally make this command a NOP.
2745 */
2746 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2747 struct vmw_sw_context *sw_context,
2748 SVGA3dCmdHeader *header)
2749 {
2750 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2751 struct {
2752 SVGA3dCmdHeader header;
2753 union vmw_view_destroy body;
2754 } *cmd = container_of(header, typeof(*cmd), header);
2755 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2756 struct vmw_resource *view;
2757 int ret;
2758
2759 if (!ctx_node) {
2760 DRM_ERROR("DX Context not set.\n");
2761 return -EINVAL;
2762 }
2763
2764 ret = vmw_view_remove(sw_context->man,
2765 cmd->body.view_id, view_type,
2766 &sw_context->staged_cmd_res,
2767 &view);
2768 if (ret || !view)
2769 return ret;
2770
2771 /*
2772 * If the view wasn't created during this command batch, it might
2773 * have been removed due to a context swapout, so add a
2774 * relocation to conditionally make this command a NOP to avoid
2775 * device errors.
2776 */
2777 return vmw_resource_relocation_add(sw_context,
2778 view,
2779 vmw_ptr_diff(sw_context->buf_start,
2780 &cmd->header.id),
2781 vmw_res_rel_cond_nop);
2782 }
2783
2784 /**
2785 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2786 * command
2787 *
2788 * @dev_priv: Pointer to a device private struct.
2789 * @sw_context: The software context being used for this batch.
2790 * @header: Pointer to the command header in the command stream.
2791 */
2792 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2793 struct vmw_sw_context *sw_context,
2794 SVGA3dCmdHeader *header)
2795 {
2796 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2797 struct vmw_resource *res;
2798 struct {
2799 SVGA3dCmdHeader header;
2800 SVGA3dCmdDXDefineShader body;
2801 } *cmd = container_of(header, typeof(*cmd), header);
2802 int ret;
2803
2804 if (!ctx_node) {
2805 DRM_ERROR("DX Context not set.\n");
2806 return -EINVAL;
2807 }
2808
2809 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2810 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2811 if (ret)
2812 return ret;
2813
2814 return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2815 cmd->body.shaderId, cmd->body.type,
2816 &sw_context->staged_cmd_res);
2817 }
2818
2819 /**
2820 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2821 * command
2822 *
2823 * @dev_priv: Pointer to a device private struct.
2824 * @sw_context: The software context being used for this batch.
2825 * @header: Pointer to the command header in the command stream.
2826 */
2827 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2828 struct vmw_sw_context *sw_context,
2829 SVGA3dCmdHeader *header)
2830 {
2831 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2832 struct {
2833 SVGA3dCmdHeader header;
2834 SVGA3dCmdDXDestroyShader body;
2835 } *cmd = container_of(header, typeof(*cmd), header);
2836 int ret;
2837
2838 if (!ctx_node) {
2839 DRM_ERROR("DX Context not set.\n");
2840 return -EINVAL;
2841 }
2842
2843 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2844 &sw_context->staged_cmd_res);
2845 if (ret)
2846 DRM_ERROR("Could not find shader to remove.\n");
2847
2848 return ret;
2849 }
2850
2851 /**
2852 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
2853 * command
2854 *
2855 * @dev_priv: Pointer to a device private struct.
2856 * @sw_context: The software context being used for this batch.
2857 * @header: Pointer to the command header in the command stream.
2858 */
2859 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2860 struct vmw_sw_context *sw_context,
2861 SVGA3dCmdHeader *header)
2862 {
2863 struct vmw_resource *ctx;
2864 struct vmw_resource *res;
2865 struct {
2866 SVGA3dCmdHeader header;
2867 SVGA3dCmdDXBindShader body;
2868 } *cmd = container_of(header, typeof(*cmd), header);
2869 int ret;
2870
2871 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2872 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2873 user_context_converter,
2874 &cmd->body.cid, &ctx);
2875 if (ret)
2876 return ret;
2877 } else {
2878 if (!sw_context->dx_ctx_node) {
2879 DRM_ERROR("DX Context not set.\n");
2880 return -EINVAL;
2881 }
2882 ctx = sw_context->dx_ctx_node->ctx;
2883 }
2884
2885 res = vmw_shader_lookup(vmw_context_res_man(ctx),
2886 cmd->body.shid, 0);
2887 if (IS_ERR(res)) {
2888 DRM_ERROR("Could not find shader to bind.\n");
2889 return PTR_ERR(res);
2890 }
2891
2892 ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
2893 if (ret) {
2894 DRM_ERROR("Error creating resource validation node.\n");
2895 return ret;
2896 }
2897
2898 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2899 &cmd->body.mobid,
2900 cmd->body.offsetInBytes);
2901 }
2902
2903 /**
2904 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
2905 *
2906 * @dev_priv: Pointer to a device private struct.
2907 * @sw_context: The software context being used for this batch.
2908 * @header: Pointer to the command header in the command stream.
2909 */
2910 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2911 struct vmw_sw_context *sw_context,
2912 SVGA3dCmdHeader *header)
2913 {
2914 struct {
2915 SVGA3dCmdHeader header;
2916 SVGA3dCmdDXGenMips body;
2917 } *cmd = container_of(header, typeof(*cmd), header);
2918
2919 return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
2920 cmd->body.shaderResourceViewId));
2921 }
2922
2923 /**
2924 * vmw_cmd_dx_transfer_from_buffer -
2925 * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2926 *
2927 * @dev_priv: Pointer to a device private struct.
2928 * @sw_context: The software context being used for this batch.
2929 * @header: Pointer to the command header in the command stream.
2930 */
2931 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2932 struct vmw_sw_context *sw_context,
2933 SVGA3dCmdHeader *header)
2934 {
2935 struct {
2936 SVGA3dCmdHeader header;
2937 SVGA3dCmdDXTransferFromBuffer body;
2938 } *cmd = container_of(header, typeof(*cmd), header);
2939 int ret;
2940
2941 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2942 user_surface_converter,
2943 &cmd->body.srcSid, NULL);
2944 if (ret != 0)
2945 return ret;
2946
2947 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2948 user_surface_converter,
2949 &cmd->body.destSid, NULL);
2950 }
2951
2952 /**
2953 * vmw_cmd_intra_surface_copy -
2954 * Validate an SVGA_3D_CMD_INTRA_SURFACE_COPY command
2955 *
2956 * @dev_priv: Pointer to a device private struct.
2957 * @sw_context: The software context being used for this batch.
2958 * @header: Pointer to the command header in the command stream.
2959 */
2960 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2961 struct vmw_sw_context *sw_context,
2962 SVGA3dCmdHeader *header)
2963 {
2964 struct {
2965 SVGA3dCmdHeader header;
2966 SVGA3dCmdIntraSurfaceCopy body;
2967 } *cmd = container_of(header, typeof(*cmd), header);
2968
2969 if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2970 return -EINVAL;
2971
2972 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2973 user_surface_converter,
2974 &cmd->body.surface.sid, NULL);
2975 }
2976
2978 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
2979 struct vmw_sw_context *sw_context,
2980 void *buf, uint32_t *size)
2981 {
2982 uint32_t size_remaining = *size;
2983 uint32_t cmd_id;
2984
2985 cmd_id = ((uint32_t *)buf)[0];
2986 switch (cmd_id) {
2987 case SVGA_CMD_UPDATE:
2988 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
2989 break;
2990 case SVGA_CMD_DEFINE_GMRFB:
2991 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
2992 break;
2993 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
2994 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2995 break;
2996 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
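		/*
		 * SVGAFifoCmdBlitScreenToGMRFB appears to have the same size as
		 * SVGAFifoCmdBlitGMRFBToScreen, so the latter's size is reused
		 * here.
		 */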
2997 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2998 break;
2999 default:
3000 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3001 return -EINVAL;
3002 }
3003
3004 if (*size > size_remaining) {
3005 DRM_ERROR("Invalid SVGA command (size mismatch):"
3006 " %u.\n", cmd_id);
3007 return -EINVAL;
3008 }
3009
3010 if (unlikely(!sw_context->kernel)) {
3011 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3012 return -EPERM;
3013 }
3014
3015 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3016 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3017
3018 return 0;
3019 }
3020
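/*
 * Per-command dispatch table. Going by the checks in vmw_cmd_check(), the
 * three booleans following each validator function correspond to the entry's
 * user_allow, gb_disable and gb_enable members: whether unprivileged
 * user-space may issue the command, whether the command is disallowed when
 * guest-backed objects are in use, and whether it requires guest-backed
 * object support.
 */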
3021 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3022 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3023 false, false, false),
3024 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3025 false, false, false),
3026 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3027 true, false, false),
3028 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3029 true, false, false),
3030 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3031 true, false, false),
3032 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3033 false, false, false),
3034 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3035 false, false, false),
3036 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3037 true, false, false),
3038 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3039 true, false, false),
3040 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3041 true, false, false),
3042 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3043 &vmw_cmd_set_render_target_check, true, false, false),
3044 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3045 true, false, false),
3046 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3047 true, false, false),
3048 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3049 true, false, false),
3050 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3051 true, false, false),
3052 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3053 true, false, false),
3054 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3055 true, false, false),
3056 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3057 true, false, false),
3058 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3059 false, false, false),
3060 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3061 true, false, false),
3062 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3063 true, false, false),
3064 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3065 true, false, false),
3066 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3067 true, false, false),
3068 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3069 true, false, false),
3070 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3071 true, false, false),
3072 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3073 true, false, false),
3074 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3075 true, false, false),
3076 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3077 true, false, false),
3078 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3079 true, false, false),
3080 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3081 &vmw_cmd_blt_surf_screen_check, false, false, false),
3082 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3083 false, false, false),
3084 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3085 false, false, false),
3086 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3087 false, false, false),
3088 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3089 false, false, false),
3090 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3091 false, false, false),
3092 VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3093 false, false, false),
3094 VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3095 false, false, false),
3096 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3097 false, false, false),
3098 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3099 false, false, false),
3100 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3101 false, false, false),
3102 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3103 false, false, false),
3104 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3105 false, false, false),
3106 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3107 false, false, false),
3108 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3109 false, false, true),
3110 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3111 false, false, true),
3112 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3113 false, false, true),
3114 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3115 false, false, true),
3116 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3117 false, false, true),
3118 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3119 false, false, true),
3120 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3121 false, false, true),
3122 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3123 false, false, true),
3124 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3125 true, false, true),
3126 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3127 false, false, true),
3128 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3129 true, false, true),
3130 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3131 &vmw_cmd_update_gb_surface, true, false, true),
3132 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3133 &vmw_cmd_readback_gb_image, true, false, true),
3134 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3135 &vmw_cmd_readback_gb_surface, true, false, true),
3136 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3137 &vmw_cmd_invalidate_gb_image, true, false, true),
3138 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3139 &vmw_cmd_invalidate_gb_surface, true, false, true),
3140 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3141 false, false, true),
3142 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3143 false, false, true),
3144 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3145 false, false, true),
3146 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3147 false, false, true),
3148 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3149 false, false, true),
3150 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3151 false, false, true),
3152 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3153 true, false, true),
3154 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3155 false, false, true),
3156 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3157 false, false, false),
3158 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3159 true, false, true),
3160 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3161 true, false, true),
3162 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3163 true, false, true),
3164 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3165 true, false, true),
3166 VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3167 true, false, true),
3168 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3169 false, false, true),
3170 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3171 false, false, true),
3172 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3173 false, false, true),
3174 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3175 false, false, true),
3176 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3177 false, false, true),
3178 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3179 false, false, true),
3180 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3181 false, false, true),
3182 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3183 false, false, true),
3184 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3185 false, false, true),
3186 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3187 false, false, true),
3188 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3189 true, false, true),
3190 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3191 false, false, true),
3192 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3193 false, false, true),
3194 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3195 false, false, true),
3196 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3197 false, false, true),
3198
3199 /*
3200 * DX commands
3201 */
3202 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3203 false, false, true),
3204 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3205 false, false, true),
3206 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3207 false, false, true),
3208 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3209 false, false, true),
3210 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3211 false, false, true),
3212 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3213 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3214 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3215 &vmw_cmd_dx_set_shader_res, true, false, true),
3216 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3217 true, false, true),
3218 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3219 true, false, true),
3220 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3221 true, false, true),
3222 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3223 true, false, true),
3224 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3225 true, false, true),
3226 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3227 &vmw_cmd_dx_cid_check, true, false, true),
3228 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3229 true, false, true),
3230 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3231 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3232 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3233 &vmw_cmd_dx_set_index_buffer, true, false, true),
3234 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3235 &vmw_cmd_dx_set_rendertargets, true, false, true),
3236 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3237 true, false, true),
3238 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3239 &vmw_cmd_dx_cid_check, true, false, true),
3240 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3241 &vmw_cmd_dx_cid_check, true, false, true),
3242 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3243 true, false, true),
3244 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3245 true, false, true),
3246 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3247 true, false, true),
3248 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3249 &vmw_cmd_dx_cid_check, true, false, true),
3250 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3251 true, false, true),
3252 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3253 true, false, true),
3254 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3255 true, false, true),
3256 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3257 true, false, true),
3258 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3259 true, false, true),
3260 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3261 true, false, true),
3262 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3263 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3264 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3265 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3266 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3267 true, false, true),
3268 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3269 true, false, true),
3270 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3271 &vmw_cmd_dx_check_subresource, true, false, true),
3272 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3273 &vmw_cmd_dx_check_subresource, true, false, true),
3274 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3275 &vmw_cmd_dx_check_subresource, true, false, true),
3276 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3277 &vmw_cmd_dx_view_define, true, false, true),
3278 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3279 &vmw_cmd_dx_view_remove, true, false, true),
3280 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3281 &vmw_cmd_dx_view_define, true, false, true),
3282 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3283 &vmw_cmd_dx_view_remove, true, false, true),
3284 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3285 &vmw_cmd_dx_view_define, true, false, true),
3286 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3287 &vmw_cmd_dx_view_remove, true, false, true),
3288 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3289 &vmw_cmd_dx_so_define, true, false, true),
3290 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3291 &vmw_cmd_dx_cid_check, true, false, true),
3292 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3293 &vmw_cmd_dx_so_define, true, false, true),
3294 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3295 &vmw_cmd_dx_cid_check, true, false, true),
3296 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3297 &vmw_cmd_dx_so_define, true, false, true),
3298 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3299 &vmw_cmd_dx_cid_check, true, false, true),
3300 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3301 &vmw_cmd_dx_so_define, true, false, true),
3302 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3303 &vmw_cmd_dx_cid_check, true, false, true),
3304 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3305 &vmw_cmd_dx_so_define, true, false, true),
3306 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3307 &vmw_cmd_dx_cid_check, true, false, true),
3308 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3309 &vmw_cmd_dx_define_shader, true, false, true),
3310 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3311 &vmw_cmd_dx_destroy_shader, true, false, true),
3312 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3313 &vmw_cmd_dx_bind_shader, true, false, true),
3314 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3315 &vmw_cmd_dx_so_define, true, false, true),
3316 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3317 &vmw_cmd_dx_cid_check, true, false, true),
3318 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3319 true, false, true),
3320 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3321 &vmw_cmd_dx_set_so_targets, true, false, true),
3322 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3323 &vmw_cmd_dx_cid_check, true, false, true),
3324 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3325 &vmw_cmd_dx_cid_check, true, false, true),
3326 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3327 &vmw_cmd_buffer_copy_check, true, false, true),
3328 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3329 &vmw_cmd_pred_copy_check, true, false, true),
3330 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3331 &vmw_cmd_dx_transfer_from_buffer,
3332 true, false, true),
3333 VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3334 true, false, true),
3335 };
3336
3337 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3338 {
3339 u32 cmd_id = ((u32 *) buf)[0];
3340
3341 if (cmd_id >= SVGA_CMD_MAX) {
3342 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3343 const struct vmw_cmd_entry *entry;
3344
3345 *size = header->size + sizeof(SVGA3dCmdHeader);
3346 cmd_id = header->id;
3347 if (cmd_id >= SVGA_3D_CMD_MAX)
3348 return false;
3349
3350 cmd_id -= SVGA_3D_CMD_BASE;
3351 entry = &vmw_cmd_entries[cmd_id];
3352 *cmd = entry->cmd_name;
3353 return true;
3354 }
3355
3356 switch (cmd_id) {
3357 case SVGA_CMD_UPDATE:
3358 *cmd = "SVGA_CMD_UPDATE";
3359 *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3360 break;
3361 case SVGA_CMD_DEFINE_GMRFB:
3362 *cmd = "SVGA_CMD_DEFINE_GMRFB";
3363 *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3364 break;
3365 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3366 *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3367 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3368 break;
3369 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3370 *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3371 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3372 break;
3373 default:
3374 *cmd = "UNKNOWN";
3375 *size = 0;
3376 return false;
3377 }
3378
3379 return true;
3380 }
3381
3382 static int vmw_cmd_check(struct vmw_private *dev_priv,
3383 struct vmw_sw_context *sw_context,
3384 void *buf, uint32_t *size)
3385 {
3386 uint32_t cmd_id;
3387 uint32_t size_remaining = *size;
3388 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3389 int ret;
3390 const struct vmw_cmd_entry *entry;
3391 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3392
3393 cmd_id = ((uint32_t *)buf)[0];
3394 /* Handle any non-3D commands. */
3395 if (unlikely(cmd_id < SVGA_CMD_MAX))
3396 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3397
3399 cmd_id = header->id;
3400 *size = header->size + sizeof(SVGA3dCmdHeader);
3401
3402 cmd_id -= SVGA_3D_CMD_BASE;
3403 if (unlikely(*size > size_remaining))
3404 goto out_invalid;
3405
3406 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3407 goto out_invalid;
3408
3409 entry = &vmw_cmd_entries[cmd_id];
3410 if (unlikely(!entry->func))
3411 goto out_invalid;
3412
3413 if (unlikely(!entry->user_allow && !sw_context->kernel))
3414 goto out_privileged;
3415
3416 if (unlikely(entry->gb_disable && gb))
3417 goto out_old;
3418
3419 if (unlikely(entry->gb_enable && !gb))
3420 goto out_new;
3421
3422 ret = entry->func(dev_priv, sw_context, header);
3423 if (unlikely(ret != 0))
3424 goto out_invalid;
3425
3426 return 0;
3427 out_invalid:
3428 DRM_ERROR("Invalid SVGA3D command: %d\n",
3429 cmd_id + SVGA_3D_CMD_BASE);
3430 return -EINVAL;
3431 out_privileged:
3432 DRM_ERROR("Privileged SVGA3D command: %d\n",
3433 cmd_id + SVGA_3D_CMD_BASE);
3434 return -EPERM;
3435 out_old:
3436 DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3437 cmd_id + SVGA_3D_CMD_BASE);
3438 return -EINVAL;
3439 out_new:
3440 DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3441 cmd_id + SVGA_3D_CMD_BASE);
3442 return -EINVAL;
3443 }
3444
3445 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3446 struct vmw_sw_context *sw_context,
3447 void *buf,
3448 uint32_t size)
3449 {
3450 int32_t cur_size = size;
3451 int ret;
3452
3453 sw_context->buf_start = buf;
3454
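	/*
	 * vmw_cmd_check() shrinks @size to the size of the command it
	 * actually validated, which is then used to advance to the next
	 * command in the stream.
	 */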
3455 while (cur_size > 0) {
3456 size = cur_size;
3457 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3458 if (unlikely(ret != 0))
3459 return ret;
3460 buf = (void *)((unsigned long) buf + size);
3461 cur_size -= size;
3462 }
3463
3464 if (unlikely(cur_size != 0)) {
3465 DRM_ERROR("Command verifier out of sync.\n");
3466 return -EINVAL;
3467 }
3468
3469 return 0;
3470 }
3471
3472 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3473 {
3474 /* Memory is validation context memory, so no need to free it */
3475
3476 INIT_LIST_HEAD(&sw_context->bo_relocations);
3477 }
3478
3479 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3480 {
3481 struct vmw_relocation *reloc;
3482 struct ttm_buffer_object *bo;
3483
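	/*
	 * Patch each recorded command-stream location according to where
	 * the buffer object finally ended up: VRAM placements become
	 * offsets into the framebuffer GMR, GMR placements store the GMR
	 * id, and MOB placements store the mob id.
	 */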
3484 list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3485 bo = &reloc->vbo->base;
3486 switch (bo->mem.mem_type) {
3487 case TTM_PL_VRAM:
3488 reloc->location->offset += bo->offset;
3489 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3490 break;
3491 case VMW_PL_GMR:
3492 reloc->location->gmrId = bo->mem.start;
3493 break;
3494 case VMW_PL_MOB:
3495 *reloc->mob_loc = bo->mem.start;
3496 break;
3497 default:
3498 BUG();
3499 }
3500 }
3501 vmw_free_relocations(sw_context);
3502 }
3503
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * @file_priv: Pointer to the calling file. Must be non-NULL if @p_handle is.
 * @dev_priv: Pointer to a device private structure.
 * @p_fence: Returns a ref-counted pointer to the created fence object.
 * @p_handle: If non-NULL, returns a user-space handle for the fence object.
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and set *@p_fence to
 * NULL. It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 * user-space handle is created; otherwise it is not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
 * @sync_file: Only used to clean up in case of an error in this function.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully left
 * untouched, and if it was preloaded with an -EFAULT by user-space, the
 * error will hopefully be detected.
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle,
			    int32_t out_fence_fd,
			    struct sync_file *sync_file)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		if (sync_file)
			fput(sync_file->file);

		if (fence_rep.fd != -1) {
			put_unused_fd(fence_rep.fd);
			fence_rep.fd = -1;
		}

		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel-space pointer to the commands, or NULL if they
 * should be copied from @user_commands into a command buffer.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands passed in on the function call. That value may be
 * NULL. In that case, the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error
 * value. If the function is interrupted by a signal while sleeping, it
 * will return -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

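/**
 * vmw_execbuf_tie_context - Look up and validate the DX context of a
 * command submission
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID if
 * the submission is not tied to a DX context.
 *
 * On success, @sw_context->dx_ctx_node and @sw_context->man are set up to
 * refer to the looked-up context and its resource manager.
 */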
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	res = vmw_user_resource_noref_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter);
	if (unlikely(IS_ERR(res))) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noref_val_add(sw_context, res);
	if (unlikely(ret != 0))
		return ret;

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	return 0;
}

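/**
 * vmw_execbuf_process - Validate, patch and submit a command batch
 *
 * @file_priv: Pointer to the calling file. Must be non-NULL if
 * @user_fence_rep is.
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the command batch, or NULL.
 * @kernel_commands: Kernel-space pointer to the command batch, or NULL.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: If non-zero, wait until the fifo lag is less than this
 * value before submitting.
 * @dx_context_handle: User-space handle of the DX context to tie the
 * submission to, or SVGA3D_INVALID_ID.
 * @user_fence_rep: User-space address to copy fence information to,
 * or NULL.
 * @out_fence: If non-NULL, returns a ref-counted pointer to the fence
 * created for the batch instead of unreferencing it.
 * @flags: DRM_VMW_EXECBUF_FLAG_* flags controlling, among other things,
 * fence fd export.
 */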
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence,
			uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_cmdbuf_header *header;
	uint32_t handle;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			DRM_ERROR("Failed to get a fence file descriptor.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			goto out_free_fence_fd;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	INIT_LIST_HEAD(&sw_context->ctx_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->res_relocations);
	INIT_LIST_HEAD(&sw_context->bo_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	sw_context->ctx = &val_ctx;
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_reserve(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validation_res_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;
	vmw_validation_drop_ht(&val_ctx);

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence
	 * and do a sync since the user mode will not be able to sync
	 * the fence itself. This ensures we are still functionally
	 * correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			DRM_ERROR("Unable to create sync file for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle,
				    out_fence_fd, sync_file);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);
out_unlock:
	return;

out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

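/**
 * vmw_execbuf_ioctl - Ioctl entry point for command submission
 *
 * @dev: Pointer to the drm device.
 * @data: User-space address of a struct drm_vmw_execbuf_arg, cast to an
 * unsigned long.
 * @file_priv: Pointer to the calling file.
 * @size: Size of the ioctl argument as declared by user-space.
 *
 * Copies in the ioctl argument, handling the differently sized argument
 * layouts of the supported execbuf versions, optionally waits on an
 * imported fence fd, and hands the command batch to vmw_execbuf_process().
 */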
int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};
	struct dma_fence *in_fence = NULL;

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while maintaining backwards
	 * compatibility: We take different code paths depending on the
	 * value of arg.version.
	 */
	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		break;
	}

	/* If imported a fence FD from elsewhere, then wait on it */
	if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg.imported_fence_fd);

		if (!in_fence) {
			DRM_ERROR("Cannot get imported fence\n");
			return -EINVAL;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out; /* Drop any imported fence reference. */

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL,
				  arg.flags);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);
	return ret;
}