/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include <linux/suspend.h>
#include <linux/sync_file.h>
#include <linux/hashtable.h>

#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_rect.h>

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo.h>

#include "ttm_object.h"

#include "vmwgfx_fence.h"
#include "vmwgfx_reg.h"
#include "vmwgfx_validation.h"

/*
 * FIXME: vmwgfx_drm.h needs to be last due to dependencies.
 * uapi headers should not depend on header files outside uapi/.
 */
#include <drm/vmwgfx_drm.h>
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DATE "20211206"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 20
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768

#define VMWGFX_MIN_INITIAL_WIDTH 1280
#define VMWGFX_MIN_INITIAL_HEIGHT 800

#define VMWGFX_PCI_ID_SVGA2              0x0405
#define VMWGFX_PCI_ID_SVGA3              0x0406

/*
 * This has to match get_count_order(SVGA_IRQFLAG_MAX)
 */
#define VMWGFX_MAX_NUM_IRQS 6

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR (TTM_PL_PRIV + 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)
#define VMW_PL_SYSTEM (TTM_PL_PRIV + 2)

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4
#define VMW_RES_HT_ORDER 12

#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8
#define VMW_CURSOR_SNOOP_WIDTH 64
#define VMW_CURSOR_SNOOP_HEIGHT 64

#define MKSSTAT_CAPACITY_LOG2 5U
#define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2)
struct vmw_fpriv {
	struct ttm_object_file *tfile;
	bool gb_aware; /* user-space is guest-backed aware */
};

struct vmwgfx_hash_item {
	struct hlist_node head;
	unsigned long key;
};
/**
 * struct vmw_resource - base class for hardware resources
 *
 * @kref: For refcounting.
 * @dev_priv: Pointer to the device private for this resource. Immutable.
 * @id: Device id. Protected by @dev_priv::resource_lock.
 * @guest_memory_size: Guest memory buffer size. Immutable.
 * @res_dirty: Resource contains data not yet in the guest memory buffer.
 * Protected by resource reserved.
 * @guest_memory_dirty: Guest memory buffer contains data not yet in the HW
 * resource. Protected by resource reserved.
 * @coherent: Emulate coherency by tracking vm accesses.
 * @guest_memory_bo: The guest memory buffer if any. Protected by resource
 * reserved.
 * @guest_memory_offset: Offset into the guest memory buffer if any. Protected
 * by resource reserved. Note that only a few resource types can have a
 * @guest_memory_offset different from zero.
 * @pin_count: The pin count for this resource. A pinned resource has a
 * pin-count greater than zero. It is not on the resource LRU lists and its
 * guest memory buffer is pinned. Hence it can't be evicted.
 * @func: Method vtable for this resource. Immutable.
 * @mob_node: Node for the MOB guest memory rbtree. Protected by
 * @guest_memory_bo reserved.
 * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
 * @binding_head: List head for the context binding list. Protected by
 * the @dev_priv::binding_mutex
 * @res_free: The resource destructor.
 * @hw_destroy: Callback to destroy the resource on the device, as part of
 * resource destruction.
 */
struct vmw_resource_dirty;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	u32 used_prio;
	unsigned long guest_memory_size;
	u32 res_dirty : 1;
	u32 guest_memory_dirty : 1;
	u32 coherent : 1;
	struct vmw_bo *guest_memory_bo;
	unsigned long guest_memory_offset;
	unsigned long pin_count;
	const struct vmw_res_func *func;
	struct rb_node mob_node;
	struct list_head lru_head;
	struct list_head binding_head;
	struct vmw_resource_dirty *dirty;
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};
/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_streamoutput,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view,
	vmw_cmdbuf_res_streamoutput
};

struct vmw_cmdbuf_res_manager;
struct vmw_cursor_snooper {
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;
/**
 * struct vmw_surface_metadata - Metadata describing a surface.
 *
 * @flags: Device flags.
 * @format: Surface SVGA3D_x format.
 * @mip_levels: Mip level for each face. For GB first index is used only.
 * @multisample_count: Sample count.
 * @multisample_pattern: Sample patterns.
 * @quality_level: Quality level.
 * @autogen_filter: Filter for automatically generated mipmaps.
 * @array_size: Number of array elements for a 1D/2D texture. For cubemap
 *              texture number of faces * array_size. This should be 0 for pre
 *              SM4 device.
 * @buffer_byte_stride: Buffer byte stride.
 * @num_sizes: Size of @sizes. For GB surface this should always be 1.
 * @base_size: Surface dimension.
 * @sizes: Array representing mip sizes. Legacy only.
 * @scanout: Whether this surface will be used for scanout.
 *
 * This tracks metadata for both legacy and guest backed surfaces.
 */
struct vmw_surface_metadata {
	u64 flags;
	u32 format;
	u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	u32 multisample_count;
	u32 multisample_pattern;
	u32 autogen_filter;
	u32 array_size;
	u32 num_sizes;
	u32 quality_level;
	u32 buffer_byte_stride;
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	bool scanout;
};
/**
 * struct vmw_surface: Resource structure for a surface.
 *
 * @res: The base resource for this surface.
 * @metadata: Metadata for this surface resource.
 * @snooper: Cursor data. Legacy surface only.
 * @offsets: Legacy surface only.
 * @view_list: List of views bound to this surface.
 */
struct vmw_surface {
	struct vmw_resource res;
	struct vmw_surface_metadata metadata;
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	struct list_head view_list;
};
struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
};
/**
 * struct vmw_res_cache_entry - resource information cache entry
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @valid_handle: Whether the @handle member is valid.
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	uint32_t handle;
	struct vmw_resource *res;
	void *private;
	unsigned short valid_handle;
	unsigned short valid;
};
/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,	/* Unmap from DMA just before unbind */
	vmw_dma_map_max
};
/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_pages;
};
/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_dma_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
};
struct vmw_ttm_tt {
	struct ttm_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	bool mapped;
	bool bound;
};
/**
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target,
	vmw_du_max
};
struct vmw_validation_context;
struct vmw_ctx_validation_info;
/**
 * struct vmw_sw_context - Command submission context
 * @res_ht: Pointer hash table used to find validation duplicates
 * @kernel: Whether the command buffer originates from kernel code rather
 * than from user-space
 * @fp: If @kernel is false, points to the file of the client. Otherwise
 * NULL
 * @cmd_bounce: Command bounce buffer used for command validation before
 * copying to fifo space
 * @cmd_bounce_size: Current command bounce buffer size
 * @cur_query_bo: Current buffer object used as query result buffer
 * @bo_relocations: List of buffer object relocations
 * @res_relocations: List of resource relocations
 * @buf_start: Pointer to start of memory where command validation takes
 * place
 * @res_cache: Cache of recently looked up resources
 * @last_query_ctx: Last context that submitted a query
 * @needs_post_query_barrier: Whether a query barrier is needed after
 * command submission
 * @staged_bindings: Cached per-context binding tracker
 * @staged_bindings_inuse: Whether the cached per-context binding tracker
 * is in use
 * @staged_cmd_res: List of staged command buffer managed resources in this
 * command buffer
 * @ctx_list: List of context resources referenced in this command buffer
 * @dx_ctx_node: Validation metadata of the current DX context
 * @dx_query_mob: The MOB used for DX queries
 * @dx_query_ctx: The DX context used for the last DX query
 * @man: Pointer to the command buffer managed resource manager
 * @ctx: The validation context
 */
struct vmw_sw_context {
	DECLARE_HASHTABLE(res_ht, VMW_RES_HT_ORDER);
	bool kernel;
	struct vmw_fpriv *fp;
	struct drm_file *filp;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct vmw_bo *cur_query_bo;
	struct list_head bo_relocations;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct list_head ctx_list;
	struct vmw_ctx_validation_info *dx_ctx_node;
	struct vmw_bo *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_validation_context *ctx;
};
struct vmw_legacy_display;
struct vmw_overlay;
/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct vmw_bo *otable_bo;
};
enum {
	VMW_IRQTHREAD_FENCE,
	VMW_IRQTHREAD_CMDBUF,
	VMW_IRQTHREAD_MAX
};
/**
 * enum vmw_sm_type - Graphics context capability supported by device.
 * @VMW_SM_LEGACY: Pre DX context.
 * @VMW_SM_4: Context support up to SM4.
 * @VMW_SM_4_1: Context support up to SM4_1.
 * @VMW_SM_5: Context support up to SM5.
 * @VMW_SM_5_1X: Adds support for sm5_1 and gl43 extensions.
 * @VMW_SM_MAX: Should be the last.
 */
enum vmw_sm_type {
	VMW_SM_LEGACY = 0,
	VMW_SM_4,
	VMW_SM_4_1,
	VMW_SM_5,
	VMW_SM_5_1X,
	VMW_SM_MAX
};
struct vmw_private {
	struct drm_device drm;
	struct ttm_device bdev;

	u32 pci_id;
	resource_size_t io_start;
	resource_size_t vram_start;
	resource_size_t vram_size;
	resource_size_t max_primary_mem;
	u32 __iomem *rmmio;
	u32 *fifo_mem;
	resource_size_t fifo_mem_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	uint32_t capabilities;
	uint32_t capabilities2;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	bool assume_16bpp;
	u32 irqs[VMWGFX_MAX_NUM_IRQS];
	u32 num_irq_vectors;

	enum vmw_sm_type sm_type;

	/*
	 * Framebuffer info.
	 */

	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;
	struct drm_property *hotplug_mode_update_property;
	struct drm_property *implicit_placement_property;
	spinlock_t cursor_lock;
	struct drm_atomic_state *suspend_state;

	/*
	 * Context and surface management.
	 */

	spinlock_t resource_lock;
	struct idr res_idr[vmw_res_max];

	/*
	 * A resource manager for kernel-only surfaces and
	 * surface buffers.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * PM management.
	 */
	struct notifier_block pm_nb;
	bool refuse_hibernation;

	atomic_t num_fifo_resources;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct vmw_bo *dummy_query_bo;
	struct vmw_bo *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_fifo_state *fifo;
	struct vmw_cmdbuf_man *cman;
	DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);

	uint32 *devcaps;

	bool vkms_enabled;
	struct workqueue_struct *crc_workq;

	/*
	 * mksGuestStat instance-descriptor and pid arrays
	 */
	struct page *mksstat_user_pages[MKSSTAT_CAPACITY];
	atomic_t mksstat_user_pids[MKSSTAT_CAPACITY];

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
	struct page *mksstat_kern_pages[MKSSTAT_CAPACITY];
	u8 mksstat_kern_top_timer[MKSSTAT_CAPACITY];
	atomic_t mksstat_kern_pids[MKSSTAT_CAPACITY];
#endif
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev)
{
	return container_of(bdev, struct vmw_private, bdev);
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}
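
/*
 * Example (illustrative only, not a driver entry point): a typical ioctl
 * handler prologue using the accessors above; the handler name and body
 * are hypothetical.
 *
 *	int vmw_example_ioctl(struct drm_device *dev, void *data,
 *			      struct drm_file *file_priv)
 *	{
 *		struct vmw_private *dev_priv = vmw_priv(dev);
 *		struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 *		...
 *	}
 */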
/*
 * SVGA v3 has mmio register access and lacks fifo cmds
 */
static inline bool vmw_is_svga_v3(const struct vmw_private *dev)
{
	return dev->pci_id == VMWGFX_PCI_ID_SVGA3;
}
/*
 * The locking here is fine-grained, so that it is performed once
 * for every read- and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	if (vmw_is_svga_v3(dev_priv)) {
		iowrite32(value, dev_priv->rmmio + offset);
	} else {
		spin_lock(&dev_priv->hw_lock);
		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
		outl(value, dev_priv->io_start + SVGA_VALUE_PORT);
		spin_unlock(&dev_priv->hw_lock);
	}
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	u32 val;

	if (vmw_is_svga_v3(dev_priv)) {
		val = ioread32(dev_priv->rmmio + offset);
	} else {
		spin_lock(&dev_priv->hw_lock);
		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
		val = inl(dev_priv->io_start + SVGA_VALUE_PORT);
		spin_unlock(&dev_priv->hw_lock);
	}

	return val;
}
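
/*
 * Example (illustrative only): reading and writing an SVGA device register
 * with the helpers above. The register names assume the svga_reg.h
 * definitions pulled in via vmwgfx_reg.h.
 *
 *	u32 width = vmw_read(dev_priv, SVGA_REG_WIDTH);
 *
 *	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
 */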
/**
 * has_sm4_context - Does the device support SM4 context.
 * @dev_priv: Device private.
 *
 * Return: True if the device supports SM4 contexts.
 */
static inline bool has_sm4_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_4);
}

/**
 * has_sm4_1_context - Does the device support SM4_1 context.
 * @dev_priv: Device private.
 *
 * Return: True if the device supports SM4_1 contexts.
 */
static inline bool has_sm4_1_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_4_1);
}

/**
 * has_sm5_context - Does the device support SM5 context.
 * @dev_priv: Device private.
 *
 * Return: True if the device supports SM5 contexts.
 */
static inline bool has_sm5_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_5);
}

/**
 * has_gl43_context - Does the device support GL43 context.
 * @dev_priv: Device private.
 *
 * Return: True if the device supports GL43 contexts.
 */
static inline bool has_gl43_context(const struct vmw_private *dev_priv)
{
	return (dev_priv->sm_type >= VMW_SM_5_1X);
}
static inline u32 vmw_max_num_uavs(struct vmw_private *dev_priv)
{
	return (has_gl43_context(dev_priv) ?
			SVGA3D_DX11_1_MAX_UAVIEWS : SVGA3D_MAX_UAVIEWS);
}
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);
bool vmwgfx_supported(struct vmw_private *vmw);

/*
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
/*
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
				 bool dirtying);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct drm_file *filp,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_bo **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool dirty_set,
				   bool dirty,
				   bool switch_guest_memory,
				   struct vmw_bo *new_guest_memory,
				   unsigned long new_guest_memory_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_resource *old_mem,
				  struct ttm_resource *new_mem);
int vmw_query_readback_all(struct vmw_bo *dx_query_mob);
void vmw_resource_evict_all(struct vmw_private *dev_priv);
void vmw_resource_unbind_list(struct vmw_bo *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end);
int vmw_resource_clean(struct vmw_resource *res);
int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault);
/**
 * vmw_resource_mob_attached - Whether a resource currently has a mob attached
 * @res: The resource
 *
 * Return: true if the resource has a mob attached, false otherwise.
 */
static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
{
	return !RB_EMPTY_NODE(&res->mob_node);
}
/*
 * GEM related functionality - vmwgfx_gem.c
 */
struct vmw_bo_params;
int vmw_gem_object_create(struct vmw_private *vmw,
			  struct vmw_bo_params *params,
			  struct vmw_bo **p_vbo);
extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
					     struct drm_file *filp,
					     uint32_t size,
					     uint32_t *handle,
					     struct vmw_bo **p_vbo);
extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *filp);
extern void vmw_debugfs_gem_init(struct vmw_private *vdev);
/*
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
/*
 * Fifo utilities - vmwgfx_fifo.c
 */

extern struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv);
extern void vmw_fifo_destroy(struct vmw_private *dev_priv);
extern bool vmw_cmd_supported(struct vmw_private *vmw);
extern void *
vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno);
extern bool vmw_supports_3d(struct vmw_private *dev_priv);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
				    uint32_t cid);
extern int vmw_cmd_flush(struct vmw_private *dev_priv,
			 bool interruptible);

#define VMW_CMD_CTX_RESERVE(__priv, __bytes, __ctx_id)                        \
({                                                                            \
	vmw_cmd_ctx_reserve(__priv, __bytes, __ctx_id) ? : ({                 \
		DRM_ERROR("FIFO reserve failed at %s for %u bytes\n",         \
			  __func__, (unsigned int) __bytes);                  \
		NULL;                                                         \
	});                                                                   \
})

#define VMW_CMD_RESERVE(__priv, __bytes)                                      \
	VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
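
/*
 * Example (illustrative sketch): the reserve/copy/commit pattern that
 * VMW_CMD_RESERVE wraps. SVGA_CMD_UPDATE and its four-word payload follow
 * the FIFO command layout from the SVGA headers; the coordinates are
 * placeholders.
 *
 *	u32 *cmd = VMW_CMD_RESERVE(dev_priv, 5 * sizeof(u32));
 *
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *	cmd[0] = SVGA_CMD_UPDATE;
 *	cmd[1] = x;
 *	cmd[2] = y;
 *	cmd[3] = width;
 *	cmd[4] = height;
 *	vmw_cmd_commit(dev_priv, 5 * sizeof(u32));
 */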
/**
 * vmw_fifo_caps - Returns the capabilities of the FIFO command
 * queue or 0 if fifo memory isn't present.
 * @dev_priv: The device private context
 */
static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv)
{
	if (!dev_priv->fifo_mem || !dev_priv->fifo)
		return 0;
	return dev_priv->fifo->capabilities;
}
/**
 * vmw_is_cursor_bypass3_enabled - Returns TRUE iff Cursor Bypass 3
 * is enabled in the FIFO.
 * @dev_priv: The device private context
 */
static inline bool
vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
{
	return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0;
}
/*
 * TTM buffer object driver - vmwgfx_ttm_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       size_t bo_size,
			       u32 domain,
			       struct vmw_bo **bo_p);

extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);
/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}
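
/*
 * Example (illustrative only): walking every page of a bound buffer with
 * the page iterator, independently of whether the backing store is a
 * scatter-gather list or plain page/address arrays. vmw_piter_start()
 * positions the iterator before the first page, so vmw_piter_next() must
 * be called before the first access.
 *
 *	struct vmw_piter viter;
 *
 *	vmw_piter_start(&viter, vsgt, 0);
 *	while (vmw_piter_next(&viter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&viter);
 *		...
 *	}
 */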
/*
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence,
			       uint32_t flags);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
				       struct vmw_fpriv *vmw_fp,
				       int ret,
				       struct drm_vmw_fence_rep __user
				       *user_fence_rep,
				       struct vmw_fence_obj *fence,
				       uint32_t fence_handle,
				       int32_t out_fence_fd);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
/*
 * IRQs and waiting - vmwgfx_irq.c
 */

extern int vmw_irq_install(struct vmw_private *dev_priv);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);
/*
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
/*
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
/*
 * GMR Id manager
 */

int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);
/*
 * System memory manager
 */
int vmw_sys_man_init(struct vmw_private *dev_priv);
void vmw_sys_man_fini(struct vmw_private *dev_priv);
/*
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);
struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *table);
/*
 * MemoryOBject management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);
/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_bo *mob);
extern struct vmw_bo *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
					   void *data,
					   struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
					      void *data,
					      struct drm_file *file_priv);
int vmw_gb_surface_define(struct vmw_private *dev_priv,
			  const struct vmw_surface_metadata *req,
			  struct vmw_surface **srf_out);
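
/*
 * Example (illustrative sketch): minimal metadata for a small 2D
 * guest-backed surface handed to vmw_gb_surface_define(). The format and
 * dimensions are placeholders.
 *
 *	struct vmw_surface_metadata metadata = {0};
 *	struct vmw_surface *srf;
 *	int ret;
 *
 *	metadata.format = SVGA3D_A8R8G8B8;
 *	metadata.mip_levels[0] = 1;
 *	metadata.num_sizes = 1;
 *	metadata.base_size.width = 64;
 *	metadata.base_size.height = 64;
 *	metadata.base_size.depth = 1;
 *	ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
 */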
/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);
extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);
/*
 * Streamoutput management
 */
struct vmw_resource *
vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
			   u32 user_key);
int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
			    struct vmw_resource *ctx,
			    SVGA3dStreamOutputId user_key,
			    struct list_head *list);
void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size);
int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
			       SVGA3dStreamOutputId user_key,
			       struct list_head *list);
void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
					    struct list_head *list,
					    bool readback);
/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);
/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);
/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);
extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
/* CPU blit utilities - vmwgfx_blit.c */

/**
 * struct vmw_diff_cpy - CPU blit information structure
 *
 * @rect: The output bounding box rectangle.
 * @line: The current line of the blit.
 * @line_offset: Offset of the current line segment.
 * @cpp: Bytes per pixel (granularity information).
 * @do_cpy: Which memcpy function to use.
 */
struct vmw_diff_cpy {
	struct drm_rect rect;
	size_t line;
	size_t line_offset;
	int cpp;
	void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		       size_t n);
};

#define VMW_CPU_BLIT_INITIALIZER {	\
	.do_cpy = vmw_memcpy,		\
}

#define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) {	  \
	.line = 0,				  \
	.line_offset = 0,			  \
	.rect = { .x1 = INT_MAX/2,		  \
		  .y1 = INT_MAX/2,		  \
		  .x2 = INT_MIN/2,		  \
		  .y2 = INT_MIN/2		  \
	},					  \
	.cpp = _cpp,				  \
	.do_cpy = vmw_diff_memcpy,		  \
}

void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		     size_t n);

void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);

int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
		    u32 dst_offset, u32 dst_stride,
		    struct ttm_buffer_object *src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff);
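
/*
 * Example (illustrative only): a 32bpp CPU blit that tracks the changed
 * region. Buffer objects, strides and dimensions are placeholders. On
 * success, diff.rect bounds the pixels that actually changed.
 *
 *	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
 *	int ret;
 *
 *	ret = vmw_bo_cpu_blit(dst_bo, 0, dst_stride,
 *			      src_bo, 0, src_stride,
 *			      width, height, &diff);
 */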
/* Host messaging - vmwgfx_msg.c: */
void vmw_disable_backdoor(void);
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length);
__printf(1, 2) int vmw_host_printf(const char *fmt, ...);
int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv);

/* Host mksGuestStats - vmwgfx_msg.c: */
int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv);

int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
/**
 * VMW_DEBUG_USER - Debug output for user-space debugging.
 *
 * @fmt: printf() like format string.
 *
 * This macro is for logging user-space error and debugging messages for e.g.
 * command buffer execution errors due to malformed commands, invalid context,
 * etc.
 */
#define VMW_DEBUG_USER(fmt, ...)                                              \
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
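
/*
 * Example: VMW_DEBUG_USER("Invalid command buffer size %u.\n", size);
 */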
/* Resource dirtying - vmwgfx_page_dirty.c */
void vmw_bo_dirty_scan(struct vmw_bo *vbo);
int vmw_bo_dirty_add(struct vmw_bo *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
void vmw_bo_dirty_release(struct vmw_bo *vbo);
void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
			pgoff_t start, pgoff_t end);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
/**
 * VMW_DEBUG_KMS - Debug output for kernel mode-setting
 *
 * This macro is for debugging vmwgfx mode-setting code.
 */
#define VMW_DEBUG_KMS(fmt, ...)                                              \
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;

	*srf = NULL;
	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}
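
/*
 * Example (illustrative only): holding a surface reference across an
 * operation. Note that vmw_surface_unreference() clears the pointer.
 *
 *	struct vmw_surface *srf = vmw_surface_reference(base_srf);
 *
 *	... use srf ...
 *
 *	vmw_surface_unreference(&srf);
 */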
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_fifo_mem_read - Perform an MMIO read from the fifo memory
 *
 * @vmw: The device private
 * @fifo_reg: The fifo register to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
	BUG_ON(vmw_is_svga_v3(vmw));
	return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}

/**
 * vmw_fifo_mem_write - Perform an MMIO write to the fifo memory
 *
 * @vmw: The device private
 * @fifo_reg: The fifo register to write to
 * @value: The value to write
 *
 * This function is intended to be equivalent to iowrite32() on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
				      u32 value)
{
	BUG_ON(vmw_is_svga_v3(vmw));
	WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}

static inline u32 vmw_fence_read(struct vmw_private *dev_priv)
{
	u32 fence;

	if (vmw_is_svga_v3(dev_priv))
		fence = vmw_read(dev_priv, SVGA_REG_FENCE);
	else
		fence = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);

	return fence;
}

static inline void vmw_fence_write(struct vmw_private *dev_priv,
				   u32 fence)
{
	BUG_ON(vmw_is_svga_v3(dev_priv));
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, fence);
}

static inline u32 vmw_irq_status_read(struct vmw_private *vmw)
{
	u32 status;

	if (vmw_is_svga_v3(vmw))
		status = vmw_read(vmw, SVGA_REG_IRQ_STATUS);
	else
		status = inl(vmw->io_start + SVGA_IRQSTATUS_PORT);

	return status;
}

static inline void vmw_irq_status_write(struct vmw_private *vmw,
					uint32 status)
{
	if (vmw_is_svga_v3(vmw))
		vmw_write(vmw, SVGA_REG_IRQ_STATUS, status);
	else
		outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
}

static inline bool vmw_has_fences(struct vmw_private *vmw)
{
	if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
				  SVGA_CAP_CMD_BUFFERS_2)) != 0)
		return true;
	return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
}

static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
					   u32 shader_type)
{
	SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;

	if (shader_model >= VMW_SM_5)
		max_allowed = SVGA3D_SHADERTYPE_MAX;
	else if (shader_model >= VMW_SM_4)
		max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
	return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
}

#endif