/* SPDX-License-Identifier: MIT */
/*
 * Copyright (C) 2013-2017 Oracle Corporation
 * This file is based on ast_drv.h
 * Copyright 2012 Red Hat Inc.
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Michael Thayer <michael.thayer@oracle.com>
 *          Hans de Goede <hdegoede@redhat.com>
 */
#ifndef __VBOX_DRV_H__
#define __VBOX_DRV_H__

#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/version.h>

#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>

#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
#include "hgsmi_ch_setup.h"

#define DRIVER_NAME         "vboxvideo"
#define DRIVER_DESC         "Oracle VM VirtualBox Graphics Card"
#define DRIVER_DATE         "20130823"

#define DRIVER_MAJOR        1
#define DRIVER_MINOR        0
#define DRIVER_PATCHLEVEL   0

#define VBOX_MAX_CURSOR_WIDTH  64
#define VBOX_MAX_CURSOR_HEIGHT 64
#define CURSOR_PIXEL_COUNT (VBOX_MAX_CURSOR_WIDTH * VBOX_MAX_CURSOR_HEIGHT)
#define CURSOR_DATA_SIZE (CURSOR_PIXEL_COUNT * 4 + CURSOR_PIXEL_COUNT / 8)
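/*
 * Note on CURSOR_DATA_SIZE: the formula accounts for a 4 bytes-per-pixel
 * colour image (CURSOR_PIXEL_COUNT * 4) plus a 1 bit-per-pixel mask
 * (CURSOR_PIXEL_COUNT / 8). The exact layout of the buffer handed to the
 * host (presumably an AND mask followed by the pixel data) is defined by
 * the host-side cursor protocol, not by this header.
 */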

#define VBOX_MAX_SCREENS  32

#define GUEST_HEAP_OFFSET(vbox) ((vbox)->full_vram_size - \
				 VBVA_ADAPTER_INFORMATION_SIZE)
#define GUEST_HEAP_SIZE   VBVA_ADAPTER_INFORMATION_SIZE
#define GUEST_HEAP_USABLE_SIZE (VBVA_ADAPTER_INFORMATION_SIZE - \
				sizeof(struct hgsmi_host_flags))
#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
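/*
 * Rough VRAM layout implied by the macros above (a sketch, not a
 * specification; the exact split is set up elsewhere in the driver):
 *
 *   0 .. available_vram_size : framebuffer(s)
 *   available_vram_size ..   : VBVA buffers and other bookkeeping
 *   full_vram_size - VBVA_ADAPTER_INFORMATION_SIZE .. full_vram_size :
 *                              guest heap, with struct hgsmi_host_flags
 *                              at HOST_FLAGS_OFFSET from its start
 */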

struct vbox_framebuffer {
	struct drm_framebuffer base;
	struct drm_gem_object *obj;
};

struct vbox_private {
	/* Must be first; or we must define our own release callback */
	struct drm_device ddev;
	struct drm_fb_helper fb_helper;
	struct vbox_framebuffer afb;

	u8 __iomem *guest_heap;
	u8 __iomem *vbva_buffers;
	struct gen_pool *guest_pool;
	struct vbva_buf_ctx *vbva_info;
	bool any_pitch;
	u32 num_crtcs;
	/* Amount of available VRAM, including space used for buffers. */
	u32 full_vram_size;
	/* Amount of available VRAM, not including space used for buffers. */
	u32 available_vram_size;
	/* Array of structures for receiving mode hints. */
	struct vbva_modehint *last_mode_hints;

	int fb_mtrr;

	struct {
		struct drm_global_reference mem_global_ref;
		struct ttm_bo_global_ref bo_global_ref;
		struct ttm_bo_device bdev;
	} ttm;

	struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */
	/*
	 * We decide whether or not user-space supports display hot-plug
	 * depending on whether it reacts to a hot-plug event after the
	 * initial mode query.
	 */
	bool initial_mode_queried;
	struct work_struct hotplug_work;
	u32 input_mapping_width;
	u32 input_mapping_height;
	/*
	 * Is user-space using an X.Org-style layout of one large frame-buffer
	 * encompassing all screens, or is the fbdev console active?
	 */
	bool single_framebuffer;
	u8 cursor_data[CURSOR_DATA_SIZE];
};
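/*
 * Because the drm_device is embedded as the first member, the containing
 * vbox_private can be recovered from a struct drm_device pointer with
 * container_of(). A minimal sketch of such a (hypothetical) helper:
 *
 *	static inline struct vbox_private *to_vbox_dev(struct drm_device *dev)
 *	{
 *		return container_of(dev, struct vbox_private, ddev);
 *	}
 */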

#undef CURSOR_PIXEL_COUNT
#undef CURSOR_DATA_SIZE

struct vbox_gem_object;

struct vbox_connector {
	struct drm_connector base;
	char name[32];
	struct vbox_crtc *vbox_crtc;
	struct {
		u32 width;
		u32 height;
		bool disconnected;
	} mode_hint;
};

struct vbox_crtc {
	struct drm_crtc base;
	bool disconnected;
	unsigned int crtc_id;
	u32 fb_offset;
	bool cursor_enabled;
	u32 x_hint;
	u32 y_hint;
	/*
	 * When setting a mode we not only pass the mode to the hypervisor,
	 * but also information on how to map / translate input coordinates
	 * for the emulated USB tablet. This input-mapping may change when
	 * the mode on *another* crtc changes.
	 *
	 * This means that sometimes we must do a modeset on other crtcs than
	 * the one being changed to update the input-mapping. This includes
	 * crtcs which may be disabled inside the guest (shown as a black
	 * window on the host unless closed by the user).
	 *
	 * With atomic modesetting the mode-info of disabled crtcs gets zeroed,
	 * yet we need it when updating the input-map to avoid resizing the
	 * window as a side effect of a mode_set on another crtc. Therefore we
	 * cache the info of the last mode below.
	 */
	u32 width;
	u32 height;
	u32 x;
	u32 y;
};

struct vbox_encoder {
	struct drm_encoder base;
};

#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)

bool vbox_check_supported(u16 id);
int vbox_hw_init(struct vbox_private *vbox);
void vbox_hw_fini(struct vbox_private *vbox);

int vbox_mode_init(struct vbox_private *vbox);
void vbox_mode_fini(struct vbox_private *vbox);

#define DRM_MODE_FB_CMD drm_mode_fb_cmd2

void vbox_report_caps(struct vbox_private *vbox);

void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects);

int vbox_framebuffer_init(struct vbox_private *vbox,
			  struct vbox_framebuffer *vbox_fb,
			  const struct DRM_MODE_FB_CMD *mode_cmd,
			  struct drm_gem_object *obj);

int vboxfb_create(struct drm_fb_helper *helper,
		  struct drm_fb_helper_surface_size *sizes);
void vbox_fbdev_fini(struct vbox_private *vbox);

struct vbox_bo {
	struct ttm_buffer_object bo;
	struct ttm_placement placement;
	struct ttm_bo_kmap_obj kmap;
	struct drm_gem_object gem;
	struct ttm_place placements[3];
	int pin_count;
};

#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem)

static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vbox_bo, bo);
}

#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base)

static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
	return bo->bo.offset;
}
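/*
 * vbox_bo_gpu_offset() returns the object's offset from the start of VRAM
 * once the buffer has been placed there; callers would presumably use it
 * for values such as vbox_crtc->fb_offset when programming scan-out.
 */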

int vbox_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args);

void vbox_gem_free_object(struct drm_gem_object *obj);
int vbox_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle, u64 *offset);
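/*
 * vbox_dumb_create() and vbox_dumb_mmap_offset() back the generic DRM
 * dumb-buffer ioctls. A rough sketch of the expected call sequence
 * (argument handling and error checking omitted):
 *
 *	struct drm_mode_create_dumb args = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	u64 offset;
 *
 *	vbox_dumb_create(file, dev, &args);
 *	vbox_dumb_mmap_offset(file, dev, args.handle, &offset);
 *	(user-space then mmaps the buffer using 'offset')
 */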

#define DRM_FILE_PAGE_OFFSET (0x10000000ULL >> PAGE_SHIFT)

int vbox_mm_init(struct vbox_private *vbox);
void vbox_mm_fini(struct vbox_private *vbox);

int vbox_bo_create(struct vbox_private *vbox, int size, int align,
		   u32 flags, struct vbox_bo **pvboxbo);

int vbox_gem_create(struct vbox_private *vbox,
		    u32 size, bool iskernel, struct drm_gem_object **obj);

int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag);
int vbox_bo_unpin(struct vbox_bo *bo);

static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait)
{
	int ret;

	ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
	if (ret) {
		if (ret != -ERESTARTSYS && ret != -EBUSY)
			DRM_ERROR("reserve failed %p\n", bo);
		return ret;
	}
	return 0;
}

static inline void vbox_bo_unreserve(struct vbox_bo *bo)
{
	ttm_bo_unreserve(&bo->bo);
}
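/*
 * Typical access pattern for a vbox_bo, e.g. when pinning a framebuffer
 * into VRAM (a simplified sketch; error handling omitted and the exact
 * placement flag depends on the caller):
 *
 *	ret = vbox_bo_reserve(bo, false);
 *	if (ret)
 *		return ret;
 *	vbox_bo_pin(bo, TTM_PL_FLAG_VRAM);
 *	gpu_addr = vbox_bo_gpu_offset(bo);
 *	vbox_bo_unreserve(bo);
 */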

void vbox_ttm_placement(struct vbox_bo *bo, int domain);
int vbox_bo_push_sysram(struct vbox_bo *bo);
int vbox_mmap(struct file *filp, struct vm_area_struct *vma);
void *vbox_bo_kmap(struct vbox_bo *bo);
void vbox_bo_kunmap(struct vbox_bo *bo);

/* vbox_prime.c */
int vbox_gem_prime_pin(struct drm_gem_object *obj);
void vbox_gem_prime_unpin(struct drm_gem_object *obj);
struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *vbox_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table);
void *vbox_gem_prime_vmap(struct drm_gem_object *obj);
void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int vbox_gem_prime_mmap(struct drm_gem_object *obj,
			struct vm_area_struct *area);

/* vbox_irq.c */
int vbox_irq_init(struct vbox_private *vbox);
void vbox_irq_fini(struct vbox_private *vbox);
void vbox_report_hotplug(struct vbox_private *vbox);
irqreturn_t vbox_irq_handler(int irq, void *arg);

/* vbox_hgsmi.c */
void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
			 u8 channel, u16 channel_info);
void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf);
int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf);
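/*
 * HGSMI commands are built in buffers allocated from the guest_pool and
 * handed to the host by submitting them. The usual pattern is roughly as
 * below, where the channel and command values are stand-ins for whatever
 * command structure 'p' points to:
 *
 *	p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p),
 *			       HGSMI_CH_VBVA, VBVA_SOME_COMMAND);
 *	if (!p)
 *		return -ENOMEM;
 *	... fill in *p ...
 *	hgsmi_buffer_submit(vbox->guest_pool, p);
 *	hgsmi_buffer_free(vbox->guest_pool, p);
 */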

static inline void vbox_write_ioport(u16 index, u16 data)
{
	outw(index, VBE_DISPI_IOPORT_INDEX);
	outw(data, VBE_DISPI_IOPORT_DATA);
}
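/*
 * The VBE "DISPI" interface is programmed by writing a register index to
 * VBE_DISPI_IOPORT_INDEX and then the value to VBE_DISPI_IOPORT_DATA,
 * which is what vbox_write_ioport() wraps. For example (assuming the
 * VBE_DISPI_INDEX_* defines from vboxvideo_vbe.h):
 *
 *	vbox_write_ioport(VBE_DISPI_INDEX_XRES, width);
 */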

#endif