// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2013-2017 Oracle Corporation
 * This file is based on ast_main.c
 * Copyright 2012 Red Hat Inc.
 * Authors: Dave Airlie <airlied@redhat.com>,
 *          Michael Thayer <michael.thayer@oracle.com,
 *          Hans de Goede <hdegoede@redhat.com>
 */

#include <linux/vbox_err.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include "vbox_drv.h"
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
18
19static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
20{
21 struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
22
23 if (vbox_fb->obj)
c58ac649 24 drm_gem_object_put_unlocked(vbox_fb->obj);
dd55d44f
HG
25
26 drm_framebuffer_cleanup(fb);
27 kfree(fb);
28}
29
dd55d44f
HG
30void vbox_report_caps(struct vbox_private *vbox)
31{
32 u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
33 VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;
34
35 if (vbox->initial_mode_queried)
36 caps |= VBVACAPS_VIDEO_MODE_HINTS;
37
38 hgsmi_send_caps_info(vbox->guest_pool, caps);
39}
40
/* Send information about dirty rectangles to VBVA. */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	struct vbox_private *vbox = fb->dev->dev_private;
	struct drm_display_mode *mode;
	struct drm_crtc *crtc;
	int crtc_x, crtc_y;
	unsigned int i;

	/* Serialize all VBVA buffer access on this device. */
	mutex_lock(&vbox->hw_mutex);
	list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
		/* Only report rectangles for CRTCs scanning out this fb. */
		if (crtc->primary->state->fb != fb)
			continue;

		mode = &crtc->state->mode;
		/* src_x/src_y are 16.16 fixed point; keep the integer part. */
		crtc_x = crtc->primary->state->src_x >> 16;
		crtc_y = crtc->primary->state->src_y >> 16;

		for (i = 0; i < num_rects; ++i) {
			struct vbva_cmd_hdr cmd_hdr;
			unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;

			/* Skip rects that lie entirely outside this CRTC. */
			if (rects[i].x1 > crtc_x + mode->hdisplay ||
			    rects[i].y1 > crtc_y + mode->vdisplay ||
			    rects[i].x2 < crtc_x ||
			    rects[i].y2 < crtc_y)
				continue;

			cmd_hdr.x = (s16)rects[i].x1;
			cmd_hdr.y = (s16)rects[i].y1;
			cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
			cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;

			/* Best effort: if we cannot open an update, drop
			 * this rectangle rather than failing the call.
			 */
			if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
						      vbox->guest_pool))
				continue;

			vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
				   &cmd_hdr, sizeof(cmd_hdr));
			vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
		}
	}
	mutex_unlock(&vbox->hw_mutex);
}

/* drm_framebuffer_funcs.dirty callback: forward the dirty-rect list to the
 * host. flags and color are unused by this driver.
 */
static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
				       struct drm_file *file_priv,
				       unsigned int flags, unsigned int color,
				       struct drm_clip_rect *rects,
				       unsigned int num_rects)
{
	vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
	return 0;
}

/* Callbacks for framebuffers created via vbox_framebuffer_init(). */
static const struct drm_framebuffer_funcs vbox_fb_funcs = {
	.destroy = vbox_user_framebuffer_destroy,
	.dirty = vbox_user_framebuffer_dirty,
};

01648890 104int vbox_framebuffer_init(struct vbox_private *vbox,
dd55d44f
HG
105 struct vbox_framebuffer *vbox_fb,
106 const struct DRM_MODE_FB_CMD *mode_cmd,
107 struct drm_gem_object *obj)
108{
109 int ret;
110
01648890 111 drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd);
dd55d44f 112 vbox_fb->obj = obj;
01648890 113 ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base, &vbox_fb_funcs);
dd55d44f
HG
114 if (ret) {
115 DRM_ERROR("framebuffer init failed %d\n", ret);
116 return ret;
117 }
118
119 return 0;
120}
121
dd55d44f
HG
/*
 * Set up VBVA acceleration: allocate per-CRTC buffer contexts, carve one
 * command buffer per screen out of the end of usable VRAM, and enable each
 * buffer with the host. Returns 0 on success or -ENOMEM.
 */
static int vbox_accel_init(struct vbox_private *vbox)
{
	struct vbva_buffer *vbva;
	unsigned int i;

	vbox->vbva_info = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
				       sizeof(*vbox->vbva_info), GFP_KERNEL);
	if (!vbox->vbva_info)
		return -ENOMEM;

	/* Take a command buffer for each screen from the end of usable VRAM. */
	vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;

	/* NOTE(review): if the iomap below fails, available_vram_size stays
	 * reduced; callers treat the failure as fatal, so this looks benign —
	 * confirm.
	 */
	vbox->vbva_buffers = pci_iomap_range(vbox->ddev.pdev, 0,
					     vbox->available_vram_size,
					     vbox->num_crtcs *
					     VBVA_MIN_BUFFER_SIZE);
	if (!vbox->vbva_buffers)
		return -ENOMEM;

	for (i = 0; i < vbox->num_crtcs; ++i) {
		vbva_setup_buffer_context(&vbox->vbva_info[i],
					  vbox->available_vram_size +
					  i * VBVA_MIN_BUFFER_SIZE,
					  VBVA_MIN_BUFFER_SIZE);
		vbva = (void __force *)vbox->vbva_buffers +
			i * VBVA_MIN_BUFFER_SIZE;
		if (!vbva_enable(&vbox->vbva_info[i],
				 vbox->guest_pool, vbva, i)) {
			/* very old host or driver error. */
			DRM_ERROR("vboxvideo: vbva_enable failed\n");
		}
	}

	return 0;
}

159static void vbox_accel_fini(struct vbox_private *vbox)
160{
40497c52
HG
161 unsigned int i;
162
163 for (i = 0; i < vbox->num_crtcs; ++i)
164 vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
165
01648890 166 pci_iounmap(vbox->ddev.pdev, vbox->vbva_buffers);
dd55d44f
HG
167}
168
cd76c287 169/* Do we support the 4.3 plus mode hint reporting interface? */
dd55d44f
HG
170static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
171{
172 u32 have_hints, have_cursor;
173 int ret;
174
175 ret = hgsmi_query_conf(vbox->guest_pool,
176 VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
177 &have_hints);
178 if (ret)
179 return false;
180
181 ret = hgsmi_query_conf(vbox->guest_pool,
182 VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
183 &have_cursor);
184 if (ret)
185 return false;
186
187 return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
188}
189
d4670909 190bool vbox_check_supported(u16 id)
dd55d44f
HG
191{
192 u16 dispi_id;
193
194 vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
195 dispi_id = inw(VBE_DISPI_IOPORT_DATA);
196
197 return dispi_id == id;
198}
199
/*
 * One-time hardware setup: read VRAM size, map the HGSMI guest heap at the
 * end of VRAM, build a gen_pool allocator over it, query host capabilities
 * and monitor count, and enable VBVA acceleration.
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired here are released via the goto-cleanup chain below.
 */
int vbox_hw_init(struct vbox_private *vbox)
{
	int ret = -ENOMEM;

	vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
	vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);

	DRM_INFO("VRAM %08x\n", vbox->full_vram_size);

	/* Map guest-heap at end of vram */
	vbox->guest_heap =
		pci_iomap_range(vbox->ddev.pdev, 0, GUEST_HEAP_OFFSET(vbox),
				GUEST_HEAP_SIZE);
	if (!vbox->guest_heap)
		return -ENOMEM;

	/* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
	vbox->guest_pool = gen_pool_create(4, -1);
	if (!vbox->guest_pool)
		goto err_unmap_guest_heap;

	ret = gen_pool_add_virt(vbox->guest_pool,
				(unsigned long)vbox->guest_heap,
				GUEST_HEAP_OFFSET(vbox),
				GUEST_HEAP_USABLE_SIZE, -1);
	if (ret)
		goto err_destroy_guest_pool;

	/* Sanity-check that HGSMI communication with the host works. */
	ret = hgsmi_test_query_conf(vbox->guest_pool);
	if (ret) {
		DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
		goto err_destroy_guest_pool;
	}

	/* Reduce available VRAM size to reflect the guest heap. */
	vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
	/* Linux drm represents monitors as a 32-bit array. */
	/* NOTE(review): the return value of this query is ignored; the clamp
	 * below forces num_crtcs into [1, VBOX_MAX_SCREENS] regardless —
	 * confirm num_crtcs is zero-initialized before this point.
	 */
	hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
			 &vbox->num_crtcs);
	vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);

	if (!have_hgsmi_mode_hints(vbox)) {
		ret = -ENOTSUPP;
		goto err_destroy_guest_pool;
	}

	vbox->last_mode_hints = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
					     sizeof(struct vbva_modehint),
					     GFP_KERNEL);
	if (!vbox->last_mode_hints) {
		ret = -ENOMEM;
		goto err_destroy_guest_pool;
	}

	ret = vbox_accel_init(vbox);
	if (ret)
		goto err_destroy_guest_pool;

	return 0;

err_destroy_guest_pool:
	gen_pool_destroy(vbox->guest_pool);
err_unmap_guest_heap:
	pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
	return ret;
}

/*
 * Undo vbox_hw_init(): disable VBVA acceleration first (it uses the guest
 * pool), then destroy the pool and unmap the guest heap.
 */
void vbox_hw_fini(struct vbox_private *vbox)
{
	vbox_accel_fini(vbox);
	gen_pool_destroy(vbox->guest_pool);
	pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
}

01648890 274int vbox_gem_create(struct vbox_private *vbox,
dd55d44f
HG
275 u32 size, bool iskernel, struct drm_gem_object **obj)
276{
277 struct vbox_bo *vboxbo;
278 int ret;
279
280 *obj = NULL;
281
282 size = roundup(size, PAGE_SIZE);
283 if (size == 0)
284 return -EINVAL;
285
01648890 286 ret = vbox_bo_create(vbox, size, 0, 0, &vboxbo);
dd55d44f
HG
287 if (ret) {
288 if (ret != -ERESTARTSYS)
289 DRM_ERROR("failed to allocate GEM object\n");
290 return ret;
291 }
292
293 *obj = &vboxbo->gem;
294
295 return 0;
296}
297
298int vbox_dumb_create(struct drm_file *file,
299 struct drm_device *dev, struct drm_mode_create_dumb *args)
300{
01648890
HG
301 struct vbox_private *vbox =
302 container_of(dev, struct vbox_private, ddev);
dd55d44f
HG
303 struct drm_gem_object *gobj;
304 u32 handle;
01648890 305 int ret;
dd55d44f
HG
306
307 args->pitch = args->width * ((args->bpp + 7) / 8);
308 args->size = args->pitch * args->height;
309
01648890 310 ret = vbox_gem_create(vbox, args->size, false, &gobj);
dd55d44f
HG
311 if (ret)
312 return ret;
313
314 ret = drm_gem_handle_create(file, gobj, &handle);
c58ac649 315 drm_gem_object_put_unlocked(gobj);
dd55d44f
HG
316 if (ret)
317 return ret;
318
319 args->handle = handle;
320
321 return 0;
322}
323
dd55d44f
HG
324void vbox_gem_free_object(struct drm_gem_object *obj)
325{
326 struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);
327
4f6f4408 328 ttm_bo_put(&vbox_bo->bo);
dd55d44f
HG
329}
330
331static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
332{
333 return drm_vma_node_offset_addr(&bo->bo.vma_node);
334}
335
336int
337vbox_dumb_mmap_offset(struct drm_file *file,
338 struct drm_device *dev,
339 u32 handle, u64 *offset)
340{
341 struct drm_gem_object *obj;
342 int ret;
343 struct vbox_bo *bo;
344
345 mutex_lock(&dev->struct_mutex);
346 obj = drm_gem_object_lookup(file, handle);
347 if (!obj) {
348 ret = -ENOENT;
349 goto out_unlock;
350 }
351
352 bo = gem_to_vbox_bo(obj);
353 *offset = vbox_bo_mmap_offset(bo);
354
c58ac649 355 drm_gem_object_put(obj);
dd55d44f
HG
356 ret = 0;
357
358out_unlock:
359 mutex_unlock(&dev->struct_mutex);
360 return ret;
361}