/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp,
 * VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn.
 *
 * Copyright (C) 2006-2016 Oracle Corporation
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include "vboxguest_core.h"

/* Get the pointer to the first parameter of an HGCM call request. */
#define VMMDEV_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))

/* The max parameter buffer size for a user request. */
#define VBG_MAX_HGCM_USER_PARM		(24 * SZ_1M)
/* The max parameter buffer size for a kernel request. */
#define VBG_MAX_HGCM_KERNEL_PARM	(16 * SZ_1M)

#define VBG_DEBUG_PORT			0x504

/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
static DEFINE_SPINLOCK(vbg_log_lock);
static char vbg_log_buf[128];

#define VBG_LOG(name, pr_func) \
void name(const char *fmt, ...)						\
{									\
	unsigned long flags;						\
	va_list args;							\
	int i, count;							\
									\
	va_start(args, fmt);						\
	spin_lock_irqsave(&vbg_log_lock, flags);			\
									\
	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
	for (i = 0; i < count; i++)					\
		outb(vbg_log_buf[i], VBG_DEBUG_PORT);			\
									\
	pr_func("%s", vbg_log_buf);					\
									\
	spin_unlock_irqrestore(&vbg_log_lock, flags);			\
	va_end(args);							\
}									\
EXPORT_SYMBOL(name)

VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif

void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
{
	struct vmmdev_request_header *req;
	int order = get_order(PAGE_ALIGN(len));

	req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
	if (!req)
		return NULL;

	memset(req, 0xaa, len);

	req->size = len;
	req->version = VMMDEV_REQUEST_HEADER_VERSION;
	req->request_type = req_type;
	req->rc = VERR_GENERAL_FAILURE;
	req->reserved1 = 0;
	req->reserved2 = 0;

	return req;
}

void vbg_req_free(void *req, size_t len)
{
	if (!req)
		return;

	free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
}

/* Note: this function returns a VBox status code, not a negative errno! */
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
	unsigned long phys_req = virt_to_phys(req);

	outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
	/*
	 * The host changes the request as a result of the outl, make sure
	 * the outl and any reads of the req happen in the correct order.
	 */
	mb();

	return ((struct vmmdev_request_header *)req)->rc;
}
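
/*
 * Example (illustrative sketch, not part of this driver): the typical
 * life-cycle of a VMM request built on the three helpers above. The
 * request struct and request type below are hypothetical placeholders.
 *
 *	struct vmmdev_foo_req *req;
 *	int rc;
 *
 *	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_FOO);
 *	if (!req)
 *		return -ENOMEM;
 *
 *	req->some_field = some_value;		(request-specific setup)
 *	rc = vbg_req_perform(gdev, req);	(VBox status, not errno)
 *	vbg_req_free(req, sizeof(*req));
 *
 *	return vbg_status_code_to_errno(rc);
 */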

static bool hgcm_req_done(struct vbg_dev *gdev,
			  struct vmmdev_hgcmreq_header *header)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	done = header->flags & VMMDEV_HGCM_REQ_DONE;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return done;
}

int vbg_hgcm_connect(struct vbg_dev *gdev,
		     struct vmmdev_hgcm_service_location *loc,
		     u32 *client_id, int *vbox_status)
{
	struct vmmdev_hgcm_connect *hgcm_connect = NULL;
	int rc;

	hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
				     VMMDEVREQ_HGCM_CONNECT);
	if (!hgcm_connect)
		return -ENOMEM;

	hgcm_connect->header.flags = 0;
	memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
	hgcm_connect->client_id = 0;

	rc = vbg_req_perform(gdev, hgcm_connect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_connect->header));

	if (rc >= 0) {
		*client_id = hgcm_connect->client_id;
		rc = hgcm_connect->header.result;
	}

	vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_connect);

int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
{
	struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
	int rc;

	hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
					VMMDEVREQ_HGCM_DISCONNECT);
	if (!hgcm_disconnect)
		return -ENOMEM;

	hgcm_disconnect->header.flags = 0;
	hgcm_disconnect->client_id = client_id;

	rc = vbg_req_perform(gdev, hgcm_disconnect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_disconnect->header));

	if (rc >= 0)
		rc = hgcm_disconnect->header.result;

	vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_disconnect);
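
/*
 * Example (illustrative sketch): a minimal connect / disconnect round
 * trip, assuming @gdev is valid and @loc has already been filled in
 * with the desired service location. Note the two-level error scheme
 * used throughout this file: a negative return value is a kernel
 * errno, while *vbox_status carries the VBox status code from the host.
 *
 *	u32 client_id;
 *	int ret, vbox_status;
 *
 *	ret = vbg_hgcm_connect(gdev, &loc, &client_id, &vbox_status);
 *	if (ret < 0)
 *		return ret;
 *	if (vbox_status < 0)
 *		return vbg_status_code_to_errno(vbox_status);
 *
 *	(... use client_id with vbg_hgcm_call() ...)
 *
 *	vbg_hgcm_disconnect(gdev, client_id, &vbox_status);
 */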

static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
{
	u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));

	return size >> PAGE_SHIFT;
}

static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
{
	u32 page_count;

	page_count = hgcm_call_buf_size_in_pages(buf, len);
	*extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
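
/*
 * Worked example of the calculation above (assuming 4 KiB pages): a
 * buffer with len = 8192 starting at offset 0x100 into its first page
 * straddles three pages, since PAGE_ALIGN(8192 + 0x100) = 12288 =
 * 3 * PAGE_SIZE. hgcm_call_buf_size_in_pages() thus returns 3, and the
 * page list needs room for three page addresses.
 */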

static int hgcm_call_preprocess_linaddr(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	void **bounce_buf_ret, size_t *extra)
{
	void *buf, *bounce_buf;
	bool copy_in;
	u32 len;
	int ret;

	buf = (void *)src_parm->u.pointer.u.linear_addr;
	len = src_parm->u.pointer.size;
	copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;

	if (len > VBG_MAX_HGCM_USER_PARM)
		return -E2BIG;

	bounce_buf = kvmalloc(len, GFP_KERNEL);
	if (!bounce_buf)
		return -ENOMEM;

	/*
	 * Store the buffer before the copy below, so that the caller's
	 * cleanup path frees it even when copy_from_user() fails.
	 */
	*bounce_buf_ret = bounce_buf;

	if (copy_in) {
		ret = copy_from_user(bounce_buf, (void __user *)buf, len);
		if (ret)
			return -EFAULT;
	} else {
		memset(bounce_buf, 0, len);
	}

	hgcm_call_add_pagelist_size(bounce_buf, len, extra);
	return 0;
}

/**
 * Preprocesses the HGCM call: validates parameters, allocates bounce
 * buffers and figures out how much extra storage we need for page lists.
 * @src_parm:        Pointer to source function call parameters.
 * @parm_count:      Number of function call parameters.
 * @bounce_bufs_ret: Where to return the allocated bounce-buffer array.
 * @extra:           Where to return the extra request space needed for
 *                   physical page lists.
 *
 * Return: 0 or negative errno value.
 */
static int hgcm_call_preprocess(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
{
	void *buf, **bounce_bufs = NULL;
	u32 i, len;
	int ret;

	for (i = 0; i < parm_count; i++, src_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			if (!bounce_bufs) {
				bounce_bufs = kcalloc(parm_count,
						      sizeof(void *),
						      GFP_KERNEL);
				if (!bounce_bufs)
					return -ENOMEM;

				*bounce_bufs_ret = bounce_bufs;
			}

			ret = hgcm_call_preprocess_linaddr(src_parm,
							   &bounce_bufs[i],
							   extra);
			if (ret)
				return ret;

			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			len = src_parm->u.pointer.size;
			if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
				return -E2BIG;

			hgcm_call_add_pagelist_size(buf, len, extra);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * Translates linear address types to page list direction flags.
 * @type: The linear address parameter type.
 *
 * Return: page list flags.
 */
static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
	enum vmmdev_hgcm_function_parameter_type type)
{
	switch (type) {
	default:
		WARN_ON(1);
		/* Fall through */
	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
		return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
	}
}

static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
	enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
{
	struct vmmdev_hgcm_pagelist *dst_pg_lst;
	struct page *page;
	bool is_vmalloc;
	u32 i, page_count;

	dst_parm->type = type;

	if (len == 0) {
		dst_parm->u.pointer.size = 0;
		dst_parm->u.pointer.u.linear_addr = 0;
		return;
	}

	dst_pg_lst = (void *)call + *off_extra;
	page_count = hgcm_call_buf_size_in_pages(buf, len);
	is_vmalloc = is_vmalloc_addr(buf);

	dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
	dst_parm->u.page_list.size = len;
	dst_parm->u.page_list.offset = *off_extra;
	dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
	dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
	dst_pg_lst->page_count = page_count;

	for (i = 0; i < page_count; i++) {
		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		dst_pg_lst->pages[i] = page_to_phys(page);
		buf += PAGE_SIZE;
	}

	*off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
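
/*
 * Worked example of the conversion above (assuming 4 KiB pages): a
 * kernel buffer at offset 0x10 into its first page with len = 8000
 * spans two pages (PAGE_ALIGN(8000 + 0x10) = 8192 = 2 * PAGE_SIZE).
 * The parameter is rewritten to VMMDEV_HGCM_PARM_TYPE_PAGELIST, and
 * the page list written at *off_extra gets offset_first_page = 0x10,
 * page_count = 2 and the two physical page addresses.
 */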

/**
 * Initializes the call request that we're sending to the host.
 * @call:        The call to initialize.
 * @client_id:   The client ID of the caller.
 * @function:    The function number of the function to call.
 * @src_parm:    Pointer to source function call parameters.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bounce-buffer array.
 */
static void hgcm_call_init_call(
	struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void **bounce_bufs)
{
	struct vmmdev_hgcm_function_parameter *dst_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
	void *buf;

	call->header.flags = 0;
	call->header.result = VINF_SUCCESS;
	call->client_id = client_id;
	call->function = function;
	call->parm_count = parm_count;

	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			hgcm_call_init_linaddr(call, dst_parm, buf,
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		default:
			WARN_ON(1);
			dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
		}
	}
}

/**
 * Tries to cancel a pending HGCM call.
 *
 * Return: VBox status code.
 */
static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
{
	int rc;

	/*
	 * We use a pre-allocated request for cancellations, which is
	 * protected by cancel_req_mutex. This means that all cancellations
	 * get serialized, which should be fine since they should be rare.
	 */
	mutex_lock(&gdev->cancel_req_mutex);
	gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
	rc = vbg_req_perform(gdev, gdev->cancel_req);
	mutex_unlock(&gdev->cancel_req_mutex);

	if (rc == VERR_NOT_IMPLEMENTED) {
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
		call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;

		rc = vbg_req_perform(gdev, call);
		if (rc == VERR_INVALID_PARAMETER)
			rc = VERR_NOT_FOUND;
	}

	if (rc >= 0)
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;

	return rc;
}

/**
 * Performs the call and completion wait.
 * @gdev:       The VBoxGuest device extension.
 * @call:       The call to execute.
 * @timeout_ms: Timeout in ms.
 * @leak_it:    Where to return the leak-it / free-it indicator: set when
 *              cancellation fails and the host may still write to the
 *              request, so it must be leaked rather than freed.
 *
 * Return: 0 or negative errno value.
 */
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
			    u32 timeout_ms, bool *leak_it)
{
	int rc, cancel_rc, ret;
	long timeout;

	*leak_it = false;

	rc = vbg_req_perform(gdev, call);

	/*
	 * If the call failed, then pretend success. Upper layers will
	 * interpret the result code in the packet.
	 */
	if (rc < 0) {
		call->header.result = rc;
		return 0;
	}

	if (rc != VINF_HGCM_ASYNC_EXECUTE)
		return 0;

	/* Host decided to process the request asynchronously, wait for it */
	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	timeout = wait_event_interruptible_timeout(
					gdev->hgcm_wq,
					hgcm_req_done(gdev, &call->header),
					timeout);

	/* timeout > 0 means hgcm_req_done has returned true, so success */
	if (timeout > 0)
		return 0;

	if (timeout == 0)
		ret = -ETIMEDOUT;
	else
		ret = -EINTR;

	/* Cancel the request */
	cancel_rc = hgcm_cancel_call(gdev, call);
	if (cancel_rc >= 0)
		return ret;

	/*
	 * Failed to cancel, this should mean that the cancel has lost the
	 * race with normal completion, wait while the host completes it.
	 */
	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
		timeout = msecs_to_jiffies(500);
	else
		timeout = msecs_to_jiffies(2000);

	timeout = wait_event_timeout(gdev->hgcm_wq,
				     hgcm_req_done(gdev, &call->header),
				     timeout);

	if (WARN_ON(timeout == 0)) {
		/* We really should never get here */
		vbg_err("%s: Call timed out and cancellation failed, leaking the request\n",
			__func__);
		*leak_it = true;
		return ret;
	}

	/* The call has completed normally after all */
	return 0;
}

/**
 * Copies the result of the call back to the caller info structure and user
 * buffers.
 * @call:        HGCM call request.
 * @dst_parm:    Pointer to function call parameters destination.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bounce-buffer array.
 *
 * Return: 0 or negative errno value.
 */
static int hgcm_call_copy_back_result(
	const struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm,
	u32 parm_count, void **bounce_bufs)
{
	const struct vmmdev_hgcm_function_parameter *src_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	void __user *p;
	int ret;
	u32 i;

	/* Copy back parameters. */
	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (dst_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
			dst_parm->u.page_list.size = src_parm->u.page_list.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;

			p = (void __user *)dst_parm->u.pointer.u.linear_addr;
			ret = copy_to_user(p, bounce_bufs[i],
					   min(src_parm->u.pointer.size,
					       dst_parm->u.pointer.size));
			if (ret)
				return -EFAULT;
			break;

		default:
			WARN_ON(1);
			return -EINVAL;
		}
	}

	return 0;
}

int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
		  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
		  u32 parm_count, int *vbox_status)
{
	struct vmmdev_hgcm_call *call;
	void **bounce_bufs = NULL;
	bool leak_it;
	size_t size;
	int i, ret;

	size = sizeof(struct vmmdev_hgcm_call) +
		   parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	/*
	 * Validate and buffer the parameters for the call. This also
	 * increases size with the amount of extra space needed for
	 * page lists.
	 */
	ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
	if (ret) {
		/* Even on error bounce bufs may still have been allocated */
		goto free_bounce_bufs;
	}

	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
	if (!call) {
		ret = -ENOMEM;
		goto free_bounce_bufs;
	}

	hgcm_call_init_call(call, client_id, function, parms, parm_count,
			    bounce_bufs);

	ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
	if (ret == 0) {
		*vbox_status = call->header.result;
		ret = hgcm_call_copy_back_result(call, parms, parm_count,
						 bounce_bufs);
	}

	if (!leak_it)
		vbg_req_free(call, size);

free_bounce_bufs:
	if (bounce_bufs) {
		for (i = 0; i < parm_count; i++)
			kvfree(bounce_bufs[i]);
		kfree(bounce_bufs);
	}

	return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call);
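
/*
 * Example (illustrative sketch): calling a host service function with a
 * single 32-bit parameter. The function number (1) and the value are
 * placeholders; client_id comes from a prior vbg_hgcm_connect(), and
 * U32_MAX means "no timeout".
 *
 *	struct vmmdev_hgcm_function_parameter parm = {
 *		.type = VMMDEV_HGCM_PARM_TYPE_32BIT,
 *		.u.value32 = 42,
 *	};
 *	int ret, vbox_status;
 *
 *	ret = vbg_hgcm_call(gdev, client_id, 1, U32_MAX, &parm, 1,
 *			    &vbox_status);
 *	if (ret < 0)
 *		return ret;
 *	if (vbox_status < 0)
 *		return vbg_status_code_to_errno(vbox_status);
 */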

#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
	int *vbox_status)
{
	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
	u32 i, size;
	int ret = 0;

	/* KISS: allocate a temporary request and convert the parameters. */
	size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	parm64 = kzalloc(size, GFP_KERNEL);
	if (!parm64)
		return -ENOMEM;

	for (i = 0; i < parm_count; i++) {
		switch (parm32[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
			parm64[i].u.value32 = parm32[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
			parm64[i].u.value64 = parm32[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm64[i].type = parm32[i].type;
			parm64[i].u.pointer.size = parm32[i].u.pointer.size;
			parm64[i].u.pointer.u.linear_addr =
				parm32[i].u.pointer.u.linear_addr;
			break;

		default:
			ret = -EINVAL;
		}
		if (ret < 0)
			goto out_free;
	}

	ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
			    parm64, parm_count, vbox_status);
	if (ret < 0)
		goto out_free;

	/*
	 * Copy back. Note: index with i instead of advancing the parm32 /
	 * parm64 pointers; advancing them alongside the [i] indexing would
	 * skip elements and leave parm64 wrong for the kfree() below.
	 */
	for (i = 0; i < parm_count; i++) {
		switch (parm64[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm32[i].u.value32 = parm64[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm32[i].u.value64 = parm64[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm32[i].u.pointer.size = parm64[i].u.pointer.size;
			break;

		default:
			WARN_ON(1);
			ret = -EINVAL;
		}
	}

out_free:
	kfree(parm64);
	return ret;
}
#endif

static const int vbg_status_code_to_errno_table[] = {
	[-VERR_ACCESS_DENIED]                            = -EPERM,
	[-VERR_FILE_NOT_FOUND]                           = -ENOENT,
	[-VERR_PROCESS_NOT_FOUND]                        = -ESRCH,
	[-VERR_INTERRUPTED]                              = -EINTR,
	[-VERR_DEV_IO_ERROR]                             = -EIO,
	[-VERR_TOO_MUCH_DATA]                            = -E2BIG,
	[-VERR_BAD_EXE_FORMAT]                           = -ENOEXEC,
	[-VERR_INVALID_HANDLE]                           = -EBADF,
	[-VERR_TRY_AGAIN]                                = -EAGAIN,
	[-VERR_NO_MEMORY]                                = -ENOMEM,
	[-VERR_INVALID_POINTER]                          = -EFAULT,
	[-VERR_RESOURCE_BUSY]                            = -EBUSY,
	[-VERR_ALREADY_EXISTS]                           = -EEXIST,
	[-VERR_NOT_SAME_DEVICE]                          = -EXDEV,
	[-VERR_NOT_A_DIRECTORY]                          = -ENOTDIR,
	[-VERR_PATH_NOT_FOUND]                           = -ENOTDIR,
	[-VERR_INVALID_NAME]                             = -ENOENT,
	[-VERR_IS_A_DIRECTORY]                           = -EISDIR,
	[-VERR_INVALID_PARAMETER]                        = -EINVAL,
	[-VERR_TOO_MANY_OPEN_FILES]                      = -ENFILE,
	[-VERR_INVALID_FUNCTION]                         = -ENOTTY,
	[-VERR_SHARING_VIOLATION]                        = -ETXTBSY,
	[-VERR_FILE_TOO_BIG]                             = -EFBIG,
	[-VERR_DISK_FULL]                                = -ENOSPC,
	[-VERR_SEEK_ON_DEVICE]                           = -ESPIPE,
	[-VERR_WRITE_PROTECT]                            = -EROFS,
	[-VERR_BROKEN_PIPE]                              = -EPIPE,
	[-VERR_DEADLOCK]                                 = -EDEADLK,
	[-VERR_FILENAME_TOO_LONG]                        = -ENAMETOOLONG,
	[-VERR_FILE_LOCK_FAILED]                         = -ENOLCK,
	[-VERR_NOT_IMPLEMENTED]                          = -ENOSYS,
	[-VERR_NOT_SUPPORTED]                            = -ENOSYS,
	[-VERR_DIR_NOT_EMPTY]                            = -ENOTEMPTY,
	[-VERR_TOO_MANY_SYMLINKS]                        = -ELOOP,
	[-VERR_NO_MORE_FILES]                            = -ENODATA,
	[-VERR_NO_DATA]                                  = -ENODATA,
	[-VERR_NET_NO_NETWORK]                           = -ENONET,
	[-VERR_NET_NOT_UNIQUE_NAME]                      = -ENOTUNIQ,
	[-VERR_NO_TRANSLATION]                           = -EILSEQ,
	[-VERR_NET_NOT_SOCKET]                           = -ENOTSOCK,
	[-VERR_NET_DEST_ADDRESS_REQUIRED]                = -EDESTADDRREQ,
	[-VERR_NET_MSG_SIZE]                             = -EMSGSIZE,
	[-VERR_NET_PROTOCOL_TYPE]                        = -EPROTOTYPE,
	[-VERR_NET_PROTOCOL_NOT_AVAILABLE]               = -ENOPROTOOPT,
	[-VERR_NET_PROTOCOL_NOT_SUPPORTED]               = -EPROTONOSUPPORT,
	[-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED]            = -ESOCKTNOSUPPORT,
	[-VERR_NET_OPERATION_NOT_SUPPORTED]              = -EOPNOTSUPP,
	[-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED]        = -EPFNOSUPPORT,
	[-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED]         = -EAFNOSUPPORT,
	[-VERR_NET_ADDRESS_IN_USE]                       = -EADDRINUSE,
	[-VERR_NET_ADDRESS_NOT_AVAILABLE]                = -EADDRNOTAVAIL,
	[-VERR_NET_DOWN]                                 = -ENETDOWN,
	[-VERR_NET_UNREACHABLE]                          = -ENETUNREACH,
	[-VERR_NET_CONNECTION_RESET]                     = -ENETRESET,
	[-VERR_NET_CONNECTION_ABORTED]                   = -ECONNABORTED,
	[-VERR_NET_CONNECTION_RESET_BY_PEER]             = -ECONNRESET,
	[-VERR_NET_NO_BUFFER_SPACE]                      = -ENOBUFS,
	[-VERR_NET_ALREADY_CONNECTED]                    = -EISCONN,
	[-VERR_NET_NOT_CONNECTED]                        = -ENOTCONN,
	[-VERR_NET_SHUTDOWN]                             = -ESHUTDOWN,
	[-VERR_NET_TOO_MANY_REFERENCES]                  = -ETOOMANYREFS,
	[-VERR_TIMEOUT]                                  = -ETIMEDOUT,
	[-VERR_NET_CONNECTION_REFUSED]                   = -ECONNREFUSED,
	[-VERR_NET_HOST_DOWN]                            = -EHOSTDOWN,
	[-VERR_NET_HOST_UNREACHABLE]                     = -EHOSTUNREACH,
	[-VERR_NET_ALREADY_IN_PROGRESS]                  = -EALREADY,
	[-VERR_NET_IN_PROGRESS]                          = -EINPROGRESS,
	[-VERR_MEDIA_NOT_PRESENT]                        = -ENOMEDIUM,
	[-VERR_MEDIA_NOT_RECOGNIZED]                     = -EMEDIUMTYPE,
};

int vbg_status_code_to_errno(int rc)
{
	if (rc >= 0)
		return 0;

	rc = -rc;
	if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
	    vbg_status_code_to_errno_table[rc] == 0) {
		vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
		return -EPROTO;
	}

	return vbg_status_code_to_errno_table[rc];
}
EXPORT_SYMBOL(vbg_status_code_to_errno);
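
/*
 * Example (illustrative): a caller that only cares about errnos can
 * collapse the two-level error scheme used throughout this file:
 *
 *	int ret, vbox_status;
 *
 *	ret = vbg_hgcm_disconnect(gdev, client_id, &vbox_status);
 *	if (ret == 0)
 *		ret = vbg_status_code_to_errno(vbox_status);
 *
 * ret is now 0 or a negative errno, e.g. VERR_NO_MEMORY maps to
 * -ENOMEM, and any code missing from the table maps to -EPROTO.
 */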