/* Get the pointer to the first parameter of a HGCM call request. */
#define VMMDEV_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))
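/*
 * Illustrative sketch (not part of the original file): the function
 * parameters of a call request sit directly after the fixed
 * vmmdev_hgcm_call header, so they can be walked as below. The helper
 * name is made up for the example.
 */
static inline void example_dump_parm_types(struct vmmdev_hgcm_call *call)
{
	struct vmmdev_hgcm_function_parameter *parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	u32 i;

	for (i = 0; i < call->parm_count; i++, parm++)
		pr_debug("parm %u: type %u\n", i, parm->type);
}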
/* The max parameter buffer size for a user request. */
#define VBG_MAX_HGCM_USER_PARM		(24 * SZ_1M)
/* The max parameter buffer size for a kernel request. */
#define VBG_MAX_HGCM_KERNEL_PARM	(16 * SZ_1M)
#define VBG_DEBUG_PORT 0x504
/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
static DEFINE_SPINLOCK(vbg_log_lock);
static char vbg_log_buf[128];
/* Note this function returns a VBox status code, not a negative errno!! */
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
	unsigned long phys_req = virt_to_phys(req);

	outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
	/*
	 * The host changes the request as a result of the outl, make sure
	 * the outl and any reads of the req happen in the correct order.
	 */
	mb();

	return ((struct vmmdev_request_header *)req)->rc;
}
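/*
 * Illustrative usage sketch (not part of the original file): callers of
 * vbg_req_perform() get a VBox status code back and typically translate
 * it to an errno with vbg_status_code_to_errno(). The request type and
 * struct shown are just one example.
 */
static int example_perform_request(struct vbg_dev *gdev)
{
	struct vmmdev_guest_info *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_INFO,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	/* Fill in the request-specific fields here before performing it. */

	rc = vbg_req_perform(gdev, req);	/* VBox status, not errno! */

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}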
/**
 * hgcm_call_preprocess - Preprocesses the HGCM call, validate parameters,
 * alloc bounce buffers and figure out how much extra storage we need for
 * page lists.
 * @src_parm:        Pointer to source function call parameters
 * @parm_count:      Number of function call parameters.
 * @bounce_bufs_ret: Where to return the allocated bouncebuffer array
 * @extra:           Where to return the extra request space needed for
 *                   physical page lists.
 *
 * Return: %0 or negative errno value.
 */
static int hgcm_call_preprocess(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
{
	void *buf, **bounce_bufs = NULL;
	u32 i, len;
	int ret;
	for (i = 0; i < parm_count; i++, src_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			if (!bounce_bufs) {
				bounce_bufs = kcalloc(parm_count,
						      sizeof(void *),
						      GFP_KERNEL);
				if (!bounce_bufs)
					return -ENOMEM;

				*bounce_bufs_ret = bounce_bufs;
			}

			ret = hgcm_call_preprocess_linaddr(src_parm,
							   &bounce_bufs[i],
							   extra);
			if (ret)
				return ret;

			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			len = src_parm->u.pointer.size;
			if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
				return -E2BIG;

			hgcm_call_add_pagelist_size(buf, len, extra);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}
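/*
 * Sketch of the extra-space bookkeeping above (an assumption, based on the
 * page-list layout): for a kernel buffer spanning n pages the preprocess
 * step grows *extra by roughly
 *
 *	offsetof(struct vmmdev_hgcm_pagelist, pages[n])
 *
 * so that the call-init step below can append one physical page list per
 * buffer parameter behind the parameter array.
 */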
/**
 * hgcm_call_init_call - Initializes the call request that we're sending
 * to the host.
 * @call:        The call to initialize.
 * @client_id:   The client ID of the caller.
 * @function:    The function number of the function to call.
 * @src_parm:    Pointer to source function call parameters.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bouncebuffer array.
 */
static void hgcm_call_init_call(
	struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void **bounce_bufs)
{
	struct vmmdev_hgcm_function_parameter *dst_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
	void *buf;

	call->header.flags = 0;
	call->header.result = VINF_SUCCESS;
	call->client_id = client_id;
	call->function = function;
	call->parm_count = parm_count;
	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			hgcm_call_init_linaddr(call, dst_parm, buf,
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		default:
			WARN_ON(1);
			dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
		}
	}
}
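/*
 * Illustrative layout sketch (not in the original source): for a request
 * with two parameters, the buffer built above looks like
 *
 *	[ struct vmmdev_hgcm_call ][ parm 0 ][ parm 1 ][ page lists ... ]
 *	                           ^                   ^
 *	                           VMMDEV_HGCM_CALL_PARMS(call)
 *	                                               off_extra starts here
 *
 * and hgcm_call_init_linaddr() advances off_extra as it appends a physical
 * page list for each buffer parameter.
 */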
/**
 * hgcm_cancel_call - Tries to cancel a pending HGCM call.
 * @gdev: The VBoxGuest device extension.
 * @call: The call to cancel.
 *
 * Return: VBox status code
 */
static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
{
	int rc;
	/*
	 * We use a pre-allocated request for cancellations, which is
	 * protected by cancel_req_mutex. This means that all cancellations
	 * get serialized, this should be fine since they should be rare.
	 */
mutex_lock(&gdev->cancel_req_mutex);
gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
rc = vbg_req_perform(gdev, gdev->cancel_req);
mutex_unlock(&gdev->cancel_req_mutex);
if (rc >= 0)
call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
return rc;
}
/**
 * vbg_hgcm_do_call - Performs the call and completion wait.
 * @gdev:          The VBoxGuest device extension.
 * @call:          The call to execute.
 * @timeout_ms:    Timeout in ms.
 * @interruptible: Whether this call is interruptible.
 * @leak_it:       Where to return the leak-it / free-it indicator; set to
 *                 true when a failed cancellation means the host may still
 *                 write to the request, so it must be leaked, not freed.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
			    u32 timeout_ms, bool interruptible, bool *leak_it)
{
	int rc, cancel_rc, ret;
	long timeout;
*leak_it = false;
rc = vbg_req_perform(gdev, call);
	/*
	 * If the call failed, then pretend success. Upper layers will
	 * interpret the result code in the packet.
	 */
	if (rc < 0) {
		call->header.result = rc;
		return 0;
	}
	if (rc != VINF_HGCM_ASYNC_EXECUTE)
		return 0;
	/* Host decided to process the request asynchronously, wait for it */
	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	if (interruptible) {
		timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
							   hgcm_req_done(gdev, &call->header),
							   timeout);
	} else {
		timeout = wait_event_timeout(gdev->hgcm_wq,
					     hgcm_req_done(gdev, &call->header),
					     timeout);
	}
	/* timeout > 0 means hgcm_req_done has returned true, so success */
	if (timeout > 0)
		return 0;
	if (timeout == 0)
		ret = -ETIMEDOUT;
	else
		ret = -EINTR;
/* Cancel the request */
	cancel_rc = hgcm_cancel_call(gdev, call);
	if (cancel_rc >= 0)
		return ret;
	/*
	 * Failed to cancel, this should mean that the cancel has lost the
	 * race with normal completion, wait while the host completes it.
	 */
	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
		timeout = msecs_to_jiffies(500);
	else
		timeout = msecs_to_jiffies(2000);

	timeout = wait_event_timeout(gdev->hgcm_wq,
				     hgcm_req_done(gdev, &call->header),
				     timeout);
	if (WARN_ON(timeout == 0)) {
		/* We really should never get here */
		vbg_err("%s: Call timed out and cancellation failed, leaking the request\n",
			__func__);
		*leak_it = true;
		return ret;
	}
	/* The call has completed normally after all */
	return 0;
}
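/*
 * For reference (standard kernel wait-queue semantics relied upon above):
 * wait_event_timeout() and wait_event_interruptible_timeout() return the
 * remaining jiffies (> 0) when the condition became true, 0 on timeout,
 * and the interruptible variant returns -ERESTARTSYS when interrupted by
 * a signal, which is why timeout > 0 / == 0 / < 0 maps to success /
 * -ETIMEDOUT / -EINTR above.
 */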
/**
 * hgcm_call_copy_back_result - Copies the result of the call back to
 * the caller info structure and user buffers.
 * @call:        HGCM call request.
 * @dst_parm:    Pointer to function call parameters destination.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bouncebuffer array.
 *
 * Return: %0 or negative errno value.
 */
static int hgcm_call_copy_back_result(
	const struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm,
	u32 parm_count, void **bounce_bufs)
{
	const struct vmmdev_hgcm_function_parameter *src_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	void __user *p;
	int ret;
	u32 i;
	/* Copy back parameters. */
	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (dst_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;
case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
			dst_parm->u.page_list.size = src_parm->u.page_list.size;
			break;
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;
			break;
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
dst_parm->u.pointer.size = src_parm->u.pointer.size;
p = (void __user *)dst_parm->u.pointer.u.linear_addr;
			ret = copy_to_user(p, bounce_bufs[i],
					   min(src_parm->u.pointer.size,
					       dst_parm->u.pointer.size));
			if (ret)
				return -EFAULT;

			break;
default:
			WARN_ON(1);
			return -EINVAL;
}
}
return 0;
}
int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
		  u32 function, u32 timeout_ms,
		  struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
		  int *vbox_status)
{
	struct vmmdev_hgcm_call *call;
	void **bounce_bufs = NULL;
	bool leak_it;
	size_t size;
	int i, ret;
	size = sizeof(struct vmmdev_hgcm_call) +
	       parm_count * sizeof(struct vmmdev_hgcm_function_parameter);

	/*
	 * Validate and buffer the parameters for the call. This also increases
	 * size with the amount of extra space needed for page lists.
	 */
	ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
	if (ret) {
		/* Even on error bounce bufs may still have been allocated */
		goto free_bounce_bufs;
	}
	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
	if (!call) {
		ret = -ENOMEM;
		goto free_bounce_bufs;
	}

	hgcm_call_init_call(call, client_id, function, parms, parm_count,
			    bounce_bufs);
	ret = vbg_hgcm_do_call(gdev, call, timeout_ms,
			       requestor & VMMDEV_REQUESTOR_USERMODE, &leak_it);
	if (ret == 0) {
*vbox_status = call->header.result;
ret = hgcm_call_copy_back_result(call, parms, parm_count,
bounce_bufs);
}
if (!leak_it)
vbg_req_free(call, size);
free_bounce_bufs:
	if (bounce_bufs) {
		for (i = 0; i < parm_count; i++)
kvfree(bounce_bufs[i]);
kfree(bounce_bufs);
}
return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call);
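/*
 * Illustrative usage sketch (not part of the original file): a guest driver
 * calling an HGCM service function through vbg_hgcm_call(). The function
 * number, the single 32-bit parameter, and the helper name are all made up
 * for the example; real services define their own protocol. U32_MAX as
 * timeout_ms means wait forever, per vbg_hgcm_do_call() above.
 */
static int example_hgcm_query(struct vbg_dev *gdev, u32 client_id, u32 *out)
{
	struct vmmdev_hgcm_function_parameter parm = {
		.type = VMMDEV_HGCM_PARM_TYPE_32BIT,
	};
	int vbox_status, ret;

	ret = vbg_hgcm_call(gdev, VBG_KERNEL_REQUEST, client_id,
			    2 /* hypothetical function number */, U32_MAX,
			    &parm, 1, &vbox_status);
	if (ret < 0)
		return ret;				/* transport error */
	if (vbox_status < 0)
		return vbg_status_code_to_errno(vbox_status); /* host error */

	*out = parm.u.value32;
	return 0;
}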
#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
	struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
u32 parm_count, int *vbox_status)
{
	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
	u32 i, size;
	int ret = 0;
/* KISS allocate a temporary request and convert the parameters. */
size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	parm64 = kzalloc(size, GFP_KERNEL);
	if (!parm64)
		return -ENOMEM;
	for (i = 0; i < parm_count; i++) {
		switch (parm32[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
			parm64[i].u.value32 = parm32[i].u.value32;
			break;
case VMMDEV_HGCM_PARM_TYPE_64BIT:
parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
			parm64[i].u.value64 = parm32[i].u.value64;
			break;
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
parm64[i].type = parm32[i].type;
parm64[i].u.pointer.size = parm32[i].u.pointer.size;
parm64[i].u.pointer.u.linear_addr =
				parm32[i].u.pointer.u.linear_addr;
			break;
default:
ret = -EINVAL;
		}

		if (ret < 0)
			goto out_free;
}
ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
			    parm64, parm_count, vbox_status);
	if (ret < 0)
		goto out_free;
	/* Copy back. */
	for (i = 0; i < parm_count; i++) {
		switch (parm64[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm32[i].u.value32 = parm64[i].u.value32;
			break;
case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm32[i].u.value64 = parm64[i].u.value64;
			break;
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm32[i].u.pointer.size = parm64[i].u.pointer.size;
			break;

		default:
			WARN_ON(1);
			ret = -EINVAL;
		}
	}

out_free:
	kfree(parm64);
	return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call32);
#endif