/* * Determines whether the VMCI host personality is * available. Since the core functionality of the host driver is * always present, all guests could possibly use the host * personality. However, to minimize the deviation from the * pre-unified driver state of affairs, we only consider the host * device active if there is no active guest device or if there * are VMX'en with active VMCI contexts using the host device.
*/ bool vmci_host_code_active(void)
{ return vmci_host_device_initialized &&
(!vmci_guest_code_active() ||
atomic_read(&vmci_host_active_users) > 0);
}
/*
 * Return the current number of active host users (the count kept in
 * vmci_host_active_users).
 */
int vmci_host_users(void)
{
	int active_users = atomic_read(&vmci_host_active_users);

	return active_users;
}
/*
 * Called on open of /dev/vmci.
 *
 * NOTE(review): the body of this function is truncated in this
 * extract, and "staticint" looks like a fused "static int" from the
 * extraction -- confirm against the full source.
 */
staticint vmci_host_open(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev;
/*
 * Called on close of /dev/vmci, most often when the process exits.
 *
 * NOTE(review): truncated in this extract -- the remainder of the
 * function (after resetting ct_type) is not visible.  "staticint"
 * looks like a fused "static int" from the extraction.
 */
staticint vmci_host_close(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;

		/*
		 * The number of active contexts is used to track whether
		 * any VMX'en are using the host personality.  It is
		 * incremented when a context is created through the
		 * IOCTL_VMCI_INIT_CONTEXT ioctl, so decrement it here as
		 * the context is destroyed.
		 */
		atomic_dec(&vmci_host_active_users);
	}
	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
/*
 * Poll handler for /dev/vmci.  This is used to wake up the VMX when a
 * VMCI call arrives, or to wake up select() or poll() at the next
 * clock tick.
 *
 * NOTE(review): truncated in this extract -- the computation of the
 * returned mask and the function's closing brace are not visible.
 */
static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	struct vmci_ctx *context;
	__poll_t mask = 0;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		/*
		 * Read context only if ct_type == VMCIOBJ_CONTEXT to make
		 * sure that context is initialized.
		 */
		context = vmci_host_dev->context;

		/* Check for VMCI calls to this VM context. */
		if (wait)
			poll_wait(filp, &context->host_context.wait_queue,
				  wait);
/*
 * Copies the handles of a handle array into a user buffer, and
 * returns the new length in *user_buf_size.  If the copy to the user
 * buffer fails, the function still returns VMCI_SUCCESS, but
 * *retval != 0.
 *
 * NOTE(review): truncated in this extract -- the actual copy to user
 * space and the success path are not visible.  "staticint" looks like
 * a fused "static int" from the extraction.
 */
staticint drv_cp_harray_to_user(void __user *user_buf_uva,
				u64 *user_buf_size,
				struct vmci_handle_arr *handle_array,
				int *retval)
{
	u32 array_size = 0;
	struct vmci_handle *handles;

	if (handle_array)
		array_size = vmci_handle_arr_get_size(handle_array);

	/* Bail out early if the handles will not fit in the user buffer. */
	if (array_size * sizeof(*handles) > *user_buf_size)
		return VMCI_ERROR_MORE_DATA;
/* * Sets up a given context for notify to work. Maps the notify * boolean in user VA into kernel space.
*/ staticint vmci_host_setup_notify(struct vmci_ctx *context, unsignedlong uva)
{ struct page *page; int retval;
if (context->notify_page) {
pr_devel("%s: Notify mechanism is already set up\n", __func__); return VMCI_ERROR_DUPLICATE_ENTRY;
}
/* * We are using 'bool' internally, but let's make sure we explicit * about the size.
*/
BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
/* * Lock physical page backing a given user VA.
*/
retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &page); if (retval != 1) return VMCI_ERROR_GENERIC;
context->notify_page = page;
/* * Map the locked page and set up notify pointer.
*/
context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
vmci_ctx_check_signal_notify(context);
return VMCI_SUCCESS;
}
/*
 * Handles IOCTL_VMCI_VERSION and IOCTL_VMCI_VERSION2.  For VERSION2,
 * the caller first supplies its own version via *uptr, which is stored
 * in vmci_host_dev->user_version.
 *
 * NOTE(review): truncated in this extract -- the version-selection
 * logic described below and the return statement are not visible.
 * "staticint" and "unsignedint" look like fused tokens from the
 * extraction.
 */
staticint vmci_host_get_version(struct vmci_host_dev *vmci_host_dev, unsignedint cmd, void __user *uptr)
{
	if (cmd == IOCTL_VMCI_VERSION2) {
		int __user *vptr = uptr;
		if (get_user(vmci_host_dev->user_version, vptr))
			return -EFAULT;
	}

	/*
	 * The basic logic here is:
	 *
	 * If the user sends in a version of 0 tell it our version.
	 * If the user didn't send in a version, tell it our version.
	 * If the user sent in an old version, tell it -its- version.
	 * If the user sent in a newer version, tell it our version.
	 *
	 * The rationale behind telling the caller its version is that
	 * Workstation 6.5 required that VMX and VMCI kernel module were
	 * version sync'd.  All new VMX users will be programmed to
	 * handle the VMCI kernel module version.
	 */
/*
 * NOTE(review): fragment of the IOCTL_VMCI_INIT_CONTEXT handler --
 * init_block, retval and the "out" label are declared outside this
 * extract.
 *
 * Copy cid to userlevel; we do this to allow the VMX to enforce its
 * policy on cid generation.
 */
init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
	/*
	 * Copy-out failed: tear down the context that was just created
	 * so host-side state stays consistent.
	 */
	vmci_ctx_destroy(vmci_host_dev->context);
	vmci_host_dev->context = NULL;
	vmci_ioctl_err("error writing init block\n");
	retval = -EFAULT;
	goto out;
}
/*
 * NOTE(review): fragment of the IOCTL_VMCI_QUEUEPAIR_SETVA handler --
 * set_va_info and result are declared outside this extract.
 */
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
	vmci_ioctl_err("only valid for contexts\n");
	return -EINVAL;
}

/* Rejected for VMX versions older than VMCI_VERSION_NOVMVM. */
if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
	vmci_ioctl_err("is not allowed\n");
	return -EINVAL;
}

if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
	return -EFAULT;

if (set_va_info.va) {
	/*
	 * VMX is passing down a new VA for the queue pair mapping.
	 */
	result = vmci_qp_broker_map(set_va_info.handle,
				    vmci_host_dev->context,
				    set_va_info.va);
} else {
	/*
	 * The queue pair is about to be unmapped by the VMX.
	 */
	result = vmci_qp_broker_unmap(set_va_info.handle,
				      vmci_host_dev->context, 0);
}
/*
 * NOTE(review): fragment of the IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE
 * handler -- page_file_info, info and result are declared outside this
 * extract; info presumably points at the caller's buffer (uptr) --
 * confirm against the full source.
 */
if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
    vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
	vmci_ioctl_err("not supported on this VMX (version=%d)\n",
		       vmci_host_dev->user_version);
	return -EINVAL;
}

if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
	vmci_ioctl_err("only valid for contexts\n");
	return -EINVAL;
}

if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
	return -EFAULT;

/*
 * Communicate success pre-emptively to the caller.  Note that the
 * basic premise is that it is incumbent upon the caller not to look at
 * the info.result field until after the ioctl() returns.  And then,
 * only if the ioctl() result indicates no error.  We send up the
 * SUCCESS status before calling SetPageStore() store because failing
 * to copy up the result code means unwinding the SetPageStore().
 *
 * It turns out the logic to unwind a SetPageStore() opens a can of
 * worms.  For example, if a host had created the queue_pair and a
 * guest attaches and SetPageStore() is successful but writing success
 * fails, then ... the host has to be stopped from writing (anymore)
 * data into the queue_pair.  That means an additional test in the
 * VMCI_Enqueue() code path.  Ugh.
 */
if (put_user(VMCI_SUCCESS, &info->result)) {
	/*
	 * In this case, we can't write a result field of the
	 * caller's info block.  So, we don't even try to
	 * SetPageStore().
	 */
	return -EFAULT;
}

result = vmci_qp_broker_set_page_store(page_file_info.handle,
				       page_file_info.produce_va,
				       page_file_info.consume_va,
				       vmci_host_dev->context);
if (result < VMCI_SUCCESS) {
	if (put_user(result, &info->result)) {
		/*
		 * Note that in this case the SetPageStore()
		 * call failed but we were unable to
		 * communicate that to the caller (because the
		 * copy_to_user() call failed).  So, if we
		 * simply return an error (in this case
		 * -EFAULT) then the caller will know that the
		 * SetPageStore failed even though we couldn't
		 * put the result code in the result field and
		 * indicate exactly why it failed.
		 *
		 * That says nothing about the issue where we
		 * were once able to write to the caller's info
		 * memory and now can't.  Something more
		 * serious is probably going on than the fact
		 * that SetPageStore() didn't work.
		 */
		return -EFAULT;
	}
}
/*
 * NOTE(review): fragment of the ioctl dispatch switch -- iocmd, uptr,
 * vmci_host_dev and the VMCI_DO_IOCTL macro are defined outside this
 * extract.  VMCI_DO_IOCTL presumably expands to a statement that
 * returns, so the cases below do not fall through -- confirm against
 * the macro definition.
 */
switch (iocmd) {
case IOCTL_VMCI_INIT_CONTEXT:
	VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
case IOCTL_VMCI_DATAGRAM_SEND:
	VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
case IOCTL_VMCI_DATAGRAM_RECEIVE:
	VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
case IOCTL_VMCI_QUEUEPAIR_ALLOC:
	VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
case IOCTL_VMCI_QUEUEPAIR_SETVA:
	VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
	VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
case IOCTL_VMCI_QUEUEPAIR_DETACH:
	VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
	VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
	VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
case IOCTL_VMCI_CTX_GET_CPT_STATE:
	VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
case IOCTL_VMCI_CTX_SET_CPT_STATE:
	VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
case IOCTL_VMCI_GET_CONTEXT_ID:
	VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
case IOCTL_VMCI_SET_NOTIFY:
	VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
case IOCTL_VMCI_NOTIFY_RESOURCE:
	VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
	VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);

/* Version queries are handled directly, not via the macro. */
case IOCTL_VMCI_VERSION:
case IOCTL_VMCI_VERSION2:
	return vmci_host_get_version(vmci_host_dev, iocmd, uptr);
/*
 * NOTE(review): the following text is extraction residue from a web
 * page (a German hosting disclaimer) and is not part of the driver
 * source; it should be removed.  Preserved here for reference:
 *
 *   "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *   sorgfältig zusammengestellt.  Es wird jedoch weder Vollständigkeit,
 *   noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 *   zugesichert.  Bemerkung: Die farbliche Syntaxdarstellung und die
 *   Messung sind noch experimentell."
 *
 * (English: "The information on this web page was compiled carefully
 * to the best of our knowledge.  However, neither completeness, nor
 * correctness, nor quality of the information provided is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental.")
 */