/**
 * struct hsm_ioeventfd - Properties of HSM ioeventfd
 * @list:	Entry within &acrn_vm.ioeventfds of ioeventfds of a VM
 * @eventfd:	Eventfd of the HSM ioeventfd
 * @addr:	Address of I/O range
 * @data:	Data for matching
 * @length:	Length of I/O range
 * @type:	Type of I/O range (ACRN_IOREQ_TYPE_MMIO/ACRN_IOREQ_TYPE_PORTIO)
 * @wildcard:	Data matching or not
 */
struct hsm_ioeventfd {
	struct list_head	list;
	struct eventfd_ctx	*eventfd;
	u64			addr;
	u64			data;
	int			length;
	int			type;
	bool			wildcard;
};
/* Either one is wildcard, the data matching will be skipped. */
list_for_each_entry(p, &vm->ioeventfds, list) if (p->eventfd == ioeventfd->eventfd &&
p->addr == ioeventfd->addr &&
p->type == ioeventfd->type &&
(p->wildcard || ioeventfd->wildcard ||
p->data == ioeventfd->data)) returntrue;
returnfalse;
}
/*
 * Assign an eventfd to a VM and create a HSM ioeventfd associated with the
 * eventfd. The properties of the HSM ioeventfd are built from a &struct
 * acrn_ioeventfd.
 *
 * Returns 0 on success, -EINVAL on invalid range/length, -ENOMEM on
 * allocation failure, -EEXIST on a conflicting registration, or the error
 * from acrn_ioreq_range_add().
 */
static int acrn_ioeventfd_assign(struct acrn_vm *vm,
				 struct acrn_ioeventfd *args)
{
	struct eventfd_ctx *eventfd;
	struct hsm_ioeventfd *p;
	int ret;

	/* Check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/*
	 * Currently, acrn_ioeventfd is used to support vhost. 1,2,4,8 width
	 * accesses can cover vhost's requirements.
	 */
	if (!(args->len == 1 || args->len == 2 ||
	      args->len == 4 || args->len == 8))
		return -EINVAL;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	/*
	 * Populate the I/O range properties before they are consulted by
	 * hsm_ioeventfd_is_conflict() and acrn_ioreq_range_add() below;
	 * kzalloc() left every field zero, so without these assignments an
	 * empty/bogus range would be checked and registered.
	 */
	p->eventfd = eventfd;
	p->addr = args->addr;
	p->length = args->len;
	p->type = args->type;

	/*
	 * ACRN_IOEVENTFD_FLAG_DATAMATCH flag is set in virtio 1.0 support, the
	 * writing of notification register of each virtqueue may trigger the
	 * notification. There is no data matching requirement.
	 */
	if (args->flags & ACRN_IOEVENTFD_FLAG_DATAMATCH)
		p->data = args->data;
	else
		p->wildcard = true;

	mutex_lock(&vm->ioeventfds_lock);

	if (hsm_ioeventfd_is_conflict(vm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	/* register the I/O range into ioreq client */
	ret = acrn_ioreq_range_add(vm->ioeventfd_client, p->type,
				   p->addr, p->addr + p->length - 1);
	if (ret < 0)
		goto unlock_fail;
if (req->type == ACRN_IOREQ_TYPE_MMIO) { /* * I/O requests are dispatched by range check only, so a * acrn_ioreq_client need process both READ and WRITE accesses * of same range. READ accesses are safe to be ignored here * because virtio PCI devices write the notify registers for * notification.
*/ if (req->reqs.mmio_request.direction == ACRN_IOREQ_DIR_READ) { /* reading does nothing and return 0 */
req->reqs.mmio_request.value = 0; return 0;
}
addr = req->reqs.mmio_request.address;
size = req->reqs.mmio_request.size;
val = req->reqs.mmio_request.value;
} else { if (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ) { /* reading does nothing and return 0 */
req->reqs.pio_request.value = 0; return 0;
}
addr = req->reqs.pio_request.address;
size = req->reqs.pio_request.size;
val = req->reqs.pio_request.value;
}
mutex_lock(&client->vm->ioeventfds_lock);
p = hsm_ioeventfd_match(client->vm, addr, val, size, req->type); if (p)
eventfd_signal(p->eventfd);
mutex_unlock(&client->vm->ioeventfds_lock);
return 0;
}
/*
 * Dispatch an ioeventfd configuration request: deassign the eventfd when
 * ACRN_IOEVENTFD_FLAG_DEASSIGN is set, otherwise assign it.
 * Returns the result of the chosen operation.
 */
int acrn_ioeventfd_config(struct acrn_vm *vm, struct acrn_ioeventfd *args)
{
	if (args->flags & ACRN_IOEVENTFD_FLAG_DEASSIGN)
		return acrn_ioeventfd_deassign(vm, args);

	return acrn_ioeventfd_assign(vm, args);
}
/*
 * Initialize ioeventfd support for @vm.
 * NOTE(review): this function is truncated in the visible chunk — only the
 * name-buffer declaration is shown; the remainder of the body (presumably
 * creating the ioeventfd ioreq client) lies past the end of this excerpt.
 */
int acrn_ioeventfd_init(struct acrn_vm *vm)
{ char name[ACRN_NAME_LEN];
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.