/* * Only noiommu containers can use vfio-noiommu and noiommu containers can only * use vfio-noiommu.
*/ staticbool vfio_iommu_driver_allowed(struct vfio_container *container, conststruct vfio_iommu_driver *driver)
{ if (!IS_ENABLED(CONFIG_VFIO_NOIOMMU)) returntrue; return container->noiommu == (driver->ops == &vfio_noiommu_ops);
}
/*
 * Container objects - containers are created when /dev/vfio/vfio is
 * opened, but their lifecycle extends until the last user is done, so
 * it's freed via kref.  Must support container/group/device being
 * closed in any order.
 */
static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container;

	container = container_of(kref, struct vfio_container, kref);

	/*
	 * Last reference dropped: free the container.  By this point all
	 * groups have been detached (detaching the last group already tore
	 * down the iommu driver state), so a plain kfree() is all that is
	 * required.  The previous body referenced undeclared variables from
	 * an unrelated unregister path and leaked the container.
	 */
	kfree(container);
}
staticlong
vfio_container_ioctl_check_extension(struct vfio_container *container, unsignedlong arg)
{ struct vfio_iommu_driver *driver; long ret = 0;
down_read(&container->group_lock);
driver = container->iommu_driver;
switch (arg) { /* No base extensions yet */ default: /* * If no driver is set, poll all registered drivers for * extensions and return the first positive result. If * a driver is already set, further queries will be passed * only to that driver.
*/ if (!driver) {
mutex_lock(&vfio.iommu_drivers_lock);
list_for_each_entry(driver, &vfio.iommu_drivers_list,
vfio_next) {
if (!list_empty(&container->group_list) &&
!vfio_iommu_driver_allowed(container,
driver)) continue; if (!try_module_get(driver->ops->owner)) continue;
ret = driver->ops->ioctl(NULL,
VFIO_CHECK_EXTENSION,
arg);
module_put(driver->ops->owner); if (ret > 0) break;
}
mutex_unlock(&vfio.iommu_drivers_lock);
} else
ret = driver->ops->ioctl(container->iommu_data,
VFIO_CHECK_EXTENSION, arg);
}
up_read(&container->group_lock);
return ret;
}
/*
 * Attach every group already in the container to a newly selected iommu
 * driver.  On failure, every group attached so far is detached again so the
 * container is left unchanged.
 *
 * hold write lock on container->group_lock
 */
static int __vfio_container_attach_groups(struct vfio_container *container,
					  struct vfio_iommu_driver *driver,
					  void *data)
{
	struct vfio_group *group;
	int ret = -ENODEV;

	list_for_each_entry(group, &container->group_list, container_next) {
		ret = driver->ops->attach_group(data, group->iommu_group,
						group->type);
		if (ret)
			goto unwind;
	}

	return ret;

unwind:
	/*
	 * Walk backwards from the group that failed, detaching each group
	 * that had already been attached.  (The original text had this goto
	 * target missing entirely, leaving the function unterminated.)
	 */
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next) {
		driver->ops->detach_group(data, group->iommu_group);
	}

	return ret;
}
staticlong vfio_ioctl_set_iommu(struct vfio_container *container, unsignedlong arg)
{ struct vfio_iommu_driver *driver; long ret = -ENODEV;
down_write(&container->group_lock);
/* * The container is designed to be an unprivileged interface while * the group can be assigned to specific users. Therefore, only by * adding a group to a container does the user get the privilege of * enabling the iommu, which may allocate finite resources. There * is no unset_iommu, but by removing all the groups from a container, * the container is deprivileged and returns to an unset state.
*/ if (list_empty(&container->group_list) || container->iommu_driver) {
up_write(&container->group_lock); return -EINVAL;
}
if (!vfio_iommu_driver_allowed(container, driver)) continue; if (!try_module_get(driver->ops->owner)) continue;
/* * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION, * so test which iommu driver reported support for this * extension and call open on them. We also pass them the * magic, allowing a single driver to support multiple * interfaces if they'd like.
*/ if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
module_put(driver->ops->owner); continue;
}
data = driver->ops->open(arg); if (IS_ERR(data)) {
ret = PTR_ERR(data);
module_put(driver->ops->owner); continue;
}
ret = __vfio_container_attach_groups(container, driver, data); if (ret) {
driver->ops->release(data);
module_put(driver->ops->owner); continue;
}
switch (cmd) { case VFIO_GET_API_VERSION:
ret = VFIO_API_VERSION; break; case VFIO_CHECK_EXTENSION:
ret = vfio_container_ioctl_check_extension(container, arg); break; case VFIO_SET_IOMMU:
ret = vfio_ioctl_set_iommu(container, arg); break; default:
driver = container->iommu_driver;
data = container->iommu_data;
if (driver) /* passthrough all unrecognized ioctls */
ret = driver->ops->ioctl(data, cmd, arg);
}
/*
 * Attach @group to @container.  Returns 0 on success or a negative errno.
 * Caller must hold group->group_lock.
 */
int vfio_container_attach_group(struct vfio_container *container,
				struct vfio_group *group)
{
	struct vfio_iommu_driver *driver;
	int ret = 0;

	lockdep_assert_held(&group->group_lock);

	/* noiommu (fake) groups require raw I/O privilege. */
	if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	down_write(&container->group_lock);

	/* Real groups and fake groups cannot mix */
	if (!list_empty(&container->group_list) &&
	    container->noiommu != (group->type == VFIO_NO_IOMMU)) {
		ret = -EPERM;
		goto out_unlock_container;
	}

	if (group->type == VFIO_IOMMU) {
		ret = iommu_group_claim_dma_owner(group->iommu_group, group);
		if (ret)
			goto out_unlock_container;
	}

	/*
	 * If the container already has an iommu driver bound, attach the new
	 * group to it immediately; undo the DMA ownership claim on failure.
	 */
	driver = container->iommu_driver;
	if (driver) {
		ret = driver->ops->attach_group(container->iommu_data,
						group->iommu_group,
						group->type);
		if (ret) {
			if (group->type == VFIO_IOMMU)
				iommu_group_release_dma_owner(
					group->iommu_group);
			goto out_unlock_container;
		}
	}

	/*
	 * Success path (missing from the corrupted original): link the group
	 * into the container, record the container's noiommu mode, and take
	 * a container reference on the group's behalf.
	 */
	group->container = container;
	group->container_users = 1;
	container->noiommu = (group->type == VFIO_NO_IOMMU);
	list_add(&group->container_next, &container->group_list);

	/* Get a reference on the container and mark a user within the group */
	vfio_container_get(container);

out_unlock_container:
	up_write(&container->group_lock);
	return ret;
}
/*
 * Take a container user reference on behalf of @group.  Returns 0 on
 * success or a negative errno.  Caller must hold group->group_lock.
 */
int vfio_group_use_container(struct vfio_group *group)
{
	lockdep_assert_held(&group->group_lock);

	/*
	 * The container fd has been assigned with VFIO_GROUP_SET_CONTAINER but
	 * VFIO_SET_IOMMU hasn't been done yet.
	 */
	if (!group->container->iommu_driver)
		return -EINVAL;

	if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * Tail reconstructed (the original was truncated mid-function): pin
	 * the group's open file so the container fd cannot go away while in
	 * use, and count the user.
	 */
	get_file(group->opened_file);
	group->container_users++;
	return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.