/*
 * Iterate over each struct group_device in a struct iommu_group.
 * NOTE(review): call sites in this file take group->mutex around this
 * iteration — confirm the lock is required to keep the list stable.
 */
#define for_each_group_device(group, pos) \
	list_for_each_entry(pos, &(group)->devices, list)
/* * Use a function instead of an array here because the domain-type is a * bit-field, so an array would waste memory.
*/ staticconstchar *iommu_domain_type_str(unsignedint t)
{ switch (t) { case IOMMU_DOMAIN_BLOCKED: return"Blocked"; case IOMMU_DOMAIN_IDENTITY: return"Passthrough"; case IOMMU_DOMAIN_UNMANAGED: return"Unmanaged"; case IOMMU_DOMAIN_DMA: case IOMMU_DOMAIN_DMA_FQ: return"Translated"; case IOMMU_DOMAIN_PLATFORM: return"Platform"; default: return"Unknown";
}
}
if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) { if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
iommu_set_default_passthrough(false); else
iommu_set_default_translated(false);
/** * iommu_device_register() - Register an IOMMU hardware instance * @iommu: IOMMU handle for the instance * @ops: IOMMU ops to associate with the instance * @hwdev: (optional) actual instance device, used for fwnode lookup * * Return: 0 on success, or an error.
*/ int iommu_device_register(struct iommu_device *iommu, conststruct iommu_ops *ops, struct device *hwdev)
{ int err = 0;
/* We need to be able to take module references appropriately */ if (WARN_ON(is_module_address((unsignedlong)ops) && !ops->owner)) return -EINVAL;
iommu->ops = ops; if (hwdev)
iommu->fwnode = dev_fwnode(hwdev);
/* Pairs with the alloc in generic_single_device_group() */
iommu_group_put(iommu->singleton_group);
iommu->singleton_group = NULL;
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);
/* * Register an iommu driver against a single bus. This is only used by iommufd * selftest to create a mock iommu driver. The caller must provide * some memory to hold a notifier_block.
*/ int iommu_device_register_bus(struct iommu_device *iommu, conststruct iommu_ops *ops, conststruct bus_type *bus, struct notifier_block *nb)
{ int err;
/* * Internal equivalent of device_iommu_mapped() for when we care that a device * actually has API ops, and don't want false positives from VFIO-only groups.
*/ staticbool dev_has_iommu(struct device *dev)
{ return dev->iommu && dev->iommu->iommu_dev;
}
/* * Init the dev->iommu and dev->iommu_group in the struct device and get the * driver probed
*/ staticint iommu_init_device(struct device *dev)
{ conststruct iommu_ops *ops; struct iommu_device *iommu_dev; struct iommu_group *group; int ret;
if (!dev_iommu_get(dev)) return -ENOMEM; /* * For FDT-based systems and ACPI IORT/VIOT, the common firmware parsing * is buried in the bus dma_configure path. Properly unpicking that is * still a big job, so for now just invoke the whole thing. The device * already having a driver bound means dma_configure has already run and * found no IOMMU to wait for, so there's no point calling it again.
*/ if (!dev->iommu->fwspec && !dev->driver && dev->bus->dma_configure) {
mutex_unlock(&iommu_probe_device_lock);
dev->bus->dma_configure(dev);
mutex_lock(&iommu_probe_device_lock); /* If another instance finished the job for us, skip it */ if (!dev->iommu || dev->iommu_group) return -ENODEV;
} /* * At this point, relevant devices either now have a fwspec which will * match ops registered with a non-NULL fwnode, or we can reasonably * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can * be present, and that any of their registered instances has suitable * ops for probing, and thus cheekily co-opt the same mechanism.
*/
ops = iommu_fwspec_ops(dev->iommu->fwspec); if (!ops) {
ret = -ENODEV; goto err_free;
}
if (!try_module_get(ops->owner)) {
ret = -EINVAL; goto err_free;
}
iommu_dev = ops->probe_device(dev); if (IS_ERR(iommu_dev)) {
ret = PTR_ERR(iommu_dev); goto err_module_put;
}
dev->iommu->iommu_dev = iommu_dev;
ret = iommu_device_link(iommu_dev, dev); if (ret) goto err_release;
group = ops->device_group(dev); if (WARN_ON_ONCE(group == NULL))
group = ERR_PTR(-EINVAL); if (IS_ERR(group)) {
ret = PTR_ERR(group); goto err_unlink;
}
dev->iommu_group = group;
dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev); if (ops->is_attach_deferred)
dev->iommu->attach_deferred = ops->is_attach_deferred(dev); return 0;
/* * release_device() must stop using any attached domain on the device. * If there are still other devices in the group, they are not affected * by this callback. * * If the iommu driver provides release_domain, the core code ensures * that domain is attached prior to calling release_device. Drivers can * use this to enforce a translation on the idle iommu. Typically, the * global static blocked_domain is a good choice. * * Otherwise, the iommu driver must set the device to either an identity * or a blocking translation in release_device() and stop using any * domain pointer, as it is going to be freed. * * Regardless, if a delayed attach never occurred, then the release * should still avoid touching any hardware configuration either.
*/ if (!dev->iommu->attach_deferred && ops->release_domain)
ops->release_domain->ops->attach_dev(ops->release_domain, dev);
if (ops->release_device)
ops->release_device(dev);
/* * If this is the last driver to use the group then we must free the * domains before we do the module_put().
*/ if (list_empty(&group->devices)) { if (group->default_domain) {
iommu_domain_free(group->default_domain);
group->default_domain = NULL;
} if (group->blocking_domain) {
iommu_domain_free(group->blocking_domain);
group->blocking_domain = NULL;
}
group->domain = NULL;
}
/* Caller must put iommu_group */
dev->iommu_group = NULL;
module_put(ops->owner);
dev_iommu_free(dev); #ifdef CONFIG_IOMMU_DMA
dev->dma_iommu = false; #endif
}
/* * Serialise to avoid races between IOMMU drivers registering in * parallel and/or the "replay" calls from ACPI/OF code via client * driver probe. Once the latter have been cleaned up we should * probably be able to use device_lock() here to minimise the scope, * but for now enforcing a simple global ordering is fine.
*/
lockdep_assert_held(&iommu_probe_device_lock);
/* Device is probed already if in a group */ if (dev->iommu_group) return 0;
ret = iommu_init_device(dev); if (ret) return ret; /* * And if we do now see any replay calls, they would indicate someone * misusing the dma_configure path outside bus code.
*/ if (dev->driver)
dev_WARN(dev, "late IOMMU probe at driver bind, something fishy here!\n");
group = dev->iommu_group;
gdev = iommu_group_alloc_device(group, dev);
mutex_lock(&group->mutex); if (IS_ERR(gdev)) {
ret = PTR_ERR(gdev); goto err_put_group;
}
/* * The gdev must be in the list before calling * iommu_setup_default_domain()
*/
list_add_tail(&gdev->list, &group->devices);
WARN_ON(group->default_domain && !group->domain); if (group->default_domain)
iommu_create_device_direct_mappings(group->default_domain, dev); if (group->domain) {
ret = __iommu_device_set_domain(group, dev, group->domain, 0); if (ret) goto err_remove_gdev;
} elseif (!group->default_domain && !group_list) {
ret = iommu_setup_default_domain(group, 0); if (ret) goto err_remove_gdev;
} elseif (!group->default_domain) { /* * With a group_list argument we defer the default_domain setup * to the caller by providing a de-duplicated list of groups * that need further setup.
*/ if (list_empty(&group->entry))
list_add_tail(&group->entry, group_list);
}
if (group->default_domain)
iommu_setup_dma_ops(dev);
/* * If the group has become empty then ownership must have been * released, and the current domain must be set back to NULL or * the default domain.
*/ if (list_empty(&group->devices))
WARN_ON(group->owner_cnt ||
group->domain != group->default_domain);
kfree(grp_dev->name);
kfree(grp_dev);
}
/* Remove the iommu_group from the struct device. */ staticvoid __iommu_group_remove_device(struct device *dev)
{ struct iommu_group *group = dev->iommu_group; struct group_device *device;
mutex_lock(&group->mutex);
for_each_group_device(group, device) { if (device->dev != dev) continue;
/** * iommu_insert_resv_region - Insert a new region in the * list of reserved regions. * @new: new region to insert * @regions: list of regions * * Elements are sorted by start address and overlapping segments * of the same type are merged.
*/ staticint iommu_insert_resv_region(struct iommu_resv_region *new, struct list_head *regions)
{ struct iommu_resv_region *iter, *tmp, *nr, *top;
LIST_HEAD(stack);
nr = iommu_alloc_resv_region(new->start, new->length,
new->prot, new->type, GFP_KERNEL); if (!nr) return -ENOMEM;
/* First add the new element based on start address sorting */
list_for_each_entry(iter, regions, list) { if (nr->start < iter->start ||
(nr->start == iter->start && nr->type <= iter->type)) break;
}
list_add_tail(&nr->list, &iter->list);
/* Merge overlapping segments of type nr->type in @regions, if any */
list_for_each_entry_safe(iter, tmp, regions, list) {
phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
/* no merge needed on elements of different types than @new */ if (iter->type != new->type) {
list_move_tail(&iter->list, &stack); continue;
}
/* look for the last stack element of same type as @iter */
list_for_each_entry_reverse(top, &stack, list) if (top->type == iter->type) goto check_overlap;
mutex_lock(&group->mutex); if (group->default_domain) { switch (group->default_domain->type) { case IOMMU_DOMAIN_BLOCKED:
type = "blocked"; break; case IOMMU_DOMAIN_IDENTITY:
type = "identity"; break; case IOMMU_DOMAIN_UNMANAGED:
type = "unmanaged"; break; case IOMMU_DOMAIN_DMA:
type = "DMA"; break; case IOMMU_DOMAIN_DMA_FQ:
type = "DMA-FQ"; break;
}
}
mutex_unlock(&group->mutex);
/** * iommu_group_alloc - Allocate a new group * * This function is called by an iommu driver to allocate a new iommu * group. The iommu group represents the minimum granularity of the iommu. * Upon successful return, the caller holds a reference to the supplied * group in order to hold the group until devices are added. Use * iommu_group_put() to release this extra reference count, allowing the * group to be automatically reclaimed once it has no devices or external * references.
*/ struct iommu_group *iommu_group_alloc(void)
{ struct iommu_group *group; int ret;
group = kzalloc(sizeof(*group), GFP_KERNEL); if (!group) return ERR_PTR(-ENOMEM);
/* * The devices_kobj holds a reference on the group kobject, so * as long as that exists so will the group. We can therefore * use the devices_kobj for reference counting.
*/
kobject_put(&group->kobj);
ret = iommu_group_create_file(group,
&iommu_group_attr_reserved_regions); if (ret) {
kobject_put(group->devices_kobj); return ERR_PTR(ret);
}
ret = iommu_group_create_file(group, &iommu_group_attr_type); if (ret) {
kobject_put(group->devices_kobj); return ERR_PTR(ret);
}
/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * IOMMU drivers can stash driver-private data in a group for use when doing
 * iommu operations; this returns whatever was last stored there. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * IOMMU drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after the group
 * has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
/** * iommu_group_set_name - set name for a group * @group: the group * @name: name * * Allow iommu driver to set a name for a group. When set it will * appear in a name attribute file under the group in sysfs.
*/ int iommu_group_set_name(struct iommu_group *group, constchar *name)
{ int ret;
if (group->name) {
iommu_group_remove_file(group, &iommu_group_attr_name);
kfree(group->name);
group->name = NULL; if (!name) return 0;
}
group->name = kstrdup(name, GFP_KERNEL); if (!group->name) return -ENOMEM;
ret = iommu_group_create_file(group, &iommu_group_attr_name); if (ret) {
kfree(group->name);
group->name = NULL; return ret;
}
if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size)) return -EINVAL;
iommu_get_resv_regions(dev, &mappings);
/* We need to consider overlapping regions for different devices */
list_for_each_entry(entry, &mappings, list) {
dma_addr_t start, end, addr;
size_t map_size = 0;
if (entry->type == IOMMU_RESV_DIRECT)
dev->iommu->require_direct = 1;
map_end: if (map_size) {
ret = iommu_map(domain, addr - map_size,
addr - map_size, map_size,
entry->prot, GFP_KERNEL); if (ret) goto out;
map_size = 0;
}
}
}
out:
iommu_put_resv_regions(dev, &mappings);
return ret;
}
/* This is undone by __iommu_group_free_device() */ staticstruct group_device *iommu_group_alloc_device(struct iommu_group *group, struct device *dev)
{ int ret, i = 0; struct group_device *device;
device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) return ERR_PTR(-ENOMEM);
device->dev = dev;
ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); if (ret) goto err_free_device;
device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename: if (!device->name) {
ret = -ENOMEM; goto err_remove_link;
}
ret = sysfs_create_link_nowarn(group->devices_kobj,
&dev->kobj, device->name); if (ret) { if (ret == -EEXIST && i >= 0) { /* * Account for the slim chance of collision * and append an instance to the name.
*/
kfree(device->name);
device->name = kasprintf(GFP_KERNEL, "%s.%d",
kobject_name(&dev->kobj), i++); goto rename;
} goto err_free_name;
}
trace_add_device_to_group(group->id, dev);
dev_info(dev, "Adding to iommu group %d\n", group->id);
return device;
err_free_name:
kfree(device->name);
err_remove_link:
sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
kfree(device);
dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret); return ERR_PTR(ret);
}
/** * iommu_group_add_device - add a device to an iommu group * @group: the group into which to add the device (reference should be held) * @dev: the device * * This function is called by an iommu driver to add a device into a * group. Adding a device increments the group reference count.
*/ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{ struct group_device *gdev;
gdev = iommu_group_alloc_device(group, dev); if (IS_ERR(gdev)) return PTR_ERR(gdev);
/** * iommu_group_remove_device - remove a device from it's current group * @dev: device to be removed * * This function is called by an iommu driver to remove the device from * it's current group. This decrements the iommu group reference count.
*/ void iommu_group_remove_device(struct device *dev)
{ struct iommu_group *group = dev->iommu_group;
if (!group) return;
dev_info(dev, "Removing from iommu group %d\n", group->id);
#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API) /** * iommu_group_mutex_assert - Check device group mutex lock * @dev: the device that has group param set * * This function is called by an iommu driver to check whether it holds * group mutex lock for the given device or not. * * Note that this function must be called after device group param is set.
*/ void iommu_group_mutex_assert(struct device *dev)
{ struct iommu_group *group = dev->iommu_group;
/** * iommu_group_for_each_dev - iterate over each device in the group * @group: the group * @data: caller opaque data to be passed to callback function * @fn: caller supplied callback function * * This function is called by group users to iterate over group devices. * Callers should hold a reference count to the group during callback. * The group->mutex is held across callbacks, which will block calls to * iommu_group_add/remove_device.
*/ int iommu_group_for_each_dev(struct iommu_group *group, void *data, int (*fn)(struct device *, void *))
{ struct group_device *device; int ret = 0;
mutex_lock(&group->mutex);
for_each_group_device(group, device) {
ret = fn(device->dev, data); if (ret) break;
}
mutex_unlock(&group->mutex);
/** * iommu_group_get - Return the group for a device and increment reference * @dev: get the group that this device belongs to * * This function is called by iommu drivers and users to get the group * for the specified device. If found, the group is returned and the group * reference in incremented, else NULL.
*/ struct iommu_group *iommu_group_get(struct device *dev)
{ struct iommu_group *group = dev->iommu_group;
/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on
 * an existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	/* The devices_kobj carries the group's refcount */
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);
/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use (may be NULL, in which case this is a no-op)
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (!group)
		return;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);
/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);
/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
/* * For multifunction devices which are not isolated from each other, find * all the other non-isolated functions and look for existing groups. For * each function, we also need to look for aliases to or from other devices * that may already have a group.
*/ staticstruct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev, unsignedlong *devfns)
{ struct pci_dev *tmp = NULL; struct iommu_group *group;
if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS)) return NULL;
group = get_pci_alias_group(tmp, devfns); if (group) {
pci_dev_put(tmp); return group;
}
}
return NULL;
}
/* * Look for aliases to or from the given device for existing groups. DMA * aliases are only supported on the same bus, therefore the search * space is quite small (especially since we're really only looking at pcie * device, and therefore only expect multiple slots on the root complex or * downstream switch ports). It's conceivable though that a pair of * multifunction devices could have aliases between them that would cause a * loop. To prevent this, we use a bitmap to track where we've been.
*/ staticstruct iommu_group *get_pci_alias_group(struct pci_dev *pdev, unsignedlong *devfns)
{ struct pci_dev *tmp = NULL; struct iommu_group *group;
if (test_and_set_bit(pdev->devfn & 0xff, devfns)) return NULL;
group = iommu_group_get(&pdev->dev); if (group) return group;
/* We alias them or they alias us */ if (pci_devs_are_dma_aliases(pdev, tmp)) {
group = get_pci_alias_group(tmp, devfns); if (group) {
pci_dev_put(tmp); return group;
}
group = get_pci_function_alias_group(tmp, devfns); if (group) {
pci_dev_put(tmp); return group;
}
}
}
/* * DMA alias iterator callback, return the last seen device. Stop and return * the IOMMU group if we find one along the way.
*/ staticint get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{ struct group_for_pci_data *data = opaque;
/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);
/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per iommu driver instance shared by every device
 * probed by that iommu driver.
 */
struct iommu_group *generic_single_device_group(struct device *dev)
{
	struct iommu_device *iommu = dev->iommu->iommu_dev;
	struct iommu_group *group;

	/* First device through here allocates the shared group */
	if (iommu->singleton_group)
		return iommu_group_ref_get(iommu->singleton_group);

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return group;
	iommu->singleton_group = group;
	return iommu_group_ref_get(iommu->singleton_group);
}
EXPORT_SYMBOL_GPL(generic_single_device_group);
/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct iommu_group *group = NULL;
	struct pci_bus *bus;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Walk upstream from the point of minimum IOMMU granularity due to
	 * aliases, up to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if an existing group is found
	 * along the way, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);
/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	/* Share the container's group if one already exists */
	group = iommu_group_get(cont_dev);
	if (group)
		return group;
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);
if (group->default_domain && group->default_domain->type == req_type) return group->default_domain;
/* * When allocating the DMA API domain assume that the driver is going to * use PASID and make sure the RID's domain is PASID compatible.
*/ if (req_type & __IOMMU_DOMAIN_PAGING) {
dom = __iommu_paging_domain_alloc_flags(dev, req_type,
dev->iommu->max_pasids ? IOMMU_HWPT_ALLOC_PASID : 0);
/* * If driver does not support PASID feature then * try to allocate non-PASID domain
*/ if (PTR_ERR(dom) == -EOPNOTSUPP)
dom = __iommu_paging_domain_alloc_flags(dev, req_type, 0);
return dom;
}
if (req_type == IOMMU_DOMAIN_IDENTITY) return __iommu_alloc_identity_domain(dev);
return ERR_PTR(-EINVAL);
}
/* * req_type of 0 means "auto" which means to select a domain based on * iommu_def_domain_type or what the driver actually supports.
*/ staticstruct iommu_domain *
iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{ conststruct iommu_ops *ops = dev_iommu_ops(iommu_group_first_dev(group)); struct iommu_domain *dom;
lockdep_assert_held(&group->mutex);
/* * Allow legacy drivers to specify the domain that will be the default * domain. This should always be either an IDENTITY/BLOCKED/PLATFORM * domain. Do not use in new drivers.
*/ if (ops->default_domain) { if (req_type != ops->default_domain->type) return ERR_PTR(-EINVAL); return ops->default_domain;
}
if (req_type) return __iommu_group_alloc_default_domain(group, req_type);
/* The driver gave no guidance on what type to use, try the default */
dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type); if (!IS_ERR(dom)) return dom;
/* Otherwise IDENTITY and DMA_FQ defaults will try DMA */ if (iommu_def_domain_type == IOMMU_DOMAIN_DMA) return ERR_PTR(-EINVAL);
dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA); if (IS_ERR(dom)) return dom;
pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
iommu_def_domain_type, group->name); return dom;
}
mutex_lock(&iommu_probe_device_lock);
ret = __iommu_probe_device(dev, group_list);
mutex_unlock(&iommu_probe_device_lock); if (ret == -ENODEV)
ret = 0;
/* * Combine the driver's chosen def_domain_type across all the devices in a * group. Drivers must give a consistent result.
*/ staticint iommu_get_def_domain_type(struct iommu_group *group, struct device *dev, int cur_type)
{ conststruct iommu_ops *ops = dev_iommu_ops(dev); int type;
if (ops->default_domain) { /* * Drivers that declare a global static default_domain will * always choose that.
*/
type = ops->default_domain->type;
} else { if (ops->def_domain_type)
type = ops->def_domain_type(dev); else return cur_type;
} if (!type || cur_type == type) return cur_type; if (!cur_type) return type;
dev_err_ratelimited(
dev, "IOMMU driver error, requesting conflicting def_domain_type, %s and %s, for devices in group %u.\n",
iommu_domain_type_str(cur_type), iommu_domain_type_str(type),
group->id);
/* * Try to recover, drivers are allowed to force IDENTITY or DMA, IDENTITY * takes precedence.
*/ if (type == IOMMU_DOMAIN_IDENTITY) return type; return cur_type;
}
/* * A target_type of 0 will select the best domain type. 0 can be returned in * this case meaning the global default should be used.
*/ staticint iommu_get_default_domain_type(struct iommu_group *group, int target_type)
{ struct device *untrusted = NULL; struct group_device *gdev; int driver_type = 0;
lockdep_assert_held(&group->mutex);
/* * ARM32 drivers supporting CONFIG_ARM_DMA_USE_IOMMU can declare an * identity_domain and it will automatically become their default * domain. Later on ARM_DMA_USE_IOMMU will install its UNMANAGED domain. * Override the selection to IDENTITY.
*/ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
static_assert(!(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) &&
IS_ENABLED(CONFIG_IOMMU_DMA)));
driver_type = IOMMU_DOMAIN_IDENTITY;
}
if (dev_is_pci(gdev->dev) && to_pci_dev(gdev->dev)->untrusted) { /* * No ARM32 using systems will set untrusted, it cannot * work.
*/ if (WARN_ON(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))) return -1;
untrusted = gdev->dev;
}
}
/* * If the common dma ops are not selected in kconfig then we cannot use * IOMMU_DOMAIN_DMA at all. Force IDENTITY if nothing else has been * selected.
*/ if (!IS_ENABLED(CONFIG_IOMMU_DMA)) { if (WARN_ON(driver_type == IOMMU_DOMAIN_DMA)) return -1; if (!driver_type)
driver_type = IOMMU_DOMAIN_IDENTITY;
}
if (untrusted) { if (driver_type && driver_type != IOMMU_DOMAIN_DMA) {
dev_err_ratelimited(
untrusted, "Device is not trusted, but driver is overriding group %u to %s, refusing to probe.\n",
group->id, iommu_domain_type_str(driver_type)); return -1;
}
driver_type = IOMMU_DOMAIN_DMA;
}
if (target_type) { if (driver_type && target_type != driver_type) return -1; return target_type;
} return driver_type;
}
/* Remove item from the list */
list_del_init(&group->entry);
/* * We go to the trouble of deferred default domain creation so * that the cross-group default domain type and the setup of the * IOMMU_RESV_DIRECT will work correctly in non-hotplug scenarios.
*/
ret = iommu_setup_default_domain(group, 0); if (ret) {
mutex_unlock(&group->mutex); return ret;
}
for_each_group_device(group, gdev)
iommu_setup_dma_ops(gdev->dev);
mutex_unlock(&group->mutex);
/* * FIXME: Mis-locked because the ops->probe_finalize() call-back * of some IOMMU drivers calls arm_iommu_attach_device() which * in-turn might call back into IOMMU core code, where it tries * to take group->mutex, resulting in a deadlock.
*/
for_each_group_device(group, gdev)
iommu_group_do_probe_finalize(gdev->dev);
}
return 0;
}
/** * device_iommu_capable() - check for a general IOMMU capability * @dev: device to which the capability would be relevant, if available * @cap: IOMMU capability * * Return: true if an IOMMU is present and supports the given capability * for the given device, otherwise false.
*/ bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{ conststruct iommu_ops *ops;
if (!dev_has_iommu(dev)) returnfalse;
ops = dev_iommu_ops(dev); if (!ops->capable) returnfalse;
/** * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi() * for a group * @group: Group to query * * IOMMU groups should not have differing values of * msi_device_has_isolated_msi() for devices in a group. However nothing * directly prevents this, so ensure mistakes don't result in isolation failures * by checking that all the devices are the same.
*/ bool iommu_group_has_isolated_msi(struct iommu_group *group)
{ struct group_device *group_dev; bool ret = true;
/** * iommu_set_fault_handler() - set a fault handler for an iommu domain * @domain: iommu domain * @handler: fault handler * @token: user data, will be passed back to the fault handler * * This function should be used by IOMMU users which want to be notified * whenever an IOMMU fault happens. * * The fault handler itself should return 0 on success, and an appropriate * error code otherwise.
*/ void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token)
{ if (WARN_ON(!domain || domain->cookie_type != IOMMU_COOKIE_NONE)) return;
/** * iommu_paging_domain_alloc_flags() - Allocate a paging domain * @dev: device for which the domain is allocated * @flags: Bitmap of iommufd_hwpt_alloc_flags * * Allocate a paging domain which will be managed by a kernel driver. Return * allocated domain if successful, or an ERR pointer for failure.
*/ struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsignedint flags)
{ return __iommu_paging_domain_alloc_flags(dev,
IOMMU_DOMAIN_UNMANAGED, flags);
}
EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc_flags);
void iommu_domain_free(struct iommu_domain *domain)
{
	/* Release whatever cookie type is attached, if any */
	if (domain->cookie_type == IOMMU_COOKIE_DMA_IOVA)
		iommu_put_dma_cookie(domain);
	else if (domain->cookie_type == IOMMU_COOKIE_DMA_MSI)
		iommu_put_msi_cookie(domain);
	else if (domain->cookie_type == IOMMU_COOKIE_SVA)
		mmdrop(domain->mm);

	if (domain->ops->free)
		domain->ops->free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
/* * Put the group's domain back to the appropriate core-owned domain - either the * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
*/ staticvoid __iommu_group_set_core_domain(struct iommu_group *group)
{ struct iommu_domain *new_domain;
if (group->owner)
new_domain = group->blocking_domain; else
new_domain = group->default_domain;
/* Attach @domain to @dev via the domain ops; clears any deferred attach. */
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(!domain->ops->attach_dev))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (ret)
		return ret;

	dev->iommu->attach_deferred = 0;
	trace_attach_device_to_domain(dev);
	return 0;
}
/** * iommu_attach_device - Attach an IOMMU domain to a device * @domain: IOMMU domain to attach * @dev: Device that will be attached * * Returns 0 on success and error code on failure * * Note that EINVAL can be treated as a soft failure, indicating * that certain configuration of the domain is incompatible with * the device. In this case attaching a different domain to the * device may succeed.
*/ int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{ /* Caller must be a probed driver on dev */ struct iommu_group *group = dev->iommu_group; int ret;
if (!group) return -ENODEV;
/* * Lock the group to make sure the device-count doesn't * change while we are attaching
*/
mutex_lock(&group->mutex);
ret = -EINVAL; if (list_count_nodes(&group->devices) != 1) goto out_unlock;
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
	/*
	 * Nothing to do for devices that carry no IOMMU state or whose
	 * attach was not deferred at probe time.
	 */
	if (!dev->iommu || !dev->iommu->attach_deferred)
		return 0;

	/* Perform the attach that was postponed earlier. */
	return __iommu_attach_device(domain, dev);
}
void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{ /* Caller must be a probed driver on dev */ struct iommu_group *group = dev->iommu_group;
/*
 * NOTE(review): truncated in this chunk - the body that performs the
 * detach and the closing brace are missing here.
 */
struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{ /* Caller must be a probed driver on dev */ struct iommu_group *group = dev->iommu_group;
/*
 * NOTE(review): truncated in this chunk - the remainder of the body
 * (presumably returning the group's current domain) is missing here.
 */
/* * For IOMMU_DOMAIN_DMA implementations which already provide their own * guarantees that the group and its default domain are valid and correct.
*/ struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{ return dev->iommu_group->default_domain;
}
/*
 * NOTE(review): fragment - this is the tail of a group-attach helper whose
 * signature and earlier busy-checks are missing from this chunk. It validates
 * the first device against the domain, then switches the whole group.
 */
dev = iommu_group_first_dev(group); if (!dev_has_iommu(dev) ||
!domain_iommu_ops_compatible(dev_iommu_ops(dev), domain)) return -EINVAL;
return __iommu_group_set_domain(group, domain);
}
/** * iommu_attach_group - Attach an IOMMU domain to an IOMMU group * @domain: IOMMU domain to attach * @group: IOMMU group that will be attached * * Returns 0 on success and error code on failure * * Note that EINVAL can be treated as a soft failure, indicating * that certain configuration of the domain is incompatible with * the group. In this case attaching a different domain to the * group may succeed.
*/ int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{ int ret;
/* group->mutex serializes all domain changes on the group. */
mutex_lock(&group->mutex);
ret = __iommu_attach_group(domain, group);
mutex_unlock(&group->mutex);
/*
 * NOTE(review): truncated in this chunk - the "return ret;" and closing
 * brace are missing here.
 */
/*
 * NOTE(review): fragment - this is the body of a per-device set-domain
 * helper; its signature (providing dev, new_domain, group and flags) is
 * missing from this chunk.
 */
/* * If the device requires IOMMU_RESV_DIRECT then we cannot allow * the blocking domain to be attached as it does not contain the * required 1:1 mapping. This test effectively excludes the device * being used with iommu_group_claim_dma_owner() which will block * vfio and iommufd as well.
*/ if (dev->iommu->require_direct &&
(new_domain->type == IOMMU_DOMAIN_BLOCKED ||
new_domain == group->blocking_domain)) {
dev_warn(dev, "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n"); return -EINVAL;
}
/* A deferred device counts as attached once the default domain is asked for. */
if (dev->iommu->attach_deferred) { if (new_domain == group->default_domain) return 0;
dev->iommu->attach_deferred = 0;
}
ret = __iommu_attach_device(new_domain, dev); if (ret) { /* * If we have a blocking domain then try to attach that in hopes * of avoiding a UAF. Modern drivers should implement blocking * domains as global statics that cannot fail.
*/ if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) &&
group->blocking_domain &&
group->blocking_domain != new_domain)
__iommu_attach_device(group->blocking_domain, dev); return ret;
} return 0;
}
/* * If 0 is returned the group's domain is new_domain. If an error is returned * then the group's domain will be set back to the existing domain unless * IOMMU_SET_DOMAIN_MUST_SUCCEED, otherwise an error is returned and the group's * domains is left inconsistent. This is a driver bug to fail attach with a * previously good domain. We try to avoid a kernel UAF because of this. * * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU * API works on domains and devices. Bridge that gap by iterating over the * devices in a group. Ideally we'd have a single device which represents the * requestor ID of the group, but we also allow IOMMU drivers to create policy * defined minimum sets, where the physical hardware may be able to distiguish * members, but we wish to group them at a higher level (ex. untrusted * multi-function PCI devices). Thus we attach each device.
*/ staticint __iommu_group_set_domain_internal(struct iommu_group *group, struct iommu_domain *new_domain, unsignedint flags)
{ struct group_device *last_gdev; struct group_device *gdev; int result; int ret;
lockdep_assert_held(&group->mutex);
/* Already on the requested domain: nothing to do. */
if (group->domain == new_domain) return 0;
if (WARN_ON(!new_domain)) return -EINVAL;
/* * Changing the domain is done by calling attach_dev() on the new * domain. This switch does not have to be atomic and DMA can be * discarded during the transition. DMA must only be able to access * either new_domain or group->domain, never something else.
*/
/* "result" remembers the first failure when forced to keep going. */
result = 0;
for_each_group_device(group, gdev) {
ret = __iommu_device_set_domain(group, gdev->dev, new_domain,
flags); if (ret) {
result = ret; /* * Keep trying the other devices in the group. If a * driver fails attach to an otherwise good domain, and * does not support blocking domains, it should at least * drop its reference on the current domain so we don't * UAF.
*/ if (flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) continue; goto err_revert;
}
}
group->domain = new_domain; return result;
err_revert: /* * This is called in error unwind paths. A well behaved driver should * always allow us to attach to a domain that was already attached.
*/
/* Revert only the devices already moved: up to and including gdev. */
last_gdev = gdev;
for_each_group_device(group, gdev) { /* * A NULL domain can happen only for first probe, in which case * we leave group->domain as NULL and let release clean * everything up.
*/ if (group->domain)
WARN_ON(__iommu_device_set_domain(
group, gdev->dev, group->domain,
IOMMU_SET_DOMAIN_MUST_SUCCEED)); if (gdev == last_gdev) break;
} return ret;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.