// SPDX-License-Identifier: GPL-2.0-only /* * A fairly generic DMA-API to IOMMU-API glue layer. * * Copyright (C) 2014-2015 ARM Ltd. * * based in part on arch/arm/mm/dma-mapping.c: * Copyright (C) 2000-2004 Russell King
*/
/*
 * Per-domain DMA-API state: the IOVA allocator, MSI page bookkeeping, and
 * (when deferred flushing is enabled) the IOTLB flush-queue machinery.
 */
struct iommu_dma_cookie {
	struct iova_domain	iovad;
	struct list_head	msi_page_list;
	/* Flush queue */
	union {
		struct iova_fq	*single_fq;
		struct iova_fq __percpu	*percpu_fq;
	};
	/* Number of TLB flushes that have been started */
	atomic64_t		fq_flush_start_cnt;
	/* Number of TLB flushes that have been finished */
	atomic64_t		fq_flush_finish_cnt;
	/* Timer to regularly empty the flush queues */
	struct timer_list	fq_timer;
	/* 1 when timer is active, 0 when not */
	atomic_t		fq_timer_on;
	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain	*fq_domain;
	/* Options for dma-iommu use */
	struct iommu_dma_options	options;
};
/* * Order against the IOMMU driver's pagetable update from unmapping * @pte, to guarantee that fq_flush_iotlb() observes that if called * from a different CPU before we release the lock below. Full barrier * so it also pairs with iommu_dma_init_fq() to avoid seeing partially * written fq state here.
*/
smp_mb();
/* * First remove all entries from the flush queue that have already been * flushed out on another CPU. This makes the fq_full() check below less * likely to be true.
*/
fq_ring_free_locked(cookie, fq);
if (fq_full(fq)) {
fq_flush_iotlb(cookie);
fq_ring_free_locked(cookie, fq);
}
/* Avoid false sharing as much as possible. */ if (!atomic_read(&cookie->fq_timer_on) &&
!atomic_xchg(&cookie->fq_timer_on, 1))
mod_timer(&cookie->fq_timer,
jiffies + msecs_to_jiffies(cookie->options.fq_timeout));
}
staticvoid iommu_dma_free_fq_single(struct iova_fq *fq)
{ int idx;
staticvoid iommu_dma_free_fq_percpu(struct iova_fq __percpu *percpu_fq)
{ int cpu, idx;
/* The IOVAs will be torn down separately, so just free our queued pages */
for_each_possible_cpu(cpu) { struct iova_fq *fq = per_cpu_ptr(percpu_fq, cpu);
/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	/*
	 * NOTE(review): 'rc' is never used in the code visible here; the
	 * flush-queue allocation that would set it appears to be missing
	 * from this view — confirm against the full source.
	 */
	int rc;

	/* Arm (but do not start) the periodic flush-queue drain timer. */
	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
	atomic_set(&cookie->fq_timer_on, 0);
	/*
	 * Prevent incomplete fq state being observable. Pairs with path from
	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
	 */
	smp_wmb();
	/* Publishing fq_domain is what makes queue_iova() start using the fq. */
	WRITE_ONCE(cookie->fq_domain, domain);
	return 0;
}
/** * iommu_get_dma_cookie - Acquire DMA-API resources for a domain * @domain: IOMMU domain to prepare for DMA-API usage
*/ int iommu_get_dma_cookie(struct iommu_domain *domain)
{ struct iommu_dma_cookie *cookie;
if (domain->cookie_type != IOMMU_COOKIE_NONE) return -EEXIST;
cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); if (!cookie) return -ENOMEM;
/** * iommu_get_msi_cookie - Acquire just MSI remapping resources * @domain: IOMMU domain to prepare * @base: Start address of IOVA region for MSI mappings * * Users who manage their own IOVA allocation and do not want DMA API support, * but would still like to take advantage of automatic MSI remapping, can use * this to initialise their own domain appropriately. Users should reserve a * contiguous IOVA region, starting at @base, large enough to accommodate the * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address * used by the devices attached to @domain.
*/ int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{ struct iommu_dma_msi_cookie *cookie;
if (domain->type != IOMMU_DOMAIN_UNMANAGED) return -EINVAL;
if (domain->cookie_type != IOMMU_COOKIE_NONE) return -EEXIST;
cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); if (!cookie) return -ENOMEM;
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	struct fwnode_handle *fwnode = dev_iommu_fwspec_get(dev)->iommu_fwnode;

	/* Non-DT firmware description: collect ACPI IORT reservations */
	if (!is_of_node(fwnode))
		iort_iommu_get_resv_regions(dev, list);

	/* Devicetree-described devices may carry their own reserved regions */
	if (dev->of_node)
		of_iommu_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
staticint cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
phys_addr_t start, phys_addr_t end)
{ struct iova_domain *iovad = &cookie->iovad; struct iommu_dma_msi_page *msi_page; int i, num_pages;
resource_list_for_each_entry(window, &bridge->windows) { if (resource_type(window->res) != IORESOURCE_MEM) continue;
lo = iova_pfn(iovad, window->res->start - window->offset);
hi = iova_pfn(iovad, window->res->end - window->offset);
reserve_iova(iovad, lo, hi);
}
/* Get reserved DMA windows from host bridge */
list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
resource_list_for_each_entry(window, &bridge->dma_ranges) {
end = window->res->start - window->offset;
resv_iova: if (end > start) {
lo = iova_pfn(iovad, start);
hi = iova_pfn(iovad, end);
reserve_iova(iovad, lo, hi);
} elseif (end < start) { /* DMA ranges should be non-overlapping */
dev_err(&dev->dev, "Failed to reserve IOVA [%pa-%pa]\n",
&start, &end); return -EINVAL;
}
start = window->res->end - window->offset + 1; /* If window is last entry */ if (window->node.next == &bridge->dma_ranges &&
end != ~(phys_addr_t)0) {
end = ~(phys_addr_t)0; goto resv_iova;
}
}
staticbool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir)
{ struct scatterlist *s; int i;
if (!IS_ENABLED(CONFIG_SWIOTLB)) returnfalse;
if (dev_is_untrusted(dev)) returntrue;
/* * If kmalloc() buffers are not DMA-safe for this device and * direction, check the individual lengths in the sg list. If any * element is deemed unsafe, use the swiotlb for bouncing.
*/ if (!dma_kmalloc_safe(dev, dir)) {
for_each_sg(sg, s, nents, i) if (!dma_kmalloc_size_aligned(s->length)) returntrue;
}
returnfalse;
}
/**
 * iommu_dma_init_options - Initialize dma-iommu options
 * @options: The options to be initialized
 * @dev: Device the options are set for
 *
 * This allows tuning dma-iommu specific to device properties
 */
static void iommu_dma_init_options(struct iommu_dma_options *options,
				   struct device *dev)
{
	if (!dev->iommu->shadow_on_flush) {
		/* Default: one small flush queue per CPU */
		options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE;
		options->fq_size = IOVA_DEFAULT_FQ_SIZE;
		options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT;
		return;
	}

	/* Shadowing IOTLB flushes do better with a single large queue */
	options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE;
	options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT;
	options->fq_size = IOVA_SINGLE_FQ_SIZE;
}
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @dev: Device the domain is being initialised for
 *
 * If the geometry and dma_range_map include address 0, we reserve that page
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	const struct bus_dma_region *map = dev->dma_range_map;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int ret;

	if (!cookie || domain->cookie_type != IOMMU_COOKIE_DMA_IOVA)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	/* base_pfn of 1 keeps IOVA 0 out of the allocator (invalid address) */
	base_pfn = 1;

	/* Check the domain allows at least some access to the device... */
	if (map) {
		if (dma_range_map_min(map) > domain->geometry.aperture_end ||
		    dma_range_map_max(map) < domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
	}
	/* ...then finally give it a kicking to make sure it fits */
	base_pfn = max_t(unsigned long, base_pfn,
			 domain->geometry.aperture_start >> order);

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		/* Reinitialisation must not change granule or base, see above */
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	ret = iova_domain_init_rcaches(iovad);
	if (ret)
		return ret;

	iommu_dma_init_options(&cookie->options, dev);

	/* If the FQ fails we can simply fall back to strict mode */
	if (domain->type == IOMMU_DOMAIN_DMA_FQ &&
	    (!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
		domain->type = IOMMU_DOMAIN_DMA;

	return iova_reserve_iommu_regions(dev, domain);
}
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
			    unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	if (dir == DMA_BIDIRECTIONAL)
		return prot | IOMMU_READ | IOMMU_WRITE;
	if (dir == DMA_TO_DEVICE)
		return prot | IOMMU_READ;
	if (dir == DMA_FROM_DEVICE)
		return prot | IOMMU_WRITE;
	/* Unknown direction: no access */
	return 0;
}
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
/* * Try to use all the 32-bit PCI addresses first. The original SAC vs. * DAC reasoning loses relevance with PCIe, but enough hardware and * firmware bugs are still lurking out there that it's safest not to * venture into the 64-bit space until necessary. * * If your device goes wrong after seeing the notice then likely either * its driver is not setting DMA masks accurately, the hardware has * some inherent bug in handling >32-bit addresses, or not all the * expected address bits are wired up between the device and the IOMMU.
*/ if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) {
iova = alloc_iova_fast(iovad, iova_len,
DMA_BIT_MASK(32) >> shift, false); if (iova) goto done;
/* The MSI case is only ever cleaning up its most recent allocation */ if (domain->cookie_type == IOMMU_COOKIE_DMA_MSI)
domain->msi_cookie->msi_iova -= size; elseif (gather && gather->queued)
queue_iova(domain->iova_cookie, iova_pfn(iovad, iova),
size >> iova_shift(iovad),
&gather->freelist); else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
}
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain)) return DMA_MAPPING_ERROR;
/* If anyone ever wants this we'd need support in the IOVA allocator */ if (dev_WARN_ONCE(dev, dma_get_min_align_mask(dev) > iova_mask(iovad), "Unsupported alignment constraint\n")) return DMA_MAPPING_ERROR;
size = iova_align(iovad, size + iova_off);
iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev); if (!iova) return DMA_MAPPING_ERROR;
order_mask &= GENMASK(MAX_PAGE_ORDER, 0); if (!order_mask) return NULL;
pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL); if (!pages) return NULL;
/* IOMMU can map any pages, so himem can also be used here */
gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
while (count) { struct page *page = NULL; unsignedint order_size;
/* * Higher-order allocations are a convenience rather * than a necessity, hence using __GFP_NORETRY until * falling back to minimum-order allocations.
*/ for (order_mask &= GENMASK(__fls(count), 0);
order_mask; order_mask &= ~order_size) { unsignedint order = __fls(order_mask);
gfp_t alloc_flags = gfp;
order_size = 1U << order; if (order_mask > order_size)
alloc_flags |= __GFP_NORETRY;
page = alloc_pages_node(nid, alloc_flags, order); if (!page) continue; if (order)
split_page(page, order); break;
} if (!page) {
__iommu_dma_free_pages(pages, i); return NULL;
}
count -= order_size; while (order_size--)
pages[i++] = page++;
} return pages;
}
/* * If size is less than PAGE_SIZE, then a full CPU page will be allocated, * but an IOMMU which supports smaller pages might not map the whole thing.
*/ staticstruct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
size_t size, struct sg_table *sgt, gfp_t gfp, unsignedlong attrs)
{ struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad; bool coherent = dev_is_dma_coherent(dev); int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); unsignedint count, min_size, alloc_sizes = domain->pgsize_bitmap; struct page **pages;
dma_addr_t iova;
ssize_t ret;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain)) return NULL;
/* * Remove the zone/policy flags from the GFP - these are applied to the * __iommu_dma_alloc_pages() but are not used for the supporting * internal allocations that follow.
*/
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_COMP);
if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, gfp)) goto out_free_iova;
if (!(ioprot & IOMMU_CACHE)) { struct scatterlist *sg; int i;
/*
 * This is the actual return value from the iommu_dma_alloc_noncontiguous.
 *
 * The users of the DMA API should only care about the sg_table, but to make
 * the DMA-API internal vmaping and freeing easier we stash away the page
 * array as well (except for the fallback case). This can go away any time,
 * e.g. when a vmap-variant that takes a scatterlist comes along.
 */
struct dma_sgt_handle {
	struct sg_table sgt;
	struct page **pages;
};

/* Recover the containing handle from the sg_table handed back to the caller */
#define sgt_handle(sgt) \
	container_of((sgt), struct dma_sgt_handle, sgt)
/* * Untrusted devices should not see padding areas with random leftover * kernel data, so zero the pre- and post-padding. * swiotlb_tbl_map_single() has initialized the bounce buffer proper to * the contents of the original memory buffer.
*/ if (phys != (phys_addr_t)DMA_MAPPING_ERROR && dev_is_untrusted(dev)) {
size_t start, virt = (size_t)phys_to_virt(phys);
/*
 * Checks if a physical buffer has unaligned boundaries with respect to
 * the IOMMU granule. Returns non-zero if either the start or end
 * address is not aligned to the granule boundary.
 */
static inline size_t iova_unaligned(struct iova_domain *iovad, phys_addr_t phys,
				    size_t size)
{
	/*
	 * OR-ing the start address and the length means a sub-granule bit
	 * set in either one makes the combined offset non-zero, covering
	 * both the start- and end-alignment checks in a single test.
	 */
	return iova_offset(iovad, phys | size);
}
/* * If both the physical buffer start address and size are page aligned, * we don't need to use a bounce page.
*/ if (dev_use_swiotlb(dev, size, dir) &&
iova_unaligned(iovad, phys, size)) {
phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs); if (phys == (phys_addr_t)DMA_MAPPING_ERROR) return DMA_MAPPING_ERROR;
}
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
arch_sync_dma_for_device(phys, size, dir);
/* * Prepare a successfully-mapped scatterlist to give back to the caller. * * At this point the segments are already laid out by iommu_dma_map_sg() to * avoid individually crossing any boundaries, so we merely need to check a * segment's start address to avoid concatenating across one.
*/ staticint __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
dma_addr_t dma_addr)
{ struct scatterlist *s, *cur = sg; unsignedlong seg_mask = dma_get_seg_boundary(dev); unsignedint cur_len = 0, max_len = dma_get_max_seg_size(dev); int i, count = 0;
for_each_sg(sg, s, nents, i) { /* Restore this segment's original unaligned fields first */
dma_addr_t s_dma_addr = sg_dma_address(s); unsignedint s_iova_off = sg_dma_address(s); unsignedint s_length = sg_dma_len(s); unsignedint s_iova_len = s->length;
/* * Now fill in the real DMA data. If... * - there is a valid output segment to append to * - and this segment starts on an IOVA page boundary * - but doesn't fall at a segment boundary * - and wouldn't make the resulting output segment too long
*/ if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
(max_len - cur_len >= s_length)) { /* ...then concatenate it with the previous one */
cur_len += s_length;
} else { /* Otherwise start the next output segment */ if (i > 0)
cur = sg_next(cur);
cur_len = s_length;
count++;
/* * If mapping failed, then just restore the original list, * but making sure the DMA fields are invalidated.
*/ staticvoid __invalidate_sg(struct scatterlist *sg, int nents)
{ struct scatterlist *s; int i;
/* * The DMA API client is passing in a scatterlist which could describe * any old buffer layout, but the IOMMU API requires everything to be * aligned to IOMMU pages. Hence the need for this complicated bit of * impedance-matching, to be able to hand off a suitably-aligned list, * but still preserve the original offsets and sizes for the caller.
*/ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsignedlong attrs)
{ struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad; struct scatterlist *s, *prev = NULL; int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs); struct pci_p2pdma_map_state p2pdma_state = {};
dma_addr_t iova;
size_t iova_len = 0; unsignedlong mask = dma_get_seg_boundary(dev);
ssize_t ret; int i;
if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
ret = iommu_deferred_attach(dev, domain); if (ret) goto out;
}
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
/* * Work out how much IOVA space we need, and align the segments to * IOVA granules for the IOMMU driver to handle. With some clever * trickery we can modify the list in-place, but reversibly, by * stashing the unaligned parts in the as-yet-unused DMA fields.
*/
for_each_sg(sg, s, nents, i) {
size_t s_iova_off = iova_offset(iovad, s->offset);
size_t s_length = s->length;
size_t pad_len = (mask - iova_len + 1) & mask;
switch (pci_p2pdma_state(&p2pdma_state, dev, sg_page(s))) { case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: /* * Mapping through host bridge should be mapped with * regular IOVAs, thus we do nothing here and continue * below.
*/ break; case PCI_P2PDMA_MAP_NONE: break; case PCI_P2PDMA_MAP_BUS_ADDR: /* * iommu_map_sg() will skip this segment as it is marked * as a bus address, __finalise_sg() will copy the dma * address into the output segment.
*/
s->dma_address = pci_p2pdma_bus_addr_map(&p2pdma_state,
sg_phys(s));
sg_dma_len(s) = sg->length;
sg_dma_mark_bus_address(s); continue; default:
ret = -EREMOTEIO; goto out_restore_sg;
}
/* * Due to the alignment of our single IOVA allocation, we can * depend on these assumptions about the segment boundary mask: * - If mask size >= IOVA size, then the IOVA range cannot * possibly fall across a boundary, so we don't care. * - If mask size < IOVA size, then the IOVA range must start * exactly on a boundary, therefore we can lay things out * based purely on segment lengths without needing to know * the actual addresses beforehand. * - The mask must be a power of 2, so pad_len == 0 if * iova_len == 0, thus we cannot dereference prev the first * time through here (i.e. before it has a meaningful value).
*/ if (pad_len && pad_len < s_length - 1) {
prev->length += pad_len;
iova_len += pad_len;
}
iova_len += s_length;
prev = s;
}
if (!iova_len) return __finalise_sg(dev, sg, nents, 0);
iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev); if (!iova) {
ret = -ENOMEM; goto out_restore_sg;
}
/* * We'll leave any physical concatenation to the IOMMU driver's * implementation - it knows better than we do.
*/
ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); if (ret < 0 || ret < iova_len) goto out_free_iova;
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsignedlong attrs)
{
dma_addr_t end = 0, start; struct scatterlist *tmp; int i;
if (sg_dma_is_swiotlb(sg)) {
iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs); return;
}
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
/* * The scatterlist segments are mapped into a single * contiguous IOVA allocation, the start and end points * just have to be determined.
*/
for_each_sg(sg, tmp, nents, i) { if (sg_dma_is_bus_address(tmp)) {
sg_dma_unmark_bus_address(tmp); continue;
}
if (is_vmalloc_addr(cpu_addr)) { /* * If it the address is remapped, then it's either non-coherent * or highmem CMA, or an iommu_dma_alloc_remap() construction.
*/
pages = dma_common_find_pages(cpu_addr); if (!pages)
page = vmalloc_to_page(cpu_addr);
dma_common_free_remap(cpu_addr, alloc_size);
} else { /* Lowmem means a coherent atomic or CMA allocation */
page = virt_to_page(cpu_addr);
}
if (pages)
__iommu_dma_free_pages(pages, count); if (page)
dma_free_contiguous(dev, page, alloc_size);
}
/*
 * Report the largest mapping we can handle: untrusted devices are bounced
 * through swiotlb, which caps the size; everyone else is unlimited.
 */
size_t iommu_dma_max_mapping_size(struct device *dev)
{
	return dev_is_untrusted(dev) ? swiotlb_max_mapping_size(dev) : SIZE_MAX;
}
/** * dma_iova_try_alloc - Try to allocate an IOVA space * @dev: Device to allocate the IOVA space for * @state: IOVA state * @phys: physical address * @size: IOVA size * * Check if @dev supports the IOVA-based DMA API, and if yes allocate IOVA space * for the given base address and size. * * Note: @phys is only used to calculate the IOVA alignment. Callers that always * do PAGE_SIZE aligned transfers can safely pass 0 here. * * Returns %true if the IOVA-based DMA API can be used and IOVA space has been * allocated, or %false if the regular DMA API should be used.
*/ bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
phys_addr_t phys, size_t size)
{ struct iommu_dma_cookie *cookie; struct iommu_domain *domain; struct iova_domain *iovad;
size_t iova_off;
dma_addr_t addr;
memset(state, 0, sizeof(*state)); if (!use_dma_iommu(dev)) returnfalse;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, iommu_get_domain_for_dev(dev))) returnfalse;
if (WARN_ON_ONCE(!size)) returnfalse;
/* * DMA_IOVA_USE_SWIOTLB is flag which is set by dma-iommu * internals, make sure that caller didn't set it and/or * didn't use this interface to map SIZE_MAX.
*/ if (WARN_ON_ONCE((u64)size & DMA_IOVA_USE_SWIOTLB)) returnfalse;
/** * dma_iova_free - Free an IOVA space * @dev: Device to free the IOVA space for * @state: IOVA state * * Undoes a successful dma_try_iova_alloc(). * * Note that all dma_iova_link() calls need to be undone first. For callers * that never call dma_iova_unlink(), dma_iova_destroy() can be used instead * which unlinks all ranges and frees the IOVA space in a single efficient * operation.
*/ void dma_iova_free(struct device *dev, struct dma_iova_state *state)
{ struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad;
size_t iova_start_pad = iova_offset(iovad, state->addr);
size_t size = dma_iova_size(state);
/** * dma_iova_link - Link a range of IOVA space * @dev: DMA device * @state: IOVA state * @phys: physical address to link * @offset: offset into the IOVA state to map into * @size: size of the buffer * @dir: DMA direction * @attrs: attributes of mapping properties * * Link a range of IOVA space for the given IOVA state without IOTLB sync. * This function is used to link multiple physical addresses in contiguous * IOVA space without performing costly IOTLB sync. * * The caller is responsible to call to dma_iova_sync() to sync IOTLB at * the end of linkage.
*/ int dma_iova_link(struct device *dev, struct dma_iova_state *state,
phys_addr_t phys, size_t offset, size_t size, enum dma_data_direction dir, unsignedlong attrs)
{ struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad;
size_t iova_start_pad = iova_offset(iovad, phys);
if (WARN_ON_ONCE(iova_start_pad && offset > 0)) return -EIO;
/** * dma_iova_sync - Sync IOTLB * @dev: DMA device * @state: IOVA state * @offset: offset into the IOVA state to sync * @size: size of the buffer * * Sync IOTLB for the given IOVA state. This function should be called on * the IOVA-contiguous range created by one ore more dma_iova_link() calls * to sync the IOTLB.
*/ int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
size_t offset, size_t size)
{ struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad;
dma_addr_t addr = state->addr + offset;
size_t iova_start_pad = iova_offset(iovad, addr);
if (!iotlb_gather.queued)
iommu_iotlb_sync(domain, &iotlb_gather); if (free_iova)
iommu_dma_free_iova(domain, addr, size, &iotlb_gather);
}
/**
 * dma_iova_unlink - Unlink a range of IOVA space
 * @dev: DMA device
 * @state: IOVA state
 * @offset: offset into the IOVA state to unlink
 * @size: size of the buffer
 * @dir: DMA direction
 * @attrs: attributes of mapping properties
 *
 * Unlink a range of IOVA space for the given IOVA state.
 */
void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
		     size_t offset, size_t size, enum dma_data_direction dir,
		     unsigned long attrs)
{
	/* free_iova=false: only unmap the range, keep the IOVA space allocated */
	__iommu_dma_iova_unlink(dev, state, offset, size, dir, attrs, false);
}
EXPORT_SYMBOL_GPL(dma_iova_unlink);
/**
 * dma_iova_destroy - Finish a DMA mapping transaction
 * @dev: DMA device
 * @state: IOVA state
 * @mapped_len: number of bytes to unmap
 * @dir: DMA direction
 * @attrs: attributes of mapping properties
 *
 * Unlink the IOVA range up to @mapped_len and free the entire IOVA space. The
 * range of IOVA from dma_addr to @mapped_len must all be linked, and be the
 * only linked IOVA in state.
 */
void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
		      size_t mapped_len, enum dma_data_direction dir,
		      unsigned long attrs)
{
	if (!mapped_len) {
		/*
		 * We can be here if the first call to dma_iova_link() failed
		 * and there is nothing to unlink; just release the IOVA space.
		 */
		dma_iova_free(dev, state);
		return;
	}

	/* free_iova=true: unlink the mapped range and free the space in one go */
	__iommu_dma_iova_unlink(dev, state, 0, mapped_len, dir, attrs, true);
}
EXPORT_SYMBOL_GPL(dma_iova_destroy);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.