// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018-2020 Christoph Hellwig. * * DMA operations that map physical memory directly without using an IOMMU.
*/ #include <linux/memblock.h> /* for max_pfn */ #include <linux/export.h> #include <linux/mm.h> #include <linux/dma-map-ops.h> #include <linux/scatterlist.h> #include <linux/pfn.h> #include <linux/vmalloc.h> #include <linux/set_memory.h> #include <linux/slab.h> #include <linux/pci-p2pdma.h> #include"direct.h"
/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
u64 zone_dma_limit __ro_after_init = DMA_BIT_MASK(24);
/* * Optimistically try the zone that the physical address mask falls * into first. If that returns memory that isn't actually addressable * we will fallback to the next lower zone and try again. * * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding * zones.
*/
*phys_limit = dma_to_phys(dev, dma_limit); if (*phys_limit <= zone_dma_limit) return GFP_DMA; if (*phys_limit <= DMA_BIT_MASK(32)) return GFP_DMA32; return 0;
}
if (!force_dma_unencrypted(dev)) return 0;
ret = set_memory_encrypted((unsignedlong)vaddr, PFN_UP(size)); if (ret)
pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n"); return ret;
}
/* * Check if a potentially blocking operations needs to dip into the atomic * pools for the given device/gfp.
*/ staticbool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{ return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}
if (!dev_is_dma_coherent(dev)) { if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
!is_swiotlb_for_alloc(dev)) return arch_dma_alloc(dev, size, dma_handle, gfp,
attrs);
/* * If there is a global pool, always allocate from it for * non-coherent devices.
*/ if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL)) return dma_alloc_from_global_coherent(dev, size,
dma_handle);
/* * Otherwise we require the architecture to either be able to * mark arbitrary parts of the kernel direct mapping uncached, * or remapped it uncached.
*/
set_uncached = IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED);
remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP); if (!set_uncached && !remap) {
pr_warn_once("coherent DMA allocations not supported on this platform.\n"); return NULL;
}
}
/* * Remapping or decrypting memory may block, allocate the memory from * the atomic pools instead if we aren't allowed block.
*/ if ((remap || force_dma_unencrypted(dev)) &&
dma_direct_use_pool(dev, gfp)) return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
/* we always manually zero the memory once we are done */
page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true); if (!page) return NULL;
/* * dma_alloc_contiguous can return highmem pages depending on a * combination the cma= arguments and per-arch setup. These need to be * remapped to return a kernel virtual address.
*/ if (PageHighMem(page)) {
remap = true;
set_uncached = false;
}
if (remap) {
pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
if (force_dma_unencrypted(dev))
prot = pgprot_decrypted(prot);
/* remove any dirty cache lines on the kernel alias */
arch_dma_prep_coherent(page, size);
/* create a coherent mapping */
ret = dma_common_contiguous_remap(page, size, prot,
__builtin_return_address(0)); if (!ret) goto out_free_pages;
} else {
ret = page_address(page); if (dma_set_decrypted(dev, ret, size)) goto out_leak_pages;
}
memset(ret, 0, size);
if (set_uncached) {
arch_dma_prep_coherent(page, size);
ret = arch_dma_set_uncached(ret, size); if (IS_ERR(ret)) goto out_encrypt_pages;
}
if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
!dev_is_dma_coherent(dev)) { if (!dma_release_from_global_coherent(page_order, cpu_addr))
WARN_ON_ONCE(1); return;
}
/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */ if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) return;
if (is_vmalloc_addr(cpu_addr)) {
vunmap(cpu_addr);
} else { if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
arch_dma_clear_uncached(cpu_addr, size); if (dma_set_encrypted(dev, cpu_addr, size)) return;
}
/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */ if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
dma_free_from_pool(dev, vaddr, size)) return;
if (dma_set_encrypted(dev, vaddr, size)) return;
__dma_direct_free_pages(dev, page, size);
}
if (dir == DMA_FROM_DEVICE)
arch_dma_mark_clean(paddr, sg->length);
}
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu_all();
}
/* * Unmaps segments, except for ones marked as pci_p2pdma which do not * require any further action as they contain a bus address.
*/ void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsignedlong attrs)
{ struct scatterlist *sg; int i;
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsignedlong attrs)
{ struct pci_p2pdma_map_state p2pdma_state = {}; struct scatterlist *sg; int i, ret;
for_each_sg(sgl, sg, nents, i) { switch (pci_p2pdma_state(&p2pdma_state, dev, sg_page(sg))) { case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: /* * Any P2P mapping that traverses the PCI host bridge * must be mapped with CPU physical address and not PCI * bus addresses.
*/ break; case PCI_P2PDMA_MAP_NONE:
sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
sg->offset, sg->length, dir, attrs); if (sg->dma_address == DMA_MAPPING_ERROR) {
ret = -EIO; goto out_unmap;
} break; case PCI_P2PDMA_MAP_BUS_ADDR:
sg->dma_address = pci_p2pdma_bus_addr_map(&p2pdma_state,
sg_phys(sg));
sg_dma_mark_bus_address(sg); continue; default:
ret = -EREMOTEIO; goto out_unmap;
}
sg_dma_len(sg) = sg->length;
}
/* * Because 32-bit DMA masks are so common we expect every architecture * to be able to satisfy them - either by not supporting more physical * memory, or by providing a ZONE_DMA32. If neither is the case, the * architecture needs to use an IOMMU instead of the direct mapping.
*/ if (mask >= DMA_BIT_MASK(32)) return 1;
/* * This check needs to be against the actual bit mask value, so use * phys_to_dma_unencrypted() here so that the SME encryption mask isn't * part of the check.
*/ if (IS_ENABLED(CONFIG_ZONE_DMA))
min_mask = min_t(u64, min_mask, zone_dma_limit); return mask >= phys_to_dma_unencrypted(dev, min_mask);
}
if (start_pfn >= cpu_start_pfn &&
start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) return m;
}
return NULL;
}
/* * To check whether all ram resource ranges are covered by dma range map * Returns 0 when further check is needed * Returns 1 if there is some RAM range can't be covered by dma_range_map
*/ staticint check_ram_in_range_map(unsignedlong start_pfn, unsignedlong nr_pages, void *data)
{ unsignedlong end_pfn = start_pfn + nr_pages; struct device *dev = data;
while (start_pfn < end_pfn) { conststruct bus_dma_region *bdr;
bdr = dma_find_range(dev, start_pfn); if (!bdr) return 1;
size_t dma_direct_max_mapping_size(struct device *dev)
{ /* If SWIOTLB is active, use its maximum mapping size */ if (is_swiotlb_active(dev) &&
(dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev))) return swiotlb_max_mapping_size(dev); return SIZE_MAX;
}
/** * dma_direct_set_offset - Assign scalar offset for a single DMA range. * @dev: device pointer; needed to "own" the alloced memory. * @cpu_start: beginning of memory region covered by this offset. * @dma_start: beginning of DMA/PCI region covered by this offset. * @size: size of the region. * * This is for the simple case of a uniform offset which cannot * be discovered by "dma-ranges". * * It returns -ENOMEM if out of memory, -EINVAL if a map * already exists, 0 otherwise. * * Note: any call to this from a driver is a bug. The mapping needs * to be described by the device tree or other firmware interfaces.
*/ int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
dma_addr_t dma_start, u64 size)
{ struct bus_dma_region *map;
u64 offset = (u64)cpu_start - (u64)dma_start;
if (dev->dma_range_map) {
dev_err(dev, "attempt to add DMA range to existing map\n"); return -EINVAL;
}
/*
 * NOTE(review): the text below is extraneous scraping residue (a German
 * website disclaimer) that does not belong in this source file; it is kept
 * only as a comment, translated to English, pending removal:
 *
 * "The information on this web page has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Remark: the syntax highlighting and the measurement are still
 * experimental."
 */