// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device drivers operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFN are the "real" frame numbers. Furthermore
 * memory is not contiguous. Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFN's under the 4GB mark.
*/
/* If the address is outside our domain, it CAN * have the same virtual address as another address * in our domain. Therefore _only_ check address within our domain.
*/ if (pfn_valid(PFN_DOWN(paddr))) return swiotlb_find_pool(dev, paddr); return NULL;
}
#ifdef CONFIG_X86
/*
 * Make the swiotlb bounce buffer usable for DMA under Xen: rework the
 * guest-allocated buffer, one IO_TLB_SEGSIZE-slab segment at a time, so
 * each segment is machine-contiguous and addressable below dma_bits.
 *
 * For each segment we ask the hypervisor (xen_create_contiguous_region)
 * for a contiguous machine region, retrying with a progressively wider
 * address limit (dma_bits) until it succeeds or MAX_DMA_BITS is reached.
 *
 * @buf:    virtual start of the swiotlb buffer
 * @nslabs: total number of IO_TLB slabs in the buffer
 *
 * Returns 0 on success, or the last error from
 * xen_create_contiguous_region() on failure.
 */
int __init xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
	int rc;
	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
	unsigned int i, dma_bits = order + PAGE_SHIFT;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	i = 0;
	do {
		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT), order,
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += IO_TLB_SEGSIZE;
	} while (i < nslabs);
	return 0;
}
if (TestClearPageXenRemapped(virt_to_page(vaddr)))
xen_destroy_contiguous_region(phys, order);
free_pages((unsignedlong)vaddr, get_order(size));
} #endif/* CONFIG_X86 */
/* * Map a single buffer of the indicated size for DMA in streaming mode. The * physical address to use is returned. * * Once the device is given the dma address, the device owns this memory until * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
*/ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, unsignedlong offset, size_t size, enum dma_data_direction dir, unsignedlong attrs)
{
phys_addr_t map, phys = page_to_phys(page) + offset;
dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);
BUG_ON(dir == DMA_NONE); /* * If the address happens to be in the device's DMA window, * we can safely return the device addr and not worry about bounce * buffering it.
*/ if (dma_capable(dev, dev_addr, size, true) &&
!dma_kmalloc_needs_bounce(dev, size, dir) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
!is_swiotlb_force_bounce(dev)) goto done;
/* * Oh well, have to allocate and map a bounce buffer.
*/
trace_swiotlb_bounced(dev, dev_addr, size);
/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
	struct io_tlb_pool *pool;

	BUG_ON(dir == DMA_NONE);

	/*
	 * For non-coherent devices, make CPU-visible whatever the device
	 * wrote, unless the caller asked to skip the CPU sync. Local pages
	 * use the arch sync; foreign pages go through the Xen helper.
	 */
	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
	}

	/* NOTE: We use dev_addr here, not paddr! */
	pool = xen_swiotlb_find_pool(hwdev, dev_addr);
	if (pool)
		__swiotlb_tbl_unmap_single(hwdev, paddr, size, dir,
					   attrs, pool);
}
pool = xen_swiotlb_find_pool(dev, dma_addr); if (pool)
__swiotlb_sync_single_for_device(dev, paddr, size, dir, pool);
if (!dev_is_dma_coherent(dev)) { if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
arch_sync_dma_for_device(paddr, size, dir); else
xen_dma_sync_for_device(dev, dma_addr, size, dir);
}
}
/* * Unmap a set of streaming mode DMA translations. Again, cpu read rules * concerning calls here are the same as for swiotlb_unmap_page() above.
*/ staticvoid
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, unsignedlong attrs)
{ struct scatterlist *sg; int i;
/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	/*
	 * The mask is supported iff the DMA address of the swiotlb limit
	 * (translated through Xen's phys-to-dma mapping) fits under it.
	 */
	return xen_phys_to_dma(hwdev, default_swiotlb_limit()) <= mask;
}
/*
 * NOTE(review): The following German website-footer text was accidentally
 * appended to this source file during extraction; it is fenced off in a
 * comment so the file remains compilable. It should be removed entirely.
 *
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell."
 */