/**
 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.  Entries previously tagged with PIDE_FLAG by
 * iommu_coalesce_chunks() mark the start of a new DMA stream; for each
 * stream the pdir entries covering it are filled one IOVP_SIZE page at a
 * time via the controller-specific @iommu_io_pdir_entry callback.
 *
 * Returns the number of DMA streams (coalesced mappings) filled.
 *
 * NOTE(review): the extracted source was truncated (the PIDE_FLAG test and
 * the pdir fill loop were missing, leaving `pide` undefined); this body is
 * reconstructed to match the upstream parisc iommu-helpers logic — verify
 * against the tree's canonical copy before merging.
 */
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
		unsigned long hint,
		void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	__le64 *pdirp = NULL;

	/* Horrible hack. For efficiency's sake, dma_sg starts one
	 * entry below the true start (it is immediately incremented
	 * in the loop) */
	dma_sg--;

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;

		/*
		** Look for the start of a new DMA stream
		*/
		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
#if defined(ZX1_SUPPORT)
			/* Pluto IOMMU IO Virt Address is not zero based */
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
			/* SBA, ccio, and dino are zero based.
			** Trying to save a few CPU cycles for most users.
			*/
			sg_dma_address(dma_sg) = pide;
#endif
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			prefetchw(pdirp);
		}

		BUG_ON(pdirp == NULL);

		vaddr = (unsigned long)sg_virt(startsg);
		sg_dma_len(dma_sg) += startsg->length;
		size = startsg->length + dma_offset;
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
		/* Fill one pdir entry per IOVP-sized page of this chunk. */
		do {
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while (unlikely(size > 0));
		startsg++;
	}
	return n_mappings;
}
/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separate from the coalescing/allocation keeps the
** code simpler. Future enhancement could make one pass through
** the sglist do both.
**
** NOTE(review): the extracted source was truncated (the outer stream loop
** and the inner look-ahead loop were missing, leaving contig_sg, dma_len,
** dma_offset, prev_end and sg_start uninitialized); this body is
** reconstructed to match the upstream parisc iommu-helpers logic — verify
** against the tree's canonical copy before merging.
*/
static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
		struct scatterlist *startsg, int nents,
		int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
	struct scatterlist *contig_sg;	   /* contig chunk head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	unsigned int n_mappings = 0;
	unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
					(unsigned)DMA_CHUNK_SIZE);
	unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
	if (max_seg_boundary)	/* check if the addition above didn't overflow */
		max_seg_size = min(max_seg_size, max_seg_boundary);

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = startsg->offset;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long prev_end, sg_start;

			prev_end = (unsigned long)sg_virt(startsg) +
							startsg->length;

			startsg++;
			sg_start = (unsigned long)sg_virt(startsg);

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure current dma stream won't
			** exceed max_seg_size if we coalesce the
			** next entry.
			*/
			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
				     max_seg_size))
				break;

			/*
			* Next see if we can append the next chunk (i.e.
			* it must end on one page and begin on another, or
			* it must start on the same address as the previous
			* entry ended.
			*/
			if (unlikely((prev_end != sg_start) ||
				((prev_end | sg_start) & ~PAGE_MASK)))
				break;

			dma_len += startsg->length;
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.