	/*
	 * If we are entirely done with this bi_io_vec entry, check if the next
	 * one could be merged into it.  This typically happens when moving to
	 * the next bio, but some callers also don't pack bvecs tight.
	 */
	while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) {
		struct bio_vec next;

		if (!iter->iter.bi_size) {
			if (!iter->bio->bi_next)
				break;
			iter->bio = iter->bio->bi_next;
			iter->iter = iter->bio->bi_iter;
		}

		next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
		if (bv.bv_len + next.bv_len > max_size ||
		    !biovec_phys_mergeable(req->q, &bv, &next))
			break;

		bv.bv_len += next.bv_len;
		bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
	}

	vec->len = bv.bv_len;
	return true;
}
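/*
 * Illustrative example (not from the kernel sources): assume a request whose
 * data is described by two 2k bvecs that happen to be physically contiguous,
 * e.g. {paddr = 0x1000, len = 0x800} followed by {paddr = 0x1800, len = 0x800},
 * possibly sitting in consecutive bios.  With max_size >= 4k,
 * biovec_phys_mergeable() accepts the pair and the loop above folds them into
 * a single phys_vec {paddr = 0x1000, len = 0x1000}, so the caller sees one
 * DMA segment instead of two.
 */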
/*
 * The IOVA-based DMA API wants to be able to coalesce at the minimal IOMMU
 * page size granularity (which is guaranteed to be <= PAGE_SIZE and usually
 * 4k), so we need to ensure our segments are aligned to this as well.
 *
 * Note that there is no point in using the slightly more complicated IOVA
 * based path for single segment mappings.
 */
static inline bool blk_can_dma_map_iova(struct request *req,
		struct device *dma_dev)
{
	return !((queue_virt_boundary(req->q) + 1) &
		dma_get_merge_boundary(dma_dev));
}
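/*
 * Illustrative example (not from the kernel sources): with a 4k IOMMU granule,
 * dma_get_merge_boundary() returns 0xfff.  A queue that sets a 4k virt
 * boundary (queue_virt_boundary() == 0xfff), as e.g. NVMe does for its PRP
 * lists, passes the check above because (0xfff + 1) & 0xfff == 0.  A queue
 * with no virt boundary (mask 0) fails it because (0 + 1) & 0xfff != 0, and
 * such requests use the non-IOVA, per-segment mapping path instead.
 */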
/**
 * blk_rq_dma_map_iter_start - map the first DMA segment for a request
 * @req:	request to map
 * @dma_dev:	device to map to
 * @state:	DMA IOVA state
 * @iter:	block layer DMA iterator
 *
 * Start DMA mapping @req to @dma_dev.  @state and @iter are provided by the
 * caller and don't need to be initialized.  @state needs to be stored for use
 * at unmap time, @iter is only needed at map time.
 *
 * Returns %false if there is no segment to map, including due to an error, or
 * %true if it did map a segment.
 *
 * If a segment was mapped, the DMA address for it is returned in @iter.addr
 * and the length in @iter.len.  If no segment was mapped the status code is
 * returned in @iter.status.
 *
 * The caller can call blk_rq_dma_map_coalesce() to check if further segments
 * need to be mapped after this, or go straight to blk_rq_dma_map_iter_next()
 * to try to map the following segments.
 */
bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
		struct dma_iova_state *state, struct blk_dma_iter *iter)
{
	unsigned int total_len = blk_rq_payload_bytes(req);
	struct phys_vec vec;

	/*
	 * Grab the first segment ASAP because we'll need it to check for P2P
	 * transfers.
	 */
	if (!blk_map_iter_next(req, &iter->iter, &vec))
		return false;

	if (IS_ENABLED(CONFIG_PCI_P2PDMA) && (req->cmd_flags & REQ_P2PDMA)) {
		switch (pci_p2pdma_state(&iter->p2pdma, dma_dev,
					 phys_to_page(vec.paddr))) {
		case PCI_P2PDMA_MAP_BUS_ADDR:
			return blk_dma_map_bus(iter, &vec);
		case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
			/*
			 * P2P transfers through the host bridge are treated
			 * the same as non-P2P transfers below and during
			 * unmap.
			 */
			req->cmd_flags &= ~REQ_P2PDMA;
			break;
		default:
			iter->status = BLK_STS_INVAL;
			return false;
		}
	}

	if (blk_can_dma_map_iova(req, dma_dev) &&
	    dma_iova_try_alloc(dma_dev, state, vec.paddr, total_len))
		return blk_rq_dma_map_iova(req, dma_dev, state, iter, &vec);
	return blk_dma_map_direct(req, dma_dev, iter, &vec);
}
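/*
 * Illustrative usage sketch (not part of the kernel sources): a driver maps
 * all segments of a request by starting with blk_rq_dma_map_iter_start() and
 * then calling blk_rq_dma_map_iter_next() until it returns false, as the
 * kernel-doc above describes.  "struct example_dev", its "dma_dev" member and
 * example_prep_segment() are made-up names for the sketch; error handling is
 * reduced to returning the iterator's status.
 */
static blk_status_t example_dma_map_rq(struct example_dev *dev,
		struct request *req, struct dma_iova_state *state)
{
	struct blk_dma_iter iter;

	if (!blk_rq_dma_map_iter_start(req, dev->dma_dev, state, &iter))
		return iter.status;

	do {
		/* hand iter.addr / iter.len to the hardware specific code */
		example_prep_segment(dev, req, iter.addr, iter.len);
	} while (blk_rq_dma_map_iter_next(req, dev->dma_dev, state, &iter));

	return iter.status;
}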
/**
 * blk_rq_dma_map_iter_next - map the next DMA segment for a request
 * @req:	request to map
 * @dma_dev:	device to map to
 * @state:	DMA IOVA state
 * @iter:	block layer DMA iterator
 *
 * Iterate to the next mapping after a previous call to
 * blk_rq_dma_map_iter_start().  See there for a detailed description of the
 * arguments.
 *
 * Returns %false if there is no segment to map, including due to an error, or
 * %true if it did map a segment.
 *
 * If a segment was mapped, the DMA address for it is returned in @iter.addr
 * and the length in @iter.len.  If no segment was mapped the status code is
 * returned in @iter.status.
 */
bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
		struct dma_iova_state *state, struct blk_dma_iter *iter)
{
	struct phys_vec vec;

	if (!blk_map_iter_next(req, &iter->iter, &vec))
		return false;

	if (iter->p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
		return blk_dma_map_bus(iter, &vec);
	return blk_dma_map_direct(req, dma_dev, iter, &vec);
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping.  We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}
/*
 * Map a request to scatterlist, return number of sg entries setup.  Caller
 * must make sure sg can hold rq->nr_phys_segments entries.
 */
int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
		    struct scatterlist **last_sg)
{
	struct req_iterator iter = {
		.bio	= rq->bio,
	};
	struct phys_vec vec;
	int nsegs = 0;

	/* the internal flush request may not have bio attached */
	if (iter.bio)
		iter.iter = iter.bio->bi_iter;

	while (blk_map_iter_next(rq, &iter, &vec)) {
		*last_sg = blk_next_sg(last_sg, sglist);
		sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
				offset_in_page(vec.paddr));
		nsegs++;
	}

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must have been wrong if the figured number of segments
	 * is bigger than the number of req's physical segments
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
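/*
 * Illustrative usage sketch (not part of the kernel sources): a blk-mq driver
 * typically builds the scatterlist for a command on its ->queue_rq() path and
 * then DMA maps it.  "struct example_cmd" with its "sgl" and "nents" members
 * is a made-up per-command structure; its sg table is assumed to have been
 * sized for the queue's max_segments limit at setup time.
 */
static blk_status_t example_map_sg(struct device *dma_dev, struct request *rq,
		struct example_cmd *cmd)
{
	struct scatterlist *last_sg = NULL;
	int nr;

	nr = __blk_rq_map_sg(rq, cmd->sgl, &last_sg);
	if (!nr)
		return BLK_STS_IOERR;

	nr = dma_map_sg(dma_dev, cmd->sgl, nr, rq_dma_dir(rq));
	if (!nr)
		return BLK_STS_RESOURCE;
	cmd->nents = nr;

	return BLK_STS_OK;
}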