buffers = buflout->buffers; for (i = 0; i < n; i++)
buffers[i].addr = DMA_MAPPING_ERROR;
for_each_sg(sglout, sg, n_sglout, i) { int y = sg_nctr;
if (!sg->length) continue;
if (left >= sg->length) {
left -= sg->length; continue;
}
buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
sg->length - left,
DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(dev, buffers[y].addr))) goto err_out;
buffers[y].len = sg->length;
sg_nctr++; if (left) {
buffers[y].len -= left;
left = 0;
}
} if (extra_buff) {
buffers[sg_nctr].addr = extra_dst_buff;
buffers[sg_nctr].len = sz_extra_dst_buff;
}
buflout->num_bufs = sg_nctr;
buflout->num_bufs += extra_buff;
buflout->num_mapped_bufs = sg_nctr;
bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, bloutp))) goto err_out;
buf->blout = buflout;
buf->bloutp = bloutp;
buf->sz_out = sz_out;
} else { /* Otherwise set the src and dst to the same address */
buf->bloutp = buf->blp;
buf->sz_out = 0;
} return 0;
err_out: if (!dma_mapping_error(dev, bloutp))
dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
n = sg_nents(sglout); for (i = 0; i < n; i++) { if (buflout->buffers[i].addr == extra_dst_buff) break; if (!dma_mapping_error(dev, buflout->buffers[i].addr))
dma_unmap_single(dev, buflout->buffers[i].addr,
buflout->buffers[i].len,
DMA_BIDIRECTIONAL);
}
if (!buf->sgl_dst_valid)
kfree(buflout);
err_in: if (!dma_mapping_error(dev, blp))
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
n = sg_nents(sgl); for (i = 0; i < n; i++) if (!dma_mapping_error(dev, bufl->buffers[i].addr))
dma_unmap_single(dev, bufl->buffers[i].addr,
bufl->buffers[i].len,
bufl_dma_dir);
if (!buf->sgl_src_valid)
kfree(bufl);
dev_err(dev, "Failed to map buf for dma\n"); return -ENOMEM;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.