for (i = 1; i < dma->nr_pages; i++) {
size = min_t(size_t, PAGE_SIZE, remaining_size);
remaining_size -= size;
addr = dma_map_page(dev,
dma->pages[i],
0,
size,
dma->direction); if (unlikely(dma_mapping_error(dev, addr))) {
__free_page(dma->pages[i]); return -EIO;
}
/* * Compress SG list entry when pages are contiguous * and transfer size less or equal to BCM_VK_MAX_SGL_CHUNK
*/ if ((addr == (sg_addr + transfer_size)) &&
((transfer_size + size) <= BCM_VK_MAX_SGL_CHUNK)) { /* pages are contiguous, add to same sg entry */
transfer_size += size;
} else { /* pages are not contiguous, write sg entry */
sgdata->size = transfer_size;
put_unaligned(sg_addr, (u64 *)&sgdata->address);
dma->sglist[SGLIST_NUM_SG]++;
/* start new sg entry */
sgdata++;
sg_addr = addr;
transfer_size = size;
}
} /* Write last sg list entry */
sgdata->size = transfer_size;
put_unaligned(sg_addr, (u64 *)&sgdata->address);
dma->sglist[SGLIST_NUM_SG]++;
/* Update pointers and size field to point to sglist */
put_unaligned((u64)dma->handle, &vkdata->address);
vkdata->size = (dma->sglist[SGLIST_NUM_SG] * sizeof(*sgdata)) +
(sizeof(u32) * SGLIST_VKDATA_START);
#ifdef BCM_VK_DUMP_SGLIST
dev_dbg(dev, "sgl 0x%llx handle 0x%llx, sglen: 0x%x sgsize: 0x%x\n",
(u64)dma->sglist,
dma->handle,
dma->sglen,
vkdata->size); for (i = 0; i < vkdata->size / sizeof(u32); i++)
dev_dbg(dev, "i:0x%x 0x%x\n", i, dma->sglist[i]); #endif
return 0;
}
/*
 * bcm_vk_sg_alloc() - convert an array of user descriptors to DMA SG lists.
 *
 * @dev:    device to perform the DMA mappings against
 * @dma:    array of @num DMA descriptors to be filled in
 * @dir:    DMA data direction
 * @vkdata: array of @num user descriptors (address/size pairs)
 * @num:    number of entries in @dma and @vkdata
 *
 * An entry with both size and address zero is skipped (success); an entry
 * with exactly one of them zero is rejected as invalid.
 *
 * Return: 0 on success, negative errno on failure.  On failure, every
 * descriptor allocated so far is freed before returning.
 */
int bcm_vk_sg_alloc(struct device *dev,
		    struct bcm_vk_dma *dma,
		    int dir,
		    struct _vk_data *vkdata,
		    int num)
{
	int i;
	int rc = -EINVAL;

	/* Convert user addresses to DMA SG List */
	for (i = 0; i < num; i++) {
		if (vkdata[i].size && vkdata[i].address) {
			/*
			 * If both size and address are non-zero
			 * then DMA alloc.
			 */
			rc = bcm_vk_dma_alloc(dev,
					      &dma[i],
					      dir,
					      &vkdata[i]);
		} else if (vkdata[i].size ||
			   vkdata[i].address) {
			/*
			 * If one of size and address are zero
			 * there is a problem.
			 */
			dev_err(dev, "Invalid vkdata %x 0x%x 0x%llx\n",
				i, vkdata[i].size, vkdata[i].address);
			rc = -EINVAL;
		} else {
			/*
			 * If size and address are both zero
			 * don't convert, but return success.
			 */
			rc = 0;
		}

		if (rc)
			goto fail_alloc;
	}
	return rc;

fail_alloc:
	/* Unwind: release every descriptor allocated in entries [0, i). */
	while (i > 0) {
		i--;
		if (dma[i].sglist)
			bcm_vk_dma_free(dev, &dma[i]);
	}
	return rc;
}
staticint bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma)
{
dma_addr_t addr; int i; int num_sg;
u32 size; struct _vk_data *vkdata;
/*
 * bcm_vk_sg_free() - free every DMA descriptor that still owns an SG list.
 *
 * @dev:      device the pages were mapped against
 * @dma:      array of @num DMA descriptors
 * @num:      number of entries in @dma
 * @proc_cnt: output - number of descriptors actually freed
 *
 * Return: 0.
 *
 * NOTE(review): the trailing "return 0;" and closing brace were missing
 * in the file as found (the function is declared int but was truncated);
 * restored here.
 */
int bcm_vk_sg_free(struct device *dev, struct bcm_vk_dma *dma, int num,
		   int *proc_cnt)
{
	int i;

	*proc_cnt = 0;
	/* Unmap and free all pages and sglists */
	for (i = 0; i < num; i++) {
		if (dma[i].sglist) {
			bcm_vk_dma_free(dev, &dma[i]);
			*proc_cnt += 1;
		}
	}

	return 0;
}
/*
 * NOTE(review): the following German website-disclaimer text was
 * accidentally pasted into this source file as bare (non-compiling) text;
 * it is preserved here as a comment.  English rendering: "The information
 * on this web page was compiled carefully and to the best of our
 * knowledge.  However, neither completeness, nor correctness, nor quality
 * of the information provided is guaranteed.  Note: the colored syntax
 * highlighting and the measurement are still experimental."
 */