/* * Xen dma-buf functionality for gntdev. * * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c. * * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
*/
/* Per-gntdev-file dma-buf bookkeeping state. */
struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This is the lock which protects dma_buf_xxx lists. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};
/* DMA buffer export support. */
/* Implementation of wait for exported DMA buffer to be released. */
mutex_lock(&priv->lock);
list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next) if (gntdev_dmabuf->fd == fd) {
pr_debug("Found gntdev_dmabuf in the wait list\n");
kref_get(&gntdev_dmabuf->u.exp.refcount);
ret = gntdev_dmabuf; break;
}
mutex_unlock(&priv->lock); return ret;
}
/*
 * Wait for the exported DMA buffer identified by @fd to be released,
 * for at most @wait_to_ms milliseconds.
 *
 * Returns 0 on success or a negative errno propagated from the lookup
 * or wait helpers (lookup failure means the buffer was already released
 * or @fd is wrong).
 */
static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);

	/*
	 * Try to find the DMA buffer: if not found means that
	 * either the buffer has already been released or file descriptor
	 * provided is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and is reference count locked by us now,
	 * so prepare to wait: allocate wait object and add it to the wait list,
	 * so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}
pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
attach->dev);
if (dir == DMA_NONE || !gntdev_dmabuf_attach) return ERR_PTR(-EINVAL);
/* Return the cached mapping when possible. */ if (gntdev_dmabuf_attach->dir == dir) return gntdev_dmabuf_attach->sgt;
/* * Two mappings with different directions for the same attachment are * not allowed.
*/ if (gntdev_dmabuf_attach->dir != DMA_NONE) return ERR_PTR(-EBUSY);
sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
gntdev_dmabuf->nr_pages); if (!IS_ERR(sgt)) { if (dma_map_sgtable(attach->dev, sgt, dir,
DMA_ATTR_SKIP_CPU_SYNC)) {
sg_free_table(sgt);
kfree(sgt);
sgt = ERR_PTR(-ENOMEM);
} else {
gntdev_dmabuf_attach->sgt = sgt;
gntdev_dmabuf_attach->dir = dir;
}
} if (IS_ERR(sgt))
pr_debug("Failed to map sg table for dev %p\n", attach->dev); return sgt;
}
/*
 * dma_buf_ops .unmap_dma_buf callback — intentionally a no-op here;
 * per the comment below, the unmap happens in dmabuf_exp_ops_detach().
 */
static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}
attach = dma_buf_attach(dma_buf, dev); if (IS_ERR(attach)) {
ret = ERR_CAST(attach); goto fail_free_obj;
}
gntdev_dmabuf->u.imp.attach = attach;
sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL); if (IS_ERR(sgt)) {
ret = ERR_CAST(sgt); goto fail_detach;
}
/* Check that we have zero offset. */ if (sgt->sgl->offset) {
ret = ERR_PTR(-EINVAL);
pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
sgt->sgl->offset); goto fail_unmap;
}
/* Check number of pages that imported buffer has. */ if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
ret = ERR_PTR(-EINVAL);
pr_debug("DMA buffer has %zu pages, user-space expects %d\n",
attach->dmabuf->size, gntdev_dmabuf->nr_pages); goto fail_unmap;
}
gntdev_dmabuf->u.imp.sgt = sgt;
gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL); if (!gfns) {
ret = ERR_PTR(-ENOMEM); goto fail_unmap;
}
/* * Now convert sgt to array of gfns without accessing underlying pages. * It is not allowed to access the underlying struct page of an sg table * exported by DMA-buf, but since we deal with special Xen dma device here * (not a normal physical one) look at the dma addresses in the sg table * and then calculate gfns directly from them.
*/
i = 0;
for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
dma_addr_t addr = sg_page_iter_dma_address(&sg_iter); unsignedlong pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
gfns[i++] = pfn_to_gfn(pfn);
}
ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
gntdev_dmabuf->u.imp.refs,
count, domid));
kfree(gfns); if (IS_ERR(ret)) goto fail_end_access;
/* * Find the hyper dma-buf by its file descriptor and remove * it from the buffer's list.
*/ staticstruct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{ struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
mutex_lock(&priv->lock);
list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) { if (gntdev_dmabuf->fd == fd) {
pr_debug("Found gntdev_dmabuf in the import list\n");
ret = gntdev_dmabuf;
list_del(&gntdev_dmabuf->next); break;
}
}
mutex_unlock(&priv->lock); return ret;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.