rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
rk_obj->pages, rk_obj->num_pages); if (IS_ERR(rk_obj->sgt)) {
ret = PTR_ERR(rk_obj->sgt); goto err_put_pages;
}
/* * Fake up the SG table so that dma_sync_sg_for_device() can be used * to flush the pages associated with it. * * TODO: Replace this by drm_clflush_sg() once it can be implemented * without relying on symbols that are not exported.
*/
for_each_sgtable_sg(rk_obj->sgt, s, i)
sg_dma_address(s) = sg_phys(s);
/* * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the * whole buffer from the start.
*/
vma->vm_pgoff = 0;
/* * We allocated a struct page table for rk_obj, so clear * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);
/* * rockchip_gem_create_with_handle - allocate an object with the given * size and create a gem handle on it * * returns a struct rockchip_gem_object* on success or ERR_PTR values * on failure.
*/ staticstruct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv, struct drm_device *drm, unsignedint size, unsignedint *handle)
{ struct rockchip_gem_object *rk_obj; struct drm_gem_object *obj; bool is_framebuffer; int ret;
rk_obj = rockchip_gem_create_object(drm, size, is_framebuffer); if (IS_ERR(rk_obj)) return ERR_CAST(rk_obj);
obj = &rk_obj->base;
/* * allocate a id of idr table where the obj is registered * and handle has the id what user can see.
*/
ret = drm_gem_handle_create(file_priv, obj, handle); if (ret) goto err_handle_create;
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(obj);
return rk_obj;
err_handle_create:
rockchip_gem_free_object(obj);
return ERR_PTR(ret);
}
/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * align to 64 bytes since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	/*
	 * NOTE(review): the original text of this function was truncated
	 * (rk_obj declared but unused, no return, no closing brace). The
	 * tail below restores the evident intent: allocate the object with
	 * a handle and report success/failure to the caller.
	 */
	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}
/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	/* Page-array-backed object: build the table straight from the pages. */
	if (rk_obj->pages)
		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages,
					     rk_obj->num_pages);

	/* DMA-allocated object: let the DMA API describe the buffer. */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	/*
	 * The success-path return was missing in the original text, making
	 * control fall off the end of a non-void function (UB); the header
	 * comment promises the allocated table is returned.
	 */
	return sgt;
}
if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0); return -EINVAL;
}
/*
 * NOTE(review): the text below is not part of this driver; it is a German
 * web-page disclaimer that was accidentally pasted into the source file and
 * should simply be deleted. It is wrapped in a comment (with an English
 * translation) so the file remains compilable:
 *
 * "The information on this website was carefully compiled to the best of
 * our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Remark: the colored syntax display and the measurement are still
 * experimental."
 */