if (dobj->page) { /* page backed memory */ unsignedint order = get_order(dobj->obj.size);
__free_pages(dobj->page, order);
} elseif (dobj->linear) { /* linear backed memory */
mutex_lock(&priv->linear_lock);
drm_mm_remove_node(dobj->linear);
mutex_unlock(&priv->linear_lock);
kfree(dobj->linear); if (dobj->addr)
iounmap(dobj->addr);
}
if (dobj->obj.import_attach) { /* We only ever display imported data */ if (dobj->sgt)
dma_buf_unmap_attachment_unlocked(dobj->obj.import_attach,
dobj->sgt, DMA_TO_DEVICE);
drm_prime_gem_destroy(&dobj->obj, NULL);
}
/* * If it is a small allocation (typically cursor, which will * be 32x64 or 64x32 ARGB pixels) try to get it from the system. * Framebuffers will never be this small (our minimum size for * framebuffers is larger than this anyway.) Such objects are * only accessed by the CPU so we don't need any special handing * here.
*/ if (size <= 8192) { unsignedint order = get_order(size); struct page *p = alloc_pages(GFP_KERNEL, order);
/* * We could grab something from DMA if it's enabled, but that * involves building in a problem: * * GEM DMA helper interface uses dma_alloc_coherent(), which provides * us with an CPU virtual address and a device address. * * The CPU virtual address may be either an address in the kernel * direct mapped region (for example, as it would be on x86) or * it may be remapped into another part of kernel memory space * (eg, as it would be on ARM.) This means virt_to_phys() on the * returned virtual address is invalid depending on the architecture * implementation. * * The device address may also not be a physical address; it may * be that there is some kind of remapping between the device and * system RAM, which makes the use of the device address also * unsafe to re-use as a physical address. * * This makes DRM usage of dma_alloc_coherent() in a generic way * at best very questionable and unsafe.
*/
/* Otherwise, grab it from our linear allocation */ if (!obj->page) { struct drm_mm_node *node; unsigned align = min_t(unsigned, size, SZ_2M); void __iomem *ptr; int ret;
node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOSPC;
mutex_lock(&priv->linear_lock);
ret = drm_mm_insert_node_generic(&priv->linear, node,
size, align, 0, 0);
mutex_unlock(&priv->linear_lock); if (ret) {
kfree(node); return ret;
}
obj->linear = node;
/* Ensure that the memory we're returning is cleared. */
ptr = ioremap_wc(obj->linear->start, size); if (!ptr) {
mutex_lock(&priv->linear_lock);
drm_mm_remove_node(obj->linear);
mutex_unlock(&priv->linear_lock);
kfree(obj->linear);
obj->linear = NULL; return -ENOMEM;
}
DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
(unsignedlonglong)obj->phys_addr,
(unsignedlonglong)obj->dev_addr);
return 0;
}
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/*
	 * Lazily create a CPU mapping for linear-backed objects; the
	 * mapping is cached in dobj->addr so repeated calls are cheap.
	 * Objects without a linear backing are returned as-is (their
	 * ->addr is either already set or stays NULL).
	 */
	if (dobj->linear && !dobj->addr)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);

	return dobj->addr;
}
/* * Don't call dma_buf_map_attachment() here - it maps the * scatterlist immediately for DMA, and this is not always * an appropriate thing to do.
*/ return &dobj->obj;
}
int armada_gem_map_import(struct armada_gem_object *dobj)
{ int ret;
dobj->sgt = dma_buf_map_attachment_unlocked(dobj->obj.import_attach,
DMA_TO_DEVICE); if (IS_ERR(dobj->sgt)) {
ret = PTR_ERR(dobj->sgt);
dobj->sgt = NULL;
DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret); return ret;
} if (dobj->sgt->nents > 1) {
DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n"); return -EINVAL;
} if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n"); return -EINVAL;
}
dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
dobj->mapped = true; return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.