/*
 * NOTE(review): tail of a function whose signature is not visible in this
 * chunk (presumably etnaviv_gem_scatter_map) — etnaviv_obj, dev and sgt
 * are declared in the missing part.  TODO: confirm against the full file.
 *
 * For non-cached buffers, ensure the new pages are clean
 * because display controller, GPU, etc. are not coherent.
 */
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
/*
 * NOTE(review): tail of a function whose signature is not visible in this
 * chunk (presumably the scatterlist unmap path) — etnaviv_obj, dev and
 * sgt are declared in the missing part.  TODO: confirm against the full
 * file.
 *
 * For non-cached buffers, ensure the new pages are clean
 * because display controller, GPU, etc. are not coherent:
 *
 * WARNING: The DMA API does not support concurrent CPU
 * and device access to the memory area.  With BIDIRECTIONAL,
 * we will clean the cache lines which overlap the region,
 * and invalidate all cache lines (partially) contained in
 * the region.
 *
 * If you have dirty data in the overlapping cache lines,
 * that will corrupt the GPU-written data.  If you have
 * written into the remainder of the region, this can
 * discard those writes.
 */
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
/*
 * Pin the backing pages of a shmem-backed GEM object.
 *
 * Called with etnaviv_obj->lock held.  Populates etnaviv_obj->pages via
 * drm_gem_get_pages() and returns 0, or a negative errno on failure.
 */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	/* Restored from truncation: publish the pages and report success. */
	etnaviv_obj->pages = p;

	return 0;
}
/*
 * NOTE(review): fragment of a function whose opening is not visible here
 * (presumably etnaviv_gem_get_pages) — sgt, npages and dev are declared
 * in the missing part, and the stray braces below only balance against
 * it.  Builds a scatter/gather table for the already-pinned pages,
 * caches it in etnaviv_obj->sgt, maps it for the device, then returns
 * the page array.  TODO: confirm against the full file.
 */
sgt = drm_prime_pages_to_sg(dev, etnaviv_obj->pages, npages); if (IS_ERR(sgt)) {
dev_err(dev->dev, "failed to allocate sgt: %ld\n",
PTR_ERR(sgt)); return ERR_CAST(sgt);
}
etnaviv_obj->sgt = sgt;
/* Make the new pages visible to the device. */
etnaviv_gem_scatter_map(etnaviv_obj);
}
return etnaviv_obj->pages;
}
/*
 * etnaviv_gem_put_pages() - drop a page reference obtained via
 * etnaviv_gem_get_pages().
 *
 * Caller must hold etnaviv_obj->lock.  Currently a no-op apart from the
 * locking assertion: pages stay pinned for the object's lifetime.
 */
void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);

	/* when we start tracking the pin count, then do something here */
}
/*
 * NOTE(review): this span looks like two fused fragments from different
 * functions — an mmap page-protection setup (WC / uncached / cached
 * cases) followed by the middle of a page-fault handler.  Neither
 * signature is visible; vma, vmf, vm_page_prot, err, pages and pgoff
 * are declared in the missing parts.  TODO: confirm against the full
 * file.
 *
 * NOTE(review): "elseif" below is a fused token ("else if") and will
 * not compile as-is — looks like extraction damage; left untouched here.
 */
if (etnaviv_obj->flags & ETNA_BO_WC) {
vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
} elseif (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
vma->vm_page_prot = pgprot_noncached(vm_page_prot);
} else {
/*
 * Shunt off cached objs to shmem file so they have their own
 * address_space (so unmap_mapping_range does what we want,
 * in particular in the case of mmap'd dmabufs)
 */
vma->vm_pgoff = 0;
vma_set_file(vma, etnaviv_obj->base.filp);
/*
 * Make sure we don't parallel update on a fault, nor move or remove
 * something from beneath our feet.  Note that vmf_insert_page() is
 * specifically coded to take care of this, so we don't have to.
 */
err = mutex_lock_interruptible(&etnaviv_obj->lock); if (err) return VM_FAULT_NOPAGE; /* make sure we have pages attached now */
pages = etnaviv_gem_get_pages(etnaviv_obj);
mutex_unlock(&etnaviv_obj->lock);
if (IS_ERR(pages)) {
err = PTR_ERR(pages); return vmf_error(err);
}
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
/*
 * etnaviv_gem_mmap_offset() - create and report the fake mmap offset of
 * a GEM object.
 *
 * On success stores the offset userspace should pass to mmap() in
 * *offset and returns 0; on failure logs an error and returns a negative
 * errno, leaving *offset untouched.
 */
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	/* Restored from truncation: propagate the result to the caller. */
	return ret;
}
/*
 * NOTE(review): middle of a VRAM-mapping lookup function (presumably
 * etnaviv_gem_mapping_get) — the signature, the declarations of
 * mapping/pages/ret/va/mmu_context and the "out" label live in parts of
 * the file not visible here.  The fragment also appears truncated: the
 * trailing kzalloc branch never initialises or uses the fresh mapping.
 * TODO: confirm against the full file.
 */
mutex_lock(&etnaviv_obj->lock);
mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context); if (mapping) {
/*
 * Holding the object lock prevents the use count changing
 * beneath us.  If the use count is zero, the MMU might be
 * reaping this object, so take the lock and re-check that
 * the MMU owns this mapping to close this race.
 */
if (mapping->use == 0) {
mutex_lock(&mmu_context->lock); if (mapping->context == mmu_context) if (va && mapping->iova != va) {
/* Mapping is at the wrong address: reap it and retry below. */
etnaviv_iommu_reap_mapping(mapping);
mapping = NULL;
} else {
mapping->use += 1;
} else
mapping = NULL;
mutex_unlock(&mmu_context->lock); if (mapping) goto out;
} else {
mapping->use += 1; goto out;
}
}
pages = etnaviv_gem_get_pages(etnaviv_obj); if (IS_ERR(pages)) {
ret = PTR_ERR(pages); goto out;
}
/*
 * See if we have a reaped vram mapping we can re-use before
 * allocating a fresh mapping.
 */
mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL); if (!mapping) {
mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); if (!mapping) {
ret = -ENOMEM; goto out;
}
}
/*
 * NOTE(review): middle of a vmap helper (presumably etnaviv_gem_vmap) —
 * the enclosing signature and the final return are not visible here.
 * Fast path first, then re-check under etnaviv_obj->lock before
 * delegating to the per-object ops->vmap().  TODO: confirm against the
 * full file.
 */
if (etnaviv_obj->vaddr) return etnaviv_obj->vaddr;
mutex_lock(&etnaviv_obj->lock);
/*
 * Need to check again, as we might have raced with another thread
 * while waiting for the mutex.
 */
if (!etnaviv_obj->vaddr)
etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
mutex_unlock(&etnaviv_obj->lock);
/*
 * NOTE(review): fragment of a CPU-access-finish path (presumably
 * etnaviv_gem_cpu_fini) — the enclosing signature and the declarations
 * of etnaviv_obj/dev are not visible here.  For cached BOs, hand the
 * buffer back to the device in the direction recorded by the matching
 * cpu_prep call.  TODO: confirm against the full file.
 */
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
/* fini without a prep is almost certainly a userspace error */
WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
/* Clear the recorded op so an unmatched fini can be detected next time. */
etnaviv_obj->last_cpu_prep_op = 0;
}
/*
 * NOTE(review): tail of a BO-allocation function (presumably
 * etnaviv_gem_new_handle) — the signature and the declarations of
 * dev/obj/size/priv/file/handle are not visible here.  Both the success
 * and failure paths fall through to the reference drop below.  TODO:
 * confirm against the full file.
 */
ret = drm_gem_object_init(dev, obj, PAGE_ALIGN(size)); if (ret) goto fail;
/*
 * Our buffers are kept pinned, so allocating them from the MOVABLE
 * zone is a really bad idea, and conflicts with CMA.  See comments
 * above new_inode() why this is required _and_ expected if you're
 * going to pin these pages.
 */
mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);
etnaviv_gem_obj_add(dev, obj);
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
fail:
drm_gem_object_put(obj);
return ret;
}
/*
 * etnaviv_gem_new_private() - allocate a private (non-shmem backed)
 * etnaviv GEM object of @size bytes with the given @flags and @ops.
 *
 * On success stores the new object in *res and returns 0; returns a
 * negative errno on failure.
 */
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	/*
	 * Restored from truncation: initialise as a private GEM object
	 * (no shmem backing store) and hand the result to the caller.
	 */
	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.