/*
 * NOTE(review): this span is the tail of a mmap helper (presumably
 * gem_mmap_obj) whose signature lies outside this chunk; vma, xen_obj
 * and ret are declared by the enclosing function.
 *
 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
 * the whole buffer.
 */
vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
vma->vm_pgoff = 0;

/*
 * According to Xen on ARM ABI (xen/include/public/arch-arm.h): all
 * memory which is shared with other entities in the system (including
 * the hypervisor and other guests) must reside in memory which is
 * mapped as Normal Inner Write-Back Outer Write-Back Inner-Shareable.
 */
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

/*
 * The vm_operations_struct.fault handler would be called on CPU access
 * to the VMA. For GPUs this isn't the case, because the CPU doesn't
 * touch the memory. Insert all pages now, so both CPU and GPU are
 * happy.
 *
 * FIXME: as we insert all the pages now then no .fault handler must
 * be called, so don't provide one.
 */
ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
if (ret < 0)
	DRM_ERROR("Failed to map pages into vma: %d\n", ret);
/*
 * NOTE(review): tail of a GEM object creation function; drm_info,
 * xen_obj, size and ret are declared by the enclosing function, whose
 * signature lies outside this chunk. On success the xen_obj is
 * returned; on any failure an ERR_PTR(ret) is returned via the fail
 * label.
 */
if (drm_info->front_info->cfg.be_alloc) {
	/*
	 * The backend will allocate space for this buffer, so
	 * only allocate the array of pointers to pages here.
	 */
	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		goto fail;

	/*
	 * Allocate ballooned (unpopulated) pages which will be used
	 * to map grant references provided by the backend.
	 */
	ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
					  xen_obj->pages);
	if (ret < 0) {
		DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
			  xen_obj->num_pages, ret);
		/* Undo gem_alloc_pages_array() before bailing out. */
		gem_free_pages_array(xen_obj);
		goto fail;
	}

	/* Remember that the backend owns the backing storage. */
	xen_obj->be_alloc = true;
	return xen_obj;
}

/*
 * Need to allocate the backing pages now, so we can share those
 * with the backend.
 */
xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
if (IS_ERR(xen_obj->pages)) {
	ret = PTR_ERR(xen_obj->pages);
	/* Reset to NULL so later teardown does not see an ERR_PTR. */
	xen_obj->pages = NULL;
	goto fail;
}

return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}
/*
 * NOTE(review): tail of a vmap-style helper; xen_obj and map are
 * declared by the enclosing function, whose signature lies outside
 * this chunk.
 *
 * Please see the comment in gem_mmap_obj on mapping and attributes.
 */
vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
	     VM_MAP, PAGE_KERNEL);
if (!vaddr)
	return -ENOMEM;

/* Publish the kernel virtual address through the iosys_map wrapper. */
iosys_map_set_vaddr(map, vaddr);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.