/*
 * kvmalloc() at first tries to allocate memory using kmalloc() and
 * falls back to vmalloc() only on failure. It also uses __GFP_NOWARN
 * internally for allocations larger than a page size, preventing
 * storm of KMSG warnings.
 */
syncobjs = kvcalloc(num_in_syncobjs, sizeof(*syncobjs), GFP_KERNEL); if (!syncobjs) return -ENOMEM;
for (i = 0; i < num_in_syncobjs; i++) {
u64 address = exbuf->in_syncobjs + i * syncobj_stride; struct dma_fence *fence;
memset(&syncobj_desc, 0, sizeof(syncobj_desc));
if (copy_from_user(&syncobj_desc,
u64_to_user_ptr(address),
min(syncobj_stride, sizeof(syncobj_desc)))) {
ret = -EFAULT; break;
}
if (syncobj_desc.flags & ~VIRTGPU_EXECBUF_SYNCOBJ_FLAGS) {
ret = -EINVAL; break;
}
ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,
syncobj_desc.point, 0, &fence); if (ret) break;
ret = virtio_gpu_dma_fence_wait(submit, fence);
dma_fence_put(fence); if (ret) break;
if (syncobj_desc.flags & VIRTGPU_EXECBUF_SYNCOBJ_RESET) {
syncobjs[i] = drm_syncobj_find(submit->file,
syncobj_desc.handle); if (!syncobjs[i]) {
ret = -EINVAL; break;
}
}
}
if (ret) {
virtio_gpu_free_syncobjs(syncobjs, i); return ret;
}
err = virtio_gpu_init_submit_buflist(submit); if (err) return err;
submit->buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size); if (IS_ERR(submit->buf)) return PTR_ERR(submit->buf);
if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
err = get_unused_fd_flags(O_CLOEXEC); if (err < 0) return err;
submit->out_fence_fd = err;
submit->sync_file = sync_file_create(&out_fence->f); if (!submit->sync_file) return -ENOMEM;
}
return 0;
}
/*
 * Wait for the optional in-fence passed via VIRTGPU_EXECBUF_FENCE_FD_IN
 * before the job is submitted.
 *
 * Returns 0 on success (or when no in-fence was supplied), -EINVAL if the
 * fence fd does not resolve to a fence, or a negative error from the wait.
 */
static int virtio_gpu_wait_in_fence(struct virtio_gpu_submit *submit)
{
	int ret = 0;

	if (submit->exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence =
				sync_file_get_fence(submit->exbuf->fence_fd);
		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the
		 * fence array contains any fence from a foreign context.
		 */
		ret = virtio_gpu_dma_fence_wait(submit, in_fence);

		/* Drop the reference taken by sync_file_get_fence(). */
		dma_fence_put(in_fence);
	}

	return ret;
}
ret = virtio_gpu_init_submit(&submit, exbuf, dev, file,
fence_ctx, ring_idx); if (ret) goto cleanup;
ret = virtio_gpu_parse_post_deps(&submit); if (ret) goto cleanup;
ret = virtio_gpu_parse_deps(&submit); if (ret) goto cleanup;
/*
 * Await in-fences in the end of the job submission path to
 * optimize the path by proceeding directly to the submission
 * to virtio after the waits.
 */
ret = virtio_gpu_wait_in_fence(&submit); if (ret) goto cleanup;
ret = virtio_gpu_lock_buflist(&submit); if (ret) goto cleanup;
virtio_gpu_submit(&submit);
/*
 * Set up user-out data after submitting the job to optimize
 * the job submission path.
 */
virtio_gpu_install_out_fence_fd(&submit);
virtio_gpu_process_post_deps(&submit);
virtio_gpu_complete_submit(&submit);
cleanup:
virtio_gpu_cleanup_submit(&submit);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.