/* Takes the reservation lock on all the BOs being referenced, so that
 * we can attach fences and update the reservations after pushing the job
 * to the queue.
 *
 * We don't lock the RCL the tile alloc/state BOs, or overflow memory
 * (all of which are on render->unref_list).  They're entirely private
 * to v3d, so we don't attach dma-buf fences to them.
 */
static int
v3d_lock_bo_reservations(struct v3d_job *job,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int i, ret;

	/* Acquire every BO's reservation ww_mutex in a deadlock-avoiding
	 * order under one acquire context.
	 */
	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
	if (ret)
		return ret;

	for (i = 0; i < job->bo_count; i++) {
		/* Reserve one fence slot for the done fence that will be
		 * attached after the job is pushed to the scheduler.
		 */
		ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
		if (ret)
			goto fail;

		/* Make the job wait on any implicit (dma-buf) fences
		 * already attached to this BO by other users.
		 */
		ret = drm_sched_job_add_implicit_dependencies(&job->base,
							      job->bo[i], true);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	/* On error, drop every reservation lock taken above; the caller
	 * only sees locked BOs on success.
	 */
	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
	return ret;
}
/** * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects * referenced by the job. * @dev: DRM device * @file_priv: DRM file for this fd * @job: V3D job being set up * @bo_handles: GEM handles * @bo_count: Number of GEM handles passed in * * The command validator needs to reference BOs by their index within * the submitted job's BO list. This does the validation of the job's * BO list and reference counting for the lifetime of the job. * * Note that this function doesn't need to unreference the BOs on * failure, because that will happen at `v3d_job_free()`.
*/ staticint
v3d_lookup_bos(struct drm_device *dev, struct drm_file *file_priv, struct v3d_job *job,
u64 bo_handles,
u32 bo_count)
{
/* Record how many BOs userspace passed; checked below before use. */
job->bo_count = bo_count;
if (!job->bo_count) { /* See comment on bo_index for why we have to check * this.
*/
DRM_DEBUG("Rendering requires BOs\n"); return -EINVAL;
}
/* NOTE(review): everything from here to the closing brace references
 * identifiers this function never declares (ret, v3d_priv, queue, se,
 * has_multisync, in_sync, done_fence, sync_out, out_sync, i). It
 * appears the extraction spliced in fragments of other functions
 * (drm_sched_job_init / syncobj-dependency setup, and the
 * fence-attach + out-sync-signal path). Left byte-identical; this
 * chunk needs re-extraction from the original file — TODO confirm
 * against upstream v3d_submit.c.
 */
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
1, v3d_priv, file_priv->client_id); if (ret) return ret;
/* Multisync path: wait on each userspace-supplied syncobj whose
 * wait_stage matches this queue.
 */
if (has_multisync) { if (se->in_sync_count && se->wait_stage == queue) { struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
for (i = 0; i < se->in_sync_count; i++) { struct drm_v3d_sem in;
if (copy_from_user(&in, handle++, sizeof(in))) {
ret = -EFAULT;
DRM_DEBUG("Failed to copy wait dep handle.\n"); goto fail_deps;
}
ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
// TODO: Investigate why this was filtered out for the IOCTL. if (ret && ret != -ENOENT) goto fail_deps;
}
}
} else {
/* Legacy single in-sync path. */
ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
// TODO: Investigate why this was filtered out for the IOCTL. if (ret && ret != -ENOENT) goto fail_deps;
}
/* Attach the job's done fence to every BO's reservation object. */
for (i = 0; i < job->bo_count; i++) { /* XXX: Use shared fences for read-only objects. */
dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
DMA_RESV_USAGE_WRITE);
}
/* Update the return sync object for the job */ /* If it only supports a single signal semaphore*/ if (!has_multisync) {
sync_out = drm_syncobj_find(file_priv, out_sync); if (sync_out) {
drm_syncobj_replace_fence(sync_out, done_fence);
drm_syncobj_put(sync_out);
} return;
}
/* If multiple semaphores extension is supported */ if (se->out_sync_count) { for (i = 0; i < se->out_sync_count; i++) {
drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
done_fence);
drm_syncobj_put(se->out_syncs[i].syncobj);
}
/* Array was allocated when the multisync extension was parsed. */
kvfree(se->out_syncs);
}
}
/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
 * according to the extension id (name).
 *
 * @file_priv: DRM file for this fd
 * @ext_handles: user pointer to the first extension in the chain
 * @se: submit-extension state filled in by the multisync handler
 * @job: CPU job filled in by the CPU-job handlers (NULL for non-CPU submits)
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -EINVAL on an
 * unknown extension id, or the handler's error code.
 */
static int
v3d_get_extensions(struct drm_file *file_priv,
		   u64 ext_handles,
		   struct v3d_submit_ext *se,
		   struct v3d_cpu_job *job)
{
	struct drm_v3d_extension __user *user_ext;
	int ret;

	user_ext = u64_to_user_ptr(ext_handles);
	while (user_ext) {
		struct drm_v3d_extension ext;

		/* Copy the common header; each handler re-reads the full
		 * extension struct for its own id.
		 */
		if (copy_from_user(&ext, user_ext, sizeof(ext))) {
			DRM_DEBUG("Failed to copy submit extension\n");
			return -EFAULT;
		}

		switch (ext.id) {
		case DRM_V3D_EXT_ID_MULTI_SYNC:
			ret = v3d_get_multisync_submit_deps(file_priv, user_ext, se);
			break;
		case DRM_V3D_EXT_ID_CPU_INDIRECT_CSD:
			ret = v3d_get_cpu_indirect_csd_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_timestamp_query_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_reset_timestamp_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_copy_query_results_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY:
			ret = v3d_get_cpu_reset_performance_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY:
			ret = v3d_get_cpu_copy_performance_query_params(file_priv, user_ext, job);
			break;
		default:
			DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
			return -EINVAL;
		}

		if (ret)
			return ret;

		/* Extensions form a singly-linked list via ext.next. */
		user_ext = u64_to_user_ptr(ext.next);
	}

	return 0;
}
/** * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D. * @dev: DRM device * @data: ioctl argument * @file_priv: DRM file for this fd * * This is the main entrypoint for userspace to submit a 3D frame to * the GPU. Userspace provides the binner command list (if * applicable), and the kernel sets up the render command list to draw * to the framebuffer described in the ioctl, using the command lists * that the 3D engine's binner will produce.
*/ int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{ struct v3d_dev *v3d = to_v3d_dev(dev); struct v3d_file_priv *v3d_priv = file_priv->driver_priv; struct drm_v3d_submit_cl *args = data; struct v3d_submit_ext se = {0}; struct v3d_bin_job *bin = NULL; struct v3d_render_job *render = NULL; struct v3d_job *clean_job = NULL; struct v3d_job *last_job; struct ww_acquire_ctx acquire_ctx; int ret = 0;
/* Parse optional submit extensions (e.g. multisync) before any
 * allocation so a bad chain fails early.
 */
if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL); if (ret) {
DRM_DEBUG("Failed to get extensions.\n"); return ret;
}
}
/* Allocate and initialize the render job; on init failure the job
 * memory is released before taking the shared fail path.
 */
ret = v3d_job_allocate((void *)&render, sizeof(*render)); if (ret) return ret;
ret = v3d_job_init(v3d, file_priv, &render->base,
v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER); if (ret) {
v3d_job_deallocate((void *)&render); goto fail;
}
/* NOTE(review): this function is truncated here — the rest of the
 * body (bin/clean job setup, the `fail` label, and the closing brace)
 * is missing from this extraction; bin, clean_job, last_job and
 * acquire_ctx are declared but never used in the visible code.
 * Needs re-extraction — TODO confirm against upstream v3d_submit.c.
 */
/** * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D. * @dev: DRM device * @data: ioctl argument * @file_priv: DRM file for this fd * * Userspace provides the register setup for the TFU, which we don't * need to validate since the TFU is behind the MMU.
*/ int
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{ struct v3d_dev *v3d = to_v3d_dev(dev); struct drm_v3d_submit_tfu *args = data; struct v3d_submit_ext se = {0}; struct v3d_tfu_job *job = NULL; struct ww_acquire_ctx acquire_ctx; int ret = 0;
/* Parse optional submit extensions before allocating the job. */
if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL); if (ret) {
DRM_DEBUG("Failed to get extensions.\n"); return ret;
}
}
ret = v3d_job_allocate((void *)&job, sizeof(*job)); if (ret) return ret;
ret = v3d_job_init(v3d, file_priv, &job->base,
v3d_job_free, args->in_sync, &se, V3D_TFU); if (ret) {
v3d_job_deallocate((void *)&job); goto fail;
}
/* Allocate one bo[] slot per possible TFU BO handle. */
job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles), sizeof(*job->base.bo), GFP_KERNEL); if (!job->base.bo) {
ret = -ENOMEM; goto fail;
}
/* NOTE(review): the loop header that iterates job->base.bo_count over
 * args->bo_handles (and the declaration of `bo`) is missing from this
 * extraction — the stray `}` after the bo[] assignment below is the
 * orphaned loop close. Left byte-identical; TODO re-extract and
 * confirm against upstream v3d_submit.c.
 */
bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]); if (!bo) {
DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
job->base.bo_count,
args->bo_handles[job->base.bo_count]);
ret = -ENOENT; goto fail;
}
job->base.bo[job->base.bo_count] = bo;
}
/* Lock every referenced BO's reservation before pushing the job. */
ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx); if (ret) goto fail;
/* NOTE(review): the lines from here through the perfmon block do not
 * belong to the TFU ioctl above — they appear to be a spliced-in
 * fragment of v3d_submit_csd_ioctl() (v3d_setup_csd_jobs_and_bos,
 * perfmon lookup, fail_perfmon label), whose function header is
 * missing from this extraction. Left byte-identical; TODO re-extract.
 */
if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL); if (ret) {
DRM_DEBUG("Failed to get extensions.\n"); return ret;
}
}
ret = v3d_setup_csd_jobs_and_bos(file_priv, v3d, args,
&job, &clean_job, &se,
&acquire_ctx); if (ret) goto fail;
/* A per-fd perfmon cannot be attached while a global perfmon owns the
 * hardware counters; ask userspace to retry with -EAGAIN.
 */
if (args->perfmon_id) { if (v3d->global_perfmon) {
ret = -EAGAIN; goto fail_perfmon;
}
job->base.perfmon = v3d_perfmon_find(v3d_priv,
args->perfmon_id); if (!job->base.perfmon) {
ret = -ENOENT; goto fail_perfmon;
}
}
/* NOTE(review): this final chunk is another spliced-in fragment —
 * it matches the body of v3d_submit_cpu_ioctl() (cpu_job allocation,
 * extension parsing with the cpu_job argument, job-type validation,
 * and the INDIRECT_CSD dependency chaining), but its function header,
 * the rest of the switch, and all labels (fail, fail_unreserve) are
 * missing. cpu_job, csd_job, clean_job and cpu_job_bo_handle_count
 * are declared elsewhere. Left byte-identical; TODO re-extract.
 */
ret = v3d_job_allocate((void *)&cpu_job, sizeof(*cpu_job)); if (ret) return ret;
if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
ret = v3d_get_extensions(file_priv, args->extensions, &se, cpu_job); if (ret) {
DRM_DEBUG("Failed to get extensions.\n"); goto fail;
}
}
/* Every CPU job must have a CPU job user extension */ if (!cpu_job->job_type) {
DRM_DEBUG("CPU job must have a CPU job user extension.\n");
ret = -EINVAL; goto fail;
}
/* The expected BO count is fixed per CPU-job type. */
if (args->bo_handle_count != cpu_job_bo_handle_count[cpu_job->job_type]) {
DRM_DEBUG("This CPU job was not submitted with the proper number of BOs.\n");
ret = -EINVAL; goto fail;
}
/* Indirect CSD: chain csd_job after cpu_job, and clean_job after
 * csd_job, via scheduler fence dependencies.
 */
switch (cpu_job->job_type) { case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
ret = drm_sched_job_add_dependency(&csd_job->base.base,
dma_fence_get(cpu_job->base.done_fence)); if (ret) goto fail_unreserve;
v3d_push_job(&csd_job->base);
ret = drm_sched_job_add_dependency(&clean_job->base,
dma_fence_get(csd_job->base.done_fence)); if (ret) goto fail_unreserve;
/* NOTE(review): the following trailing text is not source code — it is a
 * German-language website disclaimer that was evidently pasted in by the
 * extraction tool. English translation, kept as a comment pending removal:
 * "The information on this web page was carefully compiled to the best of
 * our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 */