case DRM_PVR_JOB_TYPE_FRAGMENT: return pvr_frag_job_fw_cmd_init(job, args);
case DRM_PVR_JOB_TYPE_COMPUTE: return pvr_compute_job_fw_cmd_init(job, args);
case DRM_PVR_JOB_TYPE_TRANSFER_FRAG: return pvr_transfer_job_fw_cmd_init(job, args);
default: return -EINVAL;
}
}
/**
 * struct pvr_job_data - Helper container for pairing jobs with the
 * sync_ops supplied for them by the user.
 */
struct pvr_job_data {
	/** @job: Pointer to the job. */
	struct pvr_job *job;

	/** @sync_ops: Pointer to the sync_ops associated with @job. */
	struct drm_pvr_sync_op *sync_ops;

	/** @sync_op_count: Number of members of @sync_ops. */
	u32 sync_op_count;
};
/**
 * prepare_job_syncs() - Prepare all sync objects for a single job.
 * @pvr_file: PowerVR file.
 * @job_data: Precreated job and sync_ops array.
 * @signal_array: xarray to receive signal sync objects.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by pvr_sync_signal_array_collect_ops(),
 *    pvr_sync_add_deps_to_job(), drm_sched_job_add_resv_dependencies() or
 *    pvr_sync_signal_array_update_fences().
 */
/* NOTE(review): "staticint" is a garbled "static int", and the function is
 * truncated below — the check on @err, the use of @done_fence and @usage,
 * and the closing braces are missing. Recover the full body from the
 * driver's history before building.
 */
staticint
prepare_job_syncs(struct pvr_file *pvr_file, struct pvr_job_data *job_data,
		  struct xarray *signal_array)
{
	struct dma_fence *done_fence;
	int err = pvr_sync_signal_array_collect_ops(signal_array,
						    from_pvr_file(pvr_file),
						    job_data->sync_op_count,
						    job_data->sync_ops);

	if (job_data->job->hwrt) {
		/* The geometry job writes the HWRT region headers, which are
		 * then read by the fragment job.
		 */
		struct drm_gem_object *obj =
			gem_from_pvr_gem(job_data->job->hwrt->fw_obj->gem);
		/* Geometry writes the headers, so its resv usage is WRITE;
		 * everything else (fragment) only reads them.
		 */
		enum dma_resv_usage usage =
			dma_resv_usage_rw(job_data->job->type ==
					  DRM_PVR_JOB_TYPE_GEOMETRY);
/**
 * prepare_job_syncs_for_each() - Prepare all sync objects for an array of
 * jobs.
 * @pvr_file: PowerVR file.
 * @job_data: Array of precreated jobs and their sync_ops.
 * @job_count: Number of jobs.
 * @signal_array: xarray to receive signal sync objects.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by prepare_job_syncs().
 */
/* NOTE(review): "staticint" is a garbled "static int", and the function is
 * truncated below — the check on @err, the success return and the closing
 * braces are missing. Recover them from the driver's history.
 */
staticint
prepare_job_syncs_for_each(struct pvr_file *pvr_file,
			   struct pvr_job_data *job_data,
			   u32 *job_count, struct xarray *signal_array)
{
	for (u32 i = 0; i < *job_count; i++) {
		int err = prepare_job_syncs(pvr_file, &job_data[i],
					    signal_array);
/**
 * pvr_job_data_fini() - Cleanup all allocs used to set up job submission.
 * @job_data: Job data array.
 * @job_count: Number of members of @job_data.
 */
static void
pvr_job_data_fini(struct pvr_job_data *job_data, u32 job_count)
{
	for (u32 i = 0; i < job_count; i++) {
		/* Drop the job reference and free the copied-in sync_ops. */
		pvr_job_put(job_data[i].job);
		kvfree(job_data[i].sync_ops);
	}
}
/**
 * pvr_job_data_init() - Init an array of created jobs, associating them with
 * the appropriate sync_ops args, which will be copied in.
 * @pvr_dev: Target PowerVR device.
 * @pvr_file: Pointer to PowerVR file structure.
 * @job_args: Job args array copied from user.
 * @job_count: Number of members of @job_args.
 * @job_data_out: Job data array.
 */
/* NOTE(review): "staticint" is a garbled "static int", and the function is
 * truncated below — the error handling for create_job(), the sync_ops copy,
 * the closing braces and the unwind path are missing. Recover them from the
 * driver's history.
 */
staticint pvr_job_data_init(struct pvr_device *pvr_dev,
			    struct pvr_file *pvr_file,
			    struct drm_pvr_job *job_args,
			    u32 *job_count, struct pvr_job_data *job_data_out)
{
	int err = 0, i = 0;

	for (; i < *job_count; i++) {
		job_data_out[i].job =
			create_job(pvr_dev, pvr_file, &job_args[i]);
		/* create_job() returns an ERR_PTR on failure. */
		err = PTR_ERR_OR_ZERO(job_data_out[i].job);
/**
 * jobs_lock_all_objs() - Lock the GEM objects backing an array of jobs.
 * @exec: drm_exec context used to take the locks.
 * @job_data: Array of jobs whose backing objects should be locked.
 * @job_count: Number of members of @job_data.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by drm_exec_lock_obj() or prepare_fw_obj_resv().
 */
static int
jobs_lock_all_objs(struct drm_exec *exec, struct pvr_job_data *job_data,
		   u32 job_count)
{
	for (u32 i = 0; i < job_count; i++) {
		struct pvr_job *job = job_data[i].job;

		/* Grab a lock on the context, to guard against
		 * concurrent submission to the same queue.
		 */
		int err = drm_exec_lock_obj(exec,
					    gem_from_pvr_gem(job->ctx->fw_obj->gem));

		if (err)
			return err;

		if (job->hwrt) {
			err = prepare_fw_obj_resv(exec,
						  job->hwrt->fw_obj);
			if (err)
				return err;
		}
	}

	return 0;
}
/* Geometry and fragment jobs can be combined if they are queued to the * same context and targeting the same HWRT.
*/ if (a->type != DRM_PVR_JOB_TYPE_GEOMETRY ||
b->type != DRM_PVR_JOB_TYPE_FRAGMENT ||
a->ctx != b->ctx ||
a->hwrt != b->hwrt) returnfalse;
/* We combine when we see an explicit geom -> frag dep. */ return drm_sched_job_has_dependency(&frag_job->base,
&geom_job->base.s_fence->scheduled);
}
staticstruct dma_fence *
get_last_queued_job_scheduled_fence(struct pvr_queue *queue, struct pvr_job_data *job_data,
u32 cur_job_pos)
{ /* We iterate over the current job array in reverse order to grab the * last to-be-queued job targeting the same queue.
*/ for (u32 i = cur_job_pos; i > 0; i--) { struct pvr_job *job = job_data[i - 1].job;
/* If we didn't find any, we just return the last queued job scheduled * fence attached to the queue.
*/ return dma_fence_get(queue->last_queued_job_scheduled_fence);
}
/* NOTE(review): the enclosing function header and loop opening are missing
 * here — this reads as the pairing-loop body of pvr_jobs_link_geom_frag();
 * locals @i, @geom_job, @frag_job, @frag_queue, @f and parameter @job_count
 * are declared in the missing part. Recover it from the driver's history.
 */
		if (!can_combine_jobs(job_data[i].job, job_data[i + 1].job))
			continue;

		/* The fragment job will be submitted by the geometry queue. We
		 * need to make sure it comes after all the other fragment jobs
		 * queued before it.
		 */
		frag_queue = pvr_context_get_queue_for_job(frag_job->ctx,
							   frag_job->type);

		f = get_last_queued_job_scheduled_fence(frag_queue, job_data,
							i);
		if (f) {
			int err = drm_sched_job_add_dependency(&geom_job->base,
							       f);
			if (err) {
				/* Only the first @i jobs were fully set up;
				 * report that back through @job_count.
				 */
				*job_count = i;
				return err;
			}
		}

		/* The KCCB slot will be reserved by the geometry job, so we
		 * can drop the KCCB fence on the fragment job.
		 */
		pvr_kccb_fence_put(frag_job->kccb_fence);
		frag_job->kccb_fence = NULL;

		/* The geometry job pvr_job structure is used when the fragment
		 * job is being prepared by the GPU scheduler. Have the fragment
		 * job hold a reference on the geometry job to prevent it being
		 * freed until the fragment job has finished with it.
		 */
		pvr_job_get(geom_job);

		/* Skip the fragment job we just paired to the geometry job. */
		i++;
	}

	return 0;
}
/**
 * pvr_submit_jobs() - Submit jobs to the GPU
 * @pvr_dev: Target PowerVR device.
 * @pvr_file: Pointer to PowerVR file structure.
 * @args: Ioctl args.
 *
 * This initial implementation is entirely synchronous; on return the GPU will
 * be idle. This will not be the case for future implementations.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EFAULT if arguments can not be copied from user space, or
 *  * -%EINVAL on invalid arguments, or
 *  * Any other error.
 */
/* NOTE(review): this function is truncated/garbled — the @job_data
 * allocation and pvr_job_data_init() call, the drm_exec/@signal_array
 * initialisation and locking, the out_exec_fini/out_job_data_cleanup labels
 * targeted by the gotos below, and the final return are all missing.
 * Recover them from the driver's history before building.
 */
int
pvr_submit_jobs(struct pvr_device *pvr_dev, struct pvr_file *pvr_file,
		struct drm_pvr_ioctl_submit_jobs_args *args)
{
	struct pvr_job_data *job_data = NULL;
	struct drm_pvr_job *job_args;
	struct xarray signal_array;
	u32 jobs_alloced = 0;
	struct drm_exec exec;
	int err;

	if (!args->jobs.count)
		return -EINVAL;

	/* Copy the user-supplied job descriptor array into the kernel. */
	err = PVR_UOBJ_GET_ARRAY(job_args, &args->jobs);
	if (err)
		return err;

	/*
	 * Flush MMU if needed - this has been deferred until now to avoid
	 * overuse of this expensive operation.
	 */
	err = pvr_mmu_flush_exec(pvr_dev, false);
	if (err)
		goto out_job_data_cleanup;

	err = prepare_job_syncs_for_each(pvr_file, job_data, &args->jobs.count,
					 &signal_array);
	if (err)
		goto out_exec_fini;

	err = prepare_job_resvs_for_each(&exec, job_data, args->jobs.count);
	if (err)
		goto out_exec_fini;

	err = pvr_jobs_link_geom_frag(job_data, &args->jobs.count);
	if (err)
		goto out_exec_fini;

	/* Anything after that point must succeed because we start exposing job
	 * finished fences to the outside world.
	 */
	update_job_resvs_for_each(job_data, args->jobs.count);
	push_jobs(job_data, args->jobs.count);
	pvr_sync_signal_array_push_fences(&signal_array);
	err = 0;
/*
 * NOTE(review): extraction residue — German website boilerplate, not driver
 * code. Translated and fenced off so the file remains parseable:
 * "The information on this web page has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the syntax highlighting and the measurement are still experimental."
 */