/** * to_pvr_queue_job_fence() - Return a pvr_queue_fence object if the fence is * backed by a UFO. * @f: The dma_fence to turn into a pvr_queue_fence. * * Return: * * A non-NULL pvr_queue_fence object if the dma_fence is backed by a UFO, or * * NULL otherwise.
*/ staticstruct pvr_queue_fence *
to_pvr_queue_job_fence(struct dma_fence *f)
{ struct drm_sched_fence *sched_fence = to_drm_sched_fence(f);
/**
 * pvr_queue_fence_put() - Put wrapper for pvr_queue_fence objects.
 * @f: The dma_fence object to put, or NULL (no-op).
 *
 * If the pvr_queue_fence has been initialized, we call dma_fence_put(),
 * otherwise we free the object with dma_fence_free(). This allows us
 * to do the right thing before and after pvr_queue_fence_init() had been
 * called.
 */
static void pvr_queue_fence_put(struct dma_fence *f)
{
	if (!f)
		return;

	/* A non-NULL ops pointer means dma_fence_init() already ran on this
	 * object; otherwise it was only allocated and must be freed directly.
	 */
	if (f->ops)
		dma_fence_put(f);
	else
		dma_fence_free(f);
}
/** * pvr_queue_fence_alloc() - Allocate a pvr_queue_fence fence object * * Call this function to allocate job CCCB and done fences. This only * allocates the objects. Initialization happens when the underlying * dma_fence object is to be returned to drm_sched (in prepare_job() or * run_job()). * * Return: * * A valid pointer if the allocation succeeds, or * * NULL if the allocation fails.
*/ staticstruct dma_fence *
pvr_queue_fence_alloc(void)
{ struct pvr_queue_fence *fence;
fence = kzalloc(sizeof(*fence), GFP_KERNEL); if (!fence) return NULL;
return &fence->base;
}
/** * pvr_queue_fence_init() - Initializes a pvr_queue_fence object. * @f: The fence to initialize * @queue: The queue this fence belongs to. * @fence_ops: The fence operations. * @fence_ctx: The fence context. * * Wrapper around dma_fence_init() that takes care of initializing the * pvr_queue_fence::queue field too.
*/ staticvoid
pvr_queue_fence_init(struct dma_fence *f, struct pvr_queue *queue, conststruct dma_fence_ops *fence_ops, struct pvr_queue_fence_ctx *fence_ctx)
{ struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
/**
 * pvr_queue_cccb_fence_init() - Initializes a CCCB fence object.
 * @fence: The fence to initialize.
 * @queue: The queue this fence belongs to.
 *
 * Initializes a fence that can be used to wait for CCCB space.
 *
 * Should be called in the ::prepare_job() path, so the fence returned to
 * drm_sched is valid.
 */
static void
pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
{
	pvr_queue_fence_init(fence, queue, &pvr_queue_cccb_fence_ops,
			     &queue->cccb_fence_ctx.base);
}
/**
 * pvr_queue_job_fence_init() - Initializes a job done fence object.
 * @fence: The fence to initialize.
 * @queue: The queue this fence belongs to.
 *
 * Initializes a fence that will be signaled when the GPU is done executing
 * a job.
 *
 * Should be called *before* the ::run_job() path, so the fence is initialised
 * before being placed in the pending_list.
 */
static void
pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
{
	/* Skip fences that were already initialized (ops set). */
	if (!fence->ops)
		pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
				     &queue->job_fence_ctx);
}
/* Compute the CCCB space needed to emit UFO commands covering @elem_count
 * UFO elements, headers included.
 */
static u32 ufo_cmds_size(u32 elem_count)
{
	/* We can pass at most ROGUE_FWIF_CCB_CMD_MAX_UFOS per UFO-related command. */
	u32 whole_cmds = elem_count / ROGUE_FWIF_CCB_CMD_MAX_UFOS;
	u32 leftover_elems = elem_count % ROGUE_FWIF_CCB_CMD_MAX_UFOS;
	u32 total;

	/* Full commands first, then one extra command for the remainder. */
	total = whole_cmds *
		pvr_cccb_get_size_of_cmd_with_hdr(ROGUE_FWIF_CCB_CMD_MAX_UFOS *
						  sizeof(struct rogue_fwif_ufo));
	if (leftover_elems)
		total += pvr_cccb_get_size_of_cmd_with_hdr(leftover_elems *
							   sizeof(struct rogue_fwif_ufo));

	return total;
}
/* Compute the CCCB space needed to push @job with @ufo_wait_count native
 * fence waits.
 */
static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count)
{
	/* One UFO cmd for the fence signaling, one UFO cmd per native fence wait,
	 * and a command for the job itself.
	 */
	return ufo_cmds_size(1) + ufo_cmds_size(ufo_wait_count) +
	       pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len);
}
/** * job_count_remaining_native_deps() - Count the number of non-signaled native dependencies. * @job: Job to operate on. * * Returns: Number of non-signaled native deps remaining.
*/ staticunsignedlong job_count_remaining_native_deps(struct pvr_job *job)
{ unsignedlong remaining_count = 0; struct dma_fence *fence = NULL; unsignedlong index;
jfence = to_pvr_queue_job_fence(fence); if (!jfence) continue;
if (!dma_fence_is_signaled(&jfence->base))
remaining_count++;
}
return remaining_count;
}
/** * pvr_queue_get_job_cccb_fence() - Get the CCCB fence attached to a job. * @queue: The queue this job will be submitted to. * @job: The job to get the CCCB fence on. * * The CCCB fence is a synchronization primitive allowing us to delay job * submission until there's enough space in the CCCB to submit the job. * * Return: * * NULL if there's enough space in the CCCB to submit this job, or * * A valid dma_fence object otherwise.
*/ staticstruct dma_fence *
pvr_queue_get_job_cccb_fence(struct pvr_queue *queue, struct pvr_job *job)
{ struct pvr_queue_fence *cccb_fence; unsignedint native_deps_remaining;
/* If the fence is NULL, that means we already checked that we had * enough space in the cccb for our job.
*/ if (!job->cccb_fence) return NULL;
mutex_lock(&queue->cccb_fence_ctx.job_lock);
/* Count remaining native dependencies and check if the job fits in the CCCB. */
native_deps_remaining = job_count_remaining_native_deps(job); if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) {
pvr_queue_fence_put(job->cccb_fence);
job->cccb_fence = NULL; goto out_unlock;
}
/* There should be no job attached to the CCCB fence context: * drm_sched_entity guarantees that jobs are submitted one at a time.
*/ if (WARN_ON(queue->cccb_fence_ctx.job))
pvr_job_put(queue->cccb_fence_ctx.job);
queue->cccb_fence_ctx.job = pvr_job_get(job);
/* Initialize the fence before returning it. */
cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base); if (!WARN_ON(cccb_fence->queue))
pvr_queue_cccb_fence_init(job->cccb_fence, queue);
/** * pvr_queue_get_job_kccb_fence() - Get the KCCB fence attached to a job. * @queue: The queue this job will be submitted to. * @job: The job to get the KCCB fence on. * * The KCCB fence is a synchronization primitive allowing us to delay job * submission until there's enough space in the KCCB to submit the job. * * Return: * * NULL if there's enough space in the KCCB to submit this job, or * * A valid dma_fence object otherwise.
*/ staticstruct dma_fence *
pvr_queue_get_job_kccb_fence(struct pvr_queue *queue, struct pvr_job *job)
{ struct pvr_device *pvr_dev = queue->ctx->pvr_dev; struct dma_fence *kccb_fence = NULL;
/* If the fence is NULL, that means we already checked that we had * enough space in the KCCB for our job.
*/ if (!job->kccb_fence) return NULL;
/** * pvr_queue_prepare_job() - Return the next internal dependencies expressed as a dma_fence. * @sched_job: The job to query the next internal dependency on * @s_entity: The entity this job is queue on. * * After iterating over drm_sched_job::dependencies, drm_sched let the driver return * its own internal dependencies. We use this function to return our internal dependencies.
*/ staticstruct dma_fence *
pvr_queue_prepare_job(struct drm_sched_job *sched_job, struct drm_sched_entity *s_entity)
{ struct pvr_job *job = container_of(sched_job, struct pvr_job, base); struct pvr_queue *queue = container_of(s_entity, struct pvr_queue, entity); struct dma_fence *internal_dep = NULL;
/* * Initialize the done_fence, so we can signal it. This must be done * here because otherwise by the time of run_job() the job will end up * in the pending list without a valid fence.
*/ if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) { /* * This will be called on a paired fragment job after being * submitted to firmware. We can tell if this is the case and * bail early from whether run_job() has been called on the * geometry job, which would issue a pm ref.
*/ if (job->paired_job->has_pm_ref) return NULL;
/* * In this case we need to use the job's own ctx to initialise * the done_fence. The other steps are done in the ctx of the * paired geometry job.
*/
pvr_queue_job_fence_init(job->done_fence,
job->ctx->queues.fragment);
} else {
pvr_queue_job_fence_init(job->done_fence, queue);
}
/* CCCB fence is used to make sure we have enough space in the CCCB to * submit our commands.
*/
internal_dep = pvr_queue_get_job_cccb_fence(queue, job);
/* KCCB fence is used to make sure we have a KCCB slot to queue our * CMD_KICK.
*/ if (!internal_dep)
internal_dep = pvr_queue_get_job_kccb_fence(queue, job);
/* Any extra internal dependency should be added here, using the following * pattern: * * if (!internal_dep) * internal_dep = pvr_queue_get_job_xxxx_fence(queue, job);
*/
/* The paired job fence should come last, when everything else is ready. */ if (!internal_dep)
internal_dep = pvr_queue_get_paired_frag_job_dep(queue, job);
return internal_dep;
}
/**
 * pvr_queue_update_active_state_locked() - Update the queue active state.
 * @queue: Queue to update the state on.
 *
 * Locked version of pvr_queue_update_active_state(). Must be called with
 * pvr_device::queue::lock held.
 */
static void pvr_queue_update_active_state_locked(struct pvr_queue *queue)
{
	struct pvr_device *pvr_dev = queue->ctx->pvr_dev;

	lockdep_assert_held(&pvr_dev->queues.lock);

	/* The queue is temporarily out of any list when it's being reset,
	 * we don't want a call to pvr_queue_update_active_state_locked()
	 * to re-insert it behind our back.
	 */
	if (list_empty(&queue->node))
		return;

	if (!atomic_read(&queue->in_flight_job_count))
		list_move_tail(&queue->node, &pvr_dev->queues.idle);
	else
		list_move_tail(&queue->node, &pvr_dev->queues.active);
}
/** * pvr_queue_update_active_state() - Update the queue active state. * @queue: Queue to update the state on. * * Active state is based on the in_flight_job_count value. * * Updating the active state implies moving the queue in or out of the * active queue list, which also defines whether the queue is checked * or not when a FW event is received. * * This function should be called any time a job is submitted or it done * fence is signaled.
*/ staticvoid pvr_queue_update_active_state(struct pvr_queue *queue)
{ struct pvr_device *pvr_dev = queue->ctx->pvr_dev;
/* NOTE(review): the span below is a truncated interior of the CCCB job
 * submission path (presumably pvr_queue_submit_job_to_cccb()): the function
 * signature, local declarations (ufos[], cccb, cmd, jfence, fence, index,
 * ufo_count) and part of the dependency-wait UFO emission were lost during
 * extraction. Recover the full body from source control before building
 * this file.
 */
/* We need to add the queue to the active list before updating the CCCB, * otherwise we might miss the FW event informing us that something * happened on this queue.
 */
atomic_inc(&queue->in_flight_job_count);
pvr_queue_update_active_state(queue);
xa_for_each(&job->base.dependencies, index, fence) {
jfence = to_pvr_queue_job_fence(fence); if (!jfence) continue;
/* Skip the partial render fence, we will place it at the end. */ if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job &&
&job->paired_job->base.s_fence->scheduled == fence) continue;
if (dma_fence_is_signaled(&jfence->base)) continue;
/* NOTE(review): lines between the dependency loop and the partial-render
 * fence emission appear to be missing here.
 */
/* Reference value for the partial render test is the current queue fence * seqno minus one.
 */
pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj,
&cmd->partial_render_geom_frag_fence.addr);
cmd->partial_render_geom_frag_fence.value = job->done_fence->seqno - 1;
}
/* Signal the job fence. */
pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, &ufos[0].addr);
ufos[0].value = job->done_fence->seqno;
pvr_cccb_write_command_with_header(cccb, ROGUE_FWIF_CCB_CMD_TYPE_UPDATE, sizeof(ufos[0]), ufos, 0, 0);
}
/** * pvr_queue_run_job() - Submit a job to the FW. * @sched_job: The job to submit. * * This function is called when all non-native dependencies have been met and * when the commands resulting from this job are guaranteed to fit in the CCCB.
 */ staticstruct dma_fence *pvr_queue_run_job(struct drm_sched_job *sched_job)
{ struct pvr_job *job = container_of(sched_job, struct pvr_job, base); struct pvr_device *pvr_dev = job->pvr_dev; int err;
/* The fragment job is issued along the geometry job when we use combined * geom+frag kicks. When we get there, we should simply return the * done_fence that's been initialized earlier.
 */ if (job->paired_job && job->type == DRM_PVR_JOB_TYPE_FRAGMENT &&
job->done_fence->ops) { return dma_fence_get(job->done_fence);
}
/* The only kind of jobs that can be paired are geometry and fragment, and * we bail out early if we see a fragment job that's paired with a geomtry * job. * Paired jobs must also target the same context and point to the same * HWRT.
 */ if (WARN_ON(job->paired_job &&
(job->type != DRM_PVR_JOB_TYPE_GEOMETRY ||
job->paired_job->type != DRM_PVR_JOB_TYPE_FRAGMENT ||
job->hwrt != job->paired_job->hwrt ||
job->ctx != job->paired_job->ctx))) return ERR_PTR(-EINVAL);
err = pvr_job_get_pm_ref(job); if (WARN_ON(err)) return ERR_PTR(err);
if (job->paired_job) {
err = pvr_job_get_pm_ref(job->paired_job); if (WARN_ON(err)) return ERR_PTR(err);
}
/* Submit our job to the CCCB */
pvr_queue_submit_job_to_cccb(job);
/* NOTE(review): the remainder of pvr_queue_run_job() (paired-fragment
 * submission, the KCCB kick, and the final return of job->done_fence) was
 * lost during extraction; restore it from source control.
 */
/* Make sure we CPU-signal the UFO object, so other queues don't get * blocked waiting on it.
*/
*queue->timeline_ufo.value = atomic_read(&queue->job_fence_ctx.seqno);
list_for_each_entry(job, &queue->scheduler.pending_list, base.list) { if (dma_fence_is_signaled(job->done_fence)) { /* Jobs might have completed after drm_sched_stop() was called. * In that case, re-assign the parent field to the done_fence.
*/
WARN_ON(job->base.s_fence->parent);
job->base.s_fence->parent = dma_fence_get(job->done_fence);
} else { /* If we had unfinished jobs, flag the entity as guilty so no * new job can be submitted.
*/
atomic_set(&queue->ctx->faulty, 1);
}
}
drm_sched_start(&queue->scheduler, 0);
}
/** * pvr_queue_timedout_job() - Handle a job timeout event. * @s_job: The job this timeout occurred on. * * FIXME: We don't do anything here to unblock the situation, we just stop+start * the scheduler, and re-assign parent fences in the middle. * * Return: * * DRM_GPU_SCHED_STAT_RESET.
*/ staticenum drm_gpu_sched_stat
pvr_queue_timedout_job(struct drm_sched_job *s_job)
{ struct drm_gpu_scheduler *sched = s_job->sched; struct pvr_queue *queue = container_of(sched, struct pvr_queue, scheduler); struct pvr_device *pvr_dev = queue->ctx->pvr_dev; struct pvr_job *job;
u32 job_count = 0;
dev_err(sched->dev, "Job timeout\n");
/* Before we stop the scheduler, make sure the queue is out of any list, so * any call to pvr_queue_update_active_state_locked() that might happen * until the scheduler is really stopped doesn't end up re-inserting the * queue in the active list. This would cause * pvr_queue_signal_done_fences() and drm_sched_stop() to race with each * other when accessing the pending_list, since drm_sched_stop() doesn't * grab the job_list_lock when modifying the list (it's assuming the * only other accessor is the scheduler, and it's safe to not grab the * lock since it's stopped).
*/
mutex_lock(&pvr_dev->queues.lock);
list_del_init(&queue->node);
mutex_unlock(&pvr_dev->queues.lock);
/* Re-insert the queue in the proper list, and kick a queue processing * operation if there were jobs pending.
*/
mutex_lock(&pvr_dev->queues.lock); if (!job_count) {
list_move_tail(&queue->node, &pvr_dev->queues.idle);
} else {
atomic_set(&queue->in_flight_job_count, job_count);
list_move_tail(&queue->node, &pvr_dev->queues.active);
pvr_queue_process(queue);
}
mutex_unlock(&pvr_dev->queues.lock);
drm_sched_start(sched, 0);
return DRM_GPU_SCHED_STAT_RESET;
}
/** * pvr_queue_free_job() - Release the reference the scheduler had on a job object. * @sched_job: Job object to free.
*/ staticvoid pvr_queue_free_job(struct drm_sched_job *sched_job)
{ struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
drm_sched_job_cleanup(sched_job);
if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job)
pvr_job_put(job->paired_job);
/** * pvr_queue_fence_is_ufo_backed() - Check if a dma_fence is backed by a UFO object * @f: Fence to test. * * A UFO-backed fence is a fence that can be signaled or waited upon FW-side. * pvr_job::done_fence objects are backed by the timeline UFO attached to the queue * they are pushed to, but those fences are not directly exposed to the outside * world, so we also need to check if the fence we're being passed is a * drm_sched_fence that was coming from our driver.
*/ bool pvr_queue_fence_is_ufo_backed(struct dma_fence *f)
{ struct drm_sched_fence *sched_fence = f ? to_drm_sched_fence(f) : NULL;
if (sched_fence &&
sched_fence->sched->ops == &pvr_queue_sched_ops) returntrue;
if (f && f->ops == &pvr_queue_job_fence_ops) returntrue;
returnfalse;
}
/** * pvr_queue_signal_done_fences() - Signal done fences. * @queue: Queue to check. * * Signal done fences of jobs whose seqno is less than the current value of * the UFO object attached to the queue.
*/ staticvoid
pvr_queue_signal_done_fences(struct pvr_queue *queue)
{ struct pvr_job *job, *tmp_job;
u32 cur_seqno;
if (!dma_fence_is_signaled(job->done_fence)) {
dma_fence_signal(job->done_fence);
pvr_job_release_pm_ref(job);
atomic_dec(&queue->in_flight_job_count);
}
}
spin_unlock(&queue->scheduler.job_list_lock);
}
/**
 * pvr_queue_check_job_waiting_for_cccb_space() - Check if the job waiting for CCCB
 * space can be pushed to the CCCB.
 * @queue: Queue to check
 *
 * If we have a job waiting for CCCB, and this job now fits in the CCCB, we signal
 * its CCCB fence, which should kick drm_sched.
 */
static void
pvr_queue_check_job_waiting_for_cccb_space(struct pvr_queue *queue)
{
	struct pvr_queue_fence *cccb_fence;
	u32 native_deps_remaining;
	struct pvr_job *job;

	mutex_lock(&queue->cccb_fence_ctx.job_lock);
	job = queue->cccb_fence_ctx.job;
	if (!job)
		goto out_unlock;

	/* If we have a job attached to the CCCB fence context, its CCCB fence
	 * shouldn't be NULL.
	 */
	if (WARN_ON(!job->cccb_fence)) {
		job = NULL;
		goto out_unlock;
	}

	/* If we get there, CCCB fence has to be initialized. */
	cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base);
	if (WARN_ON(!cccb_fence->queue)) {
		job = NULL;
		goto out_unlock;
	}

	/* Evict signaled dependencies before checking for CCCB space.
	 * If the job fits, signal the CCCB fence, this should unblock
	 * the drm_sched_entity.
	 */
	native_deps_remaining = job_count_remaining_native_deps(job);
	if (!pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) {
		job = NULL;
		goto out_unlock;
	}

	/* NOTE(review): fence-signaling tail restored after extraction
	 * truncation — verify against source control.
	 */
	dma_fence_signal(job->cccb_fence);
	pvr_queue_fence_put(job->cccb_fence);
	job->cccb_fence = NULL;
	queue->cccb_fence_ctx.job = NULL;

out_unlock:
	mutex_unlock(&queue->cccb_fence_ctx.job_lock);

	pvr_job_put(job);
}
/**
 * pvr_queue_process() - Process events that happened on a queue.
 * @queue: Queue to check
 *
 * Signal job fences and check if jobs waiting for CCCB space can be unblocked.
 */
void pvr_queue_process(struct pvr_queue *queue)
{
	lockdep_assert_held(&queue->ctx->pvr_dev->queues.lock);

	/* NOTE(review): the three processing calls were restored after
	 * extraction truncation — verify against source control.
	 */
	pvr_queue_check_job_waiting_for_cccb_space(queue);
	pvr_queue_signal_done_fences(queue);
	pvr_queue_update_active_state_locked(queue);
}
/* Map a queue's job type to the FW data-master identifier. Returns ~0 for
 * unknown job types.
 */
static u32 get_dm_type(struct pvr_queue *queue)
{
	switch (queue->type) {
	case DRM_PVR_JOB_TYPE_GEOMETRY:
		return PVR_FWIF_DM_GEOM;
	case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
	case DRM_PVR_JOB_TYPE_FRAGMENT:
		return PVR_FWIF_DM_FRAG;
	case DRM_PVR_JOB_TYPE_COMPUTE:
		return PVR_FWIF_DM_CDM;
	}

	return ~0;
}
/** * init_fw_context() - Initializes the queue part of a FW context. * @queue: Queue object to initialize the FW context for. * @fw_ctx_map: The FW context CPU mapping. * * FW contexts are containing various states, one of them being a per-queue state * that needs to be initialized for each queue being exposed by a context. This * function takes care of that.
 */ staticvoid init_fw_context(struct pvr_queue *queue, void *fw_ctx_map)
{ struct pvr_context *ctx = queue->ctx; struct pvr_fw_object *fw_mem_ctx_obj = pvr_vm_get_fw_mem_context(ctx->vm_ctx); struct rogue_fwif_fwcommoncontext *cctx_fw; struct pvr_cccb *cccb = &queue->cccb;
/* NOTE(review): the body of init_fw_context() (population of the
 * rogue_fwif_fwcommoncontext fields and the closing brace) was lost during
 * extraction; restore it from source control before building this file.
 */
/** * pvr_queue_cleanup_fw_context() - Wait for the FW context to be idle and clean it up. * @queue: Queue on FW context to clean up. * * Return: * * 0 on success, * * Any error returned by pvr_fw_structure_cleanup() otherwise.
*/ staticint pvr_queue_cleanup_fw_context(struct pvr_queue *queue)
{ if (!queue->ctx->fw_obj) return 0;
/**
 * pvr_queue_job_init() - Initialize queue related fields in a pvr_job object.
 * @job: The job to initialize.
 * @drm_client_id: drm_file.client_id submitting the job
 *
 * Bind the job to a queue and allocate memory to guarantee pvr_queue_job_arm()
 * and pvr_queue_job_push() can't fail. We also make sure the context type is
 * valid and the job can fit in the CCCB.
 *
 * Return:
 *  * 0 on success, or
 *  * An error code if something failed.
 */
int pvr_queue_job_init(struct pvr_job *job, u64 drm_client_id)
{
	/* Fragment jobs need at least one native fence wait on the geometry job fence. */
	u32 min_native_dep_count = job->type == DRM_PVR_JOB_TYPE_FRAGMENT ? 1 : 0;
	struct pvr_queue *queue;
	int err;

	if (atomic_read(&job->ctx->faulty))
		return -EIO;

	queue = pvr_context_get_queue_for_job(job->ctx, job->type);
	if (!queue)
		return -EINVAL;

	if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count)))
		return -E2BIG;

	/* NOTE(review): tail restored from upstream after extraction
	 * truncation — verify the drm_sched_job_init() arguments and the
	 * fence allocations against source control.
	 */
	err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE,
				 drm_client_id);
	if (err)
		return err;

	job->cccb_fence = pvr_queue_fence_alloc();
	job->kccb_fence = pvr_kccb_fence_alloc();
	job->done_fence = pvr_queue_fence_alloc();
	if (!job->cccb_fence || !job->kccb_fence || !job->done_fence)
		return -ENOMEM;

	return 0;
}
/**
 * pvr_queue_job_arm() - Arm a job object.
 * @job: The job to arm.
 *
 * Initializes fences and return the drm_sched finished fence so it can
 * be exposed to the outside world. Once this function is called, you should
 * make sure the job is pushed using pvr_queue_job_push(), or guarantee that
 * no one grabbed a reference to the returned fence. The latter can happen if
 * we do multi-job submission, and something failed when creating/initializing
 * a job. In that case, we know the fence didn't leave the driver, and we
 * can thus guarantee nobody will wait on a dead fence object.
 *
 * Return:
 *  * A dma_fence object.
 */
struct dma_fence *pvr_queue_job_arm(struct pvr_job *job)
{
	drm_sched_job_arm(&job->base);

	return &job->base.s_fence->finished;
}
/**
 * pvr_queue_job_cleanup() - Cleanup fence/scheduler related fields in the job object.
 * @job: The job to cleanup.
 *
 * Should be called in the job release path.
 */
void pvr_queue_job_cleanup(struct pvr_job *job)
{
	pvr_queue_fence_put(job->done_fence);
	pvr_queue_fence_put(job->cccb_fence);
	pvr_kccb_fence_put(job->kccb_fence);

	/* Only cleanup the drm_sched job if it was actually initialized. */
	if (job->base.s_fence)
		drm_sched_job_cleanup(&job->base);
}
/**
 * pvr_queue_job_push() - Push a job to its queue.
 * @job: The job to push.
 *
 * Must be called after pvr_queue_job_init() and after all dependencies
 * have been added to the job. This will effectively queue the job to
 * the drm_sched_entity attached to the queue. We grab a reference on
 * the job object, so the caller is free to drop its reference when it's
 * done accessing the job object.
 */
void pvr_queue_job_push(struct pvr_job *job)
{
	struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler);

	/* Keep track of the last queued job scheduled fence for combined submit. */
	dma_fence_put(queue->last_queued_job_scheduled_fence);
	queue->last_queued_job_scheduled_fence = dma_fence_get(&job->base.s_fence->scheduled);

	/* NOTE(review): tail restored after extraction truncation — the job
	 * reference taken here is dropped in pvr_queue_free_job(); verify
	 * against source control.
	 */
	pvr_job_get(job);
	drm_sched_entity_push_job(&job->base);
}
/**
 * pvr_queue_kill() - Kill a queue.
 * @queue: The queue to kill.
 *
 * Kill the queue so no new jobs can be pushed. Should be called when the
 * context handle is destroyed. The queue object might last longer if jobs
 * are still in flight and holding a reference to the context this queue
 * belongs to.
 */
void pvr_queue_kill(struct pvr_queue *queue)
{
	drm_sched_entity_destroy(&queue->entity);
	dma_fence_put(queue->last_queued_job_scheduled_fence);
	queue->last_queued_job_scheduled_fence = NULL;
}
/** * pvr_queue_destroy() - Destroy a queue. * @queue: The queue to destroy. * * Cleanup the queue and free the resources attached to it. Should be * called from the context release function.
 */ void pvr_queue_destroy(struct pvr_queue *queue)
{ if (!queue) return;
/* NOTE(review): the remainder of pvr_queue_destroy() (queue-list removal,
 * scheduler/entity teardown, FW context cleanup, resource frees, and the
 * closing brace) was lost during extraction; restore it from source
 * control before building this file.
 */
/**
 * pvr_queue_device_init() - Device-level initialization of queue related fields.
 * @pvr_dev: The device to initialize.
 *
 * Initializes all fields related to queue management in pvr_device.
 *
 * Return:
 *  * 0 on success, or
 *  * An error code on failure.
 */
int pvr_queue_device_init(struct pvr_device *pvr_dev)
{
	int err;

	INIT_LIST_HEAD(&pvr_dev->queues.active);
	INIT_LIST_HEAD(&pvr_dev->queues.idle);
	err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_dev->queues.lock);
	if (err)
		return err;

	pvr_dev->sched_wq = alloc_workqueue("powervr-sched", WQ_UNBOUND, 0);
	if (!pvr_dev->sched_wq)
		return -ENOMEM;

	return 0;
}
/**
 * pvr_queue_device_fini() - Device-level cleanup of queue related fields.
 * @pvr_dev: The device to cleanup.
 *
 * Cleanup/free all queue-related resources attached to a pvr_device object.
 */
void pvr_queue_device_fini(struct pvr_device *pvr_dev)
{
	destroy_workqueue(pvr_dev->sched_wq);
}
Messung V0.5
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.10Bemerkung:
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.