/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
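
/* Headers needed by the functions below: completion/kthread primitives,
 * the scheduler API and its tracepoints.
 */
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"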
/**
 * drm_sched_entity_init - Initialize a context entity used by the scheduler
 * when submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *              entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
                          enum drm_sched_priority priority,
                          struct drm_gpu_scheduler **sched_list,
                          unsigned int num_sched_list,
                          atomic_t *guilty)
{
        if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
                return -EINVAL;
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
entity->rq = NULL;
entity->guilty = guilty;
entity->num_sched_list = num_sched_list;
entity->priority = priority;
        entity->last_user = current->group_leader;

        /*
         * It's perfectly valid to initialize an entity without having a valid
         * scheduler attached. It's just not valid to use the scheduler before
         * it is initialized itself.
         */
entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
RCU_INIT_POINTER(entity->last_scheduled, NULL);
RB_CLEAR_NODE(&entity->rb_tree_node);
        if (num_sched_list && !sched_list[0]->sched_rq) {
                /* Every entry covered by num_sched_list should be non-NULL,
                 * so warn drivers not to do this and to fix their DRM
                 * calling order.
                 */
pr_warn("%s: called with uninitialized scheduler\n", __func__);
        } else if (num_sched_list) {
                /* The "priority" of an entity cannot exceed the number of
                 * run-queues of a scheduler. Protect against num_rqs being 0,
                 * by converting to signed. Choose the lowest priority
                 * available.
                 */
                if (entity->priority >= sched_list[0]->num_rqs) {
dev_err(sched_list[0]->dev, "entity has out-of-bounds priority: %u. num_rqs: %u\n",
entity->priority, sched_list[0]->num_rqs);
entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
(s32) DRM_SCHED_PRIORITY_KERNEL);
}
entity->rq = sched_list[0]->sched_rq[entity->priority];
}
init_completion(&entity->entity_idle);
/* We start in an idle state. */
complete_all(&entity->entity_idle);
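
        /* The entity lock and job queue are used by the push/pop paths below. */
        spin_lock_init(&entity->lock);
        spsc_queue_init(&entity->job_queue);

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = dma_fence_context_alloc(2);

        return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);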
/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *              existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
                                   struct drm_gpu_scheduler **sched_list,
                                   unsigned int num_sched_list)
{
WARN_ON(!num_sched_list || !sched_list);
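
        /* Swap in the new scheduler list under the entity lock */
        spin_lock(&entity->lock);
        entity->sched_list = sched_list;
        entity->num_sched_list = num_sched_list;
        spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);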
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
rmb(); /* for list_empty to work without lock */
if (list_empty(&entity->list) ||
spsc_queue_count(&entity->job_queue) == 0 ||
            entity->stopped)
                return true;

        return false;
}
/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. Result can
 * change any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
        struct dma_fence *fence;
        int r;
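
        /* last_scheduled is RCU protected, see drm_sched_entity_init() */
        rcu_read_lock();
        fence = rcu_dereference(entity->last_scheduled);
        r = fence ? fence->error : 0;
        rcu_read_unlock();

        return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);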
/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
                                          struct dma_fence_cb *cb)
{
        struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                                 finish_cb);
        unsigned long index;

        dma_fence_put(f);

        /* Wait for all dependencies to avoid data corruptions */
        xa_for_each(&job->dependencies, index, f) {
                struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

                if (s_fence && f == &s_fence->scheduled) {
                        /* The dependencies array had a reference on the scheduled
                         * fence, and the finished fence refcount might have
                         * dropped to zero. Use dma_fence_get_rcu() so we get
                         * a NULL fence in that case.
                         */
                        f = dma_fence_get_rcu(&s_fence->finished);

                        /* Now that we have a reference on the finished fence,
                         * we can release the reference the dependencies array
                         * had on the scheduled fence.
                         */
                        dma_fence_put(&s_fence->scheduled);
                }

                xa_erase(&job->dependencies, index);
                if (f && !dma_fence_add_callback(f, &job->finish_cb,
                                                 drm_sched_entity_kill_jobs_cb))
                        return;

                dma_fence_put(f);
        }

        INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
        schedule_work(&job->work);
}
/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
        struct drm_sched_job *job;
        struct dma_fence *prev;
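
        if (!entity->rq)
                return;

        /* Stop new jobs from being pushed and take the entity off its rq */
        spin_lock(&entity->lock);
        entity->stopped = true;
        drm_sched_rq_remove_entity(entity->rq, entity);
        spin_unlock(&entity->lock);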
/* Make sure this entity is not used by the scheduler at the moment */
wait_for_completion(&entity->entity_idle);
/* The entity is guaranteed to not be used by the scheduler */
prev = rcu_dereference_check(entity->last_scheduled, true);
        dma_fence_get(prev);
        while ((job = drm_sched_entity_queue_pop(entity))) {
                struct drm_sched_fence *s_fence = job->s_fence;
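
                dma_fence_get(&s_fence->finished);
                if (!prev ||
                    dma_fence_add_callback(prev, &job->finish_cb,
                                           drm_sched_entity_kill_jobs_cb))
                        /* Callback registration failed, run it directly;
                         * dma_fence_put() checks for NULL.
                         */
                        drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);

                prev = &s_fence->finished;
        }
        dma_fence_put(prev);
}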
/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions. This first part does
 * the waiting, removes the entity from the runqueue and returns an error when
 * the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
        struct drm_gpu_scheduler *sched;
        struct task_struct *last_user;
        long ret = timeout;

        if (!entity->rq)
                return 0;
        sched = entity->rq->sched;
        /*
         * The client will not queue more IBs during this fini; consume
         * existing queued IBs or discard them on SIGKILL.
         */
        if (current->flags & PF_EXITING) {
                if (timeout)
ret = wait_event_timeout(
sched->job_scheduled,
drm_sched_entity_is_idle(entity),
timeout);
} else {
wait_event_killable(sched->job_scheduled,
drm_sched_entity_is_idle(entity));
}
/* For killed process disable any more IBs enqueue right now */
        last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
        if (last_user == current->group_leader &&
(current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
drm_sched_entity_kill(entity);
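
        return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);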
/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
        /*
         * If consumption of existing IBs wasn't completed, forcefully remove
         * them here. This also makes sure that the scheduler won't touch this
         * entity any more.
         */
drm_sched_entity_kill(entity);
if (entity->dependency) {
dma_fence_remove_callback(entity->dependency, &entity->cb);
dma_fence_put(entity->dependency);
entity->dependency = NULL;
}
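
        /* Drop the reference on the last scheduled fence */
        dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
        RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);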
/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                   enum drm_sched_priority priority)
{
spin_lock(&entity->lock);
entity->priority = priority;
spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity,
                                               struct drm_sched_job *sched_job)
{
        struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct dma_fence *fence = entity->dependency;
        struct drm_sched_fence *s_fence;
if (fence->context == entity->fence_context ||
            fence->context == entity->fence_context + 1) {
                /*
                 * Fence is a scheduled/finished fence from a job
                 * which belongs to the same entity, we can ignore
                 * fences from ourself
                 */
                dma_fence_put(entity->dependency);
                return false;
        }
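
        /* Pipelining is only possible for fences from the same scheduler */
        s_fence = to_drm_sched_fence(fence);
        if (!fence->error && s_fence && s_fence->sched == sched &&
            !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {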
                /*
                 * Fence is from the same scheduler, only need to wait for
                 * it to be scheduled
                 */
fence = dma_fence_get(&s_fence->scheduled);
dma_fence_put(entity->dependency);
entity->dependency = fence;
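                if (!dma_fence_add_callback(fence, &entity->cb,
                                            drm_sched_entity_clear_dep))
                        return true;

                /* Ignore it when it is already scheduled */
                dma_fence_put(fence);
                return false;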
}
if (trace_drm_sched_job_unschedulable_enabled() &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &entity->dependency->flags))
trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
        if (!dma_fence_add_callback(entity->dependency, &entity->cb,
                                    drm_sched_entity_wakeup))
                return true;
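
        dma_fence_put(entity->dependency);
        return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
                         struct drm_sched_entity *entity)
{
        struct dma_fence *f;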
        /* We keep the fence around, so we can iterate over all dependencies
         * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
         * before killing the job.
         */
        f = xa_load(&job->dependencies, job->last_dependency);
        if (f) {
                job->last_dependency++;
                return dma_fence_get(f);
}
        if (job->sched->ops->prepare_job)
                return job->sched->ops->prepare_job(job, entity);
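
        return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
        struct drm_sched_job *sched_job;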
        sched_job = drm_sched_entity_queue_peek(entity);
        if (!sched_job)
                return NULL;

        while ((entity->dependency =
                        drm_sched_job_dependency(sched_job, entity))) {
                if (drm_sched_entity_add_dependency_cb(entity, sched_job))
                        return NULL;
}
        /* skip jobs from entity that marked guilty */
        if (entity->guilty && atomic_read(entity->guilty))
dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
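
        /* Publish the job's finished fence as the last scheduled one */
        dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
        rcu_assign_pointer(entity->last_scheduled,
                           dma_fence_get(&sched_job->s_fence->finished));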
        /*
         * If the queue is empty we allow drm_sched_entity_select_rq() to
         * locklessly access ->last_scheduled. This only works if we set the
         * pointer before we dequeue and if we add a write barrier here.
         */
smp_wmb();
spsc_queue_pop(&entity->job_queue);
        /*
         * Update the entity's location in the min heap according to
         * the timestamp of the next job, if any.
         */
        if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
                struct drm_sched_job *next;

                next = drm_sched_entity_queue_peek(entity);
                if (next) {
                        struct drm_sched_rq *rq;
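
                        spin_lock(&entity->lock);
                        rq = entity->rq;
                        spin_lock(&rq->lock);
                        drm_sched_rq_update_fifo_locked(entity, rq,
                                                        next->submit_ts);
                        spin_unlock(&rq->lock);
                        spin_unlock(&entity->lock);
                }
        }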
        /* Jobs and entities might have different lifecycles. Since we're
         * removing the job from the entity's queue, set the job's entity
         * pointer to NULL to prevent any future access of the entity through
         * this job.
         */
sched_job->entity = NULL;
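
        return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
        struct dma_fence *fence;
        struct drm_gpu_scheduler *sched;
        struct drm_sched_rq *rq;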
        /* single possible engine and already selected */
        if (!entity->sched_list)
                return;

        /* queue non-empty, stay on the same engine */
        if (spsc_queue_count(&entity->job_queue))
                return;
        /*
         * Only when the queue is empty are we guaranteed that
         * drm_sched_run_job_work() cannot change entity->last_scheduled. To
         * enforce ordering we need a read barrier here. See
         * drm_sched_entity_pop_job() for the other side.
         */
smp_rmb();
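
        fence = rcu_dereference_check(entity->last_scheduled, true);

        /* stay on the same engine if the previous job hasn't finished */
        if (fence && !dma_fence_is_signaled(fence))
                return;

        spin_lock(&entity->lock);
        sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
        rq = sched ? sched->sched_rq[entity->priority] : NULL;
        if (rq != entity->rq) {
                drm_sched_rq_remove_entity(entity->rq, entity);
                entity->rq = rq;
        }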
if (entity->num_sched_list == 1)
entity->sched_list = NULL;
spin_unlock(&entity->lock);
}
/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number this function should be called with drm_sched_job_arm()
 * under common lock for the struct drm_sched_entity that was set up for
 * @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
        struct drm_sched_entity *entity = sched_job->entity;
        bool first;
ktime_t submit_ts;
trace_drm_sched_job_queue(sched_job, entity);
        if (trace_drm_sched_job_add_dep_enabled()) {
                struct dma_fence *entry;
                unsigned long index;
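
                xa_for_each(&sched_job->dependencies, index, entry)
                        trace_drm_sched_job_add_dep(sched_job, entry);
        }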
        /*
         * After the sched_job is pushed into the entity queue, it may be
         * completed and freed up at any time. We can no longer access it.
         * Make sure to set the submit_ts first, to avoid a race.
         */
sched_job->submit_ts = submit_ts = ktime_get();
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
        /* first job wakes up scheduler */
        if (first) {
                struct drm_gpu_scheduler *sched;
                struct drm_sched_rq *rq;
/* Add the entity to the run queue */
                spin_lock(&entity->lock);
                if (entity->stopped) {
spin_unlock(&entity->lock);
                        DRM_ERROR("Trying to push to a killed entity\n");
                        return;
}
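
                rq = entity->rq;
                sched = rq->sched;

                spin_lock(&rq->lock);
                drm_sched_rq_add_entity(rq, entity);

                if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
                        drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);

                spin_unlock(&rq->lock);
                spin_unlock(&entity->lock);

                drm_sched_wakeup(sched);
        }
}
EXPORT_SYMBOL(drm_sched_entity_push_job);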