/**
 * xe_hw_engine_setup_groups() - Setup the hw engine groups for the gt
 * @gt: The gt for which groups are setup
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_hw_engine_setup_groups(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_hw_engine_group *group_rcs_ccs, *group_bcs, *group_vcs_vecs;
	struct xe_device *xe = gt_to_xe(gt);

	group_rcs_ccs = hw_engine_group_alloc(xe);
	if (IS_ERR(group_rcs_ccs))
		return PTR_ERR(group_rcs_ccs);

	group_bcs = hw_engine_group_alloc(xe);
	if (IS_ERR(group_bcs))
		return PTR_ERR(group_bcs);

	group_vcs_vecs = hw_engine_group_alloc(xe);
	if (IS_ERR(group_vcs_vecs))
		return PTR_ERR(group_vcs_vecs);

	for_each_hw_engine(hwe, gt, id) {
		switch (hwe->class) {
		case XE_ENGINE_CLASS_COPY:
			hwe->hw_engine_group = group_bcs;
			break;
		case XE_ENGINE_CLASS_RENDER:
		case XE_ENGINE_CLASS_COMPUTE:
			hwe->hw_engine_group = group_rcs_ccs;
			break;
		case XE_ENGINE_CLASS_VIDEO_DECODE:
		case XE_ENGINE_CLASS_VIDEO_ENHANCE:
			hwe->hw_engine_group = group_vcs_vecs;
			break;
		case XE_ENGINE_CLASS_OTHER:
			break;
		default:
			drm_warn(&xe->drm, "NOT POSSIBLE");
		}
	}

	return 0;
}
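/*
 * Usage sketch (illustration only, not part of this file): the grouping above
 * is expected to run once per GT, after the hw engines themselves have been
 * initialized, roughly along the lines of:
 *
 *	err = xe_hw_engine_setup_groups(gt);
 *	if (err)
 *		return err;
 *
 * so that every hwe->hw_engine_group pointer is valid before exec queues are
 * created on those engines. The surrounding init path is an assumption here.
 */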
/**
 * xe_hw_engine_group_add_exec_queue() - Add an exec queue to a hw engine group
 * @group: The hw engine group
 * @q: The exec_queue
 *
 * Return: 0 on success,
 *	   -EINTR if the lock could not be acquired
 */
int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q)
{
	int err;
	struct xe_device *xe = gt_to_xe(q->gt);
/**
 * xe_hw_engine_group_del_exec_queue() - Delete an exec queue from a hw engine group
 * @group: The hw engine group
 * @q: The exec_queue
 */
void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q)
{
	struct xe_device *xe = gt_to_xe(q->gt);

	xe_assert(xe, group);
	xe_assert(xe, q->vm);

	down_write(&group->mode_sem);

	if (!list_empty(&q->hw_engine_group_link))
		list_del(&q->hw_engine_group_link);

	up_write(&group->mode_sem);
}
/**
 * xe_hw_engine_group_resume_faulting_lr_jobs() - Asynchronously resume the hw engine group's
 * faulting LR jobs
 * @group: The hw engine group
 */
void xe_hw_engine_group_resume_faulting_lr_jobs(struct xe_hw_engine_group *group)
{
	queue_work(group->resume_wq, &group->resume_work);
}
/**
 * xe_hw_engine_group_suspend_faulting_lr_jobs() - Suspend the faulting LR jobs of this group
 * @group: The hw engine group
 *
 * Return: 0 on success, negative error code on error.
 */
static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group)
{
	int err;
	struct xe_exec_queue *q;
	bool need_resume = false;

	lockdep_assert_held_write(&group->mode_sem);
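	/*
	 * Two passes over the queue list: the first issues suspend on every
	 * faulting (LR) queue, the second waits for each suspension to
	 * complete; the actual resume is then queued asynchronously.
	 */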
	list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
		if (!xe_vm_in_fault_mode(q->vm))
			continue;

		need_resume = true;
		q->ops->suspend(q);
	}

	list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
		if (!xe_vm_in_fault_mode(q->vm))
			continue;

		err = q->ops->suspend_wait(q);
		if (err)
			return err;
	}

	if (need_resume)
		xe_hw_engine_group_resume_faulting_lr_jobs(group);

	return 0;
}
/**
 * xe_hw_engine_group_wait_for_dma_fence_jobs() - Wait for dma fence jobs to complete
 * @group: The hw engine group
 *
 * This function is not meant to be called directly from a user IOCTL as dma_fence_wait()
 * is not interruptible.
 *
 * Return: 0 on success,
 *	   -ETIME if waiting for one job failed
 */
static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group *group)
{
	long timeout;
	struct xe_exec_queue *q;
	struct dma_fence *fence;

	lockdep_assert_held_write(&group->mode_sem);

	list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
		if (xe_vm_in_lr_mode(q->vm))
			continue;
/**
 * xe_hw_engine_group_get_mode() - Get the group to execute in the new mode
 * @group: The hw engine group
 * @new_mode: The new execution mode
 * @previous_mode: Pointer to the previous mode provided for use by caller
 *
 * Return: 0 if successful, -EINTR if locking failed.
 */
int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group,
				enum xe_hw_engine_group_execution_mode new_mode,
				enum xe_hw_engine_group_execution_mode *previous_mode)
__acquires(&group->mode_sem)
{
	int err = down_read_interruptible(&group->mode_sem);

	if (err)
		return err;

	*previous_mode = group->cur_mode;

	if (new_mode != group->cur_mode) {
		up_read(&group->mode_sem);
		err = down_write_killable(&group->mode_sem);
		if (err)
			return err;

		if (new_mode != group->cur_mode) {
			err = switch_mode(group);
			if (err) {
				up_write(&group->mode_sem);
				return err;
			}
		}
		downgrade_write(&group->mode_sem);
	}

	return err;
}
/**
 * xe_hw_engine_group_put() - Put the group
 * @group: The hw engine group
 */
void xe_hw_engine_group_put(struct xe_hw_engine_group *group)
__releases(&group->mode_sem)
{
	up_read(&group->mode_sem);
}
/**
 * xe_hw_engine_group_find_exec_mode() - Find the execution mode for this exec queue
 * @q: The exec_queue
 */
enum xe_hw_engine_group_execution_mode
xe_hw_engine_group_find_exec_mode(struct xe_exec_queue *q)
{
	if (xe_vm_in_fault_mode(q->vm))
		return EXEC_MODE_LR;
	else
		return EXEC_MODE_DMA_FENCE;
}
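/*
 * Usage sketch (illustration only, not part of this file): a submission path
 * is expected to bracket the actual submission with get/put, picking the mode
 * from the queue's VM:
 *
 *	enum xe_hw_engine_group_execution_mode mode, previous_mode;
 *	int err;
 *
 *	mode = xe_hw_engine_group_find_exec_mode(q);
 *	err = xe_hw_engine_group_get_mode(group, mode, &previous_mode);
 *	if (err)
 *		return err;
 *
 *	... submit the job for q ...
 *
 *	xe_hw_engine_group_put(group);
 *
 * Here "group" stands for the hw engine group of the queue's engine; how the
 * caller obtains it is an assumption, since this file only shows the pointer
 * being set on each hw engine in xe_hw_engine_setup_groups().
 */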