/**
 * DOC: Execbuf (User GPU command submission)
 *
 * Execs have historically been rather complicated in DRM drivers (at least in
 * the i915) because of a few things:
 *
 * - Passing in a list of BOs which are read / written to, creating implicit
 *   syncs
 * - Binding at exec time
 * - Flow controlling the ring at exec time
 *
 * In XE we avoid all of this complication by not allowing a BO list to be
 * passed into an exec, using the dma-buf implicit sync uAPI, having binds as
 * separate operations, and using the DRM scheduler to flow control the ring.
 * Let's deep dive on each of these.
 *
 * We can get away without a BO list by forcing the user to use in / out fences
 * on every exec rather than having the kernel track dependencies of BOs (e.g.
 * if the user knows an exec writes to a BO and reads from the BO in the next
 * exec, it is the user's responsibility to pass an in / out fence between the
 * two execs).
 *
 * We do not allow a user to trigger a bind at exec time; rather, we have a VM
 * bind IOCTL which uses the same in / out fence interface as exec. In that
 * sense, a VM bind is basically the same operation as an exec from the user's
 * perspective, e.g. if an exec depends on a VM bind, use the in / out fence
 * interface (struct drm_xe_sync) to synchronize, just like when syncing
 * between two dependent execs.
 *
 * Although a user cannot trigger a bind, we still have to rebind userptrs in
 * the VM that have been invalidated since the last exec; likewise, we also
 * have to rebind BOs that have been evicted by the kernel. We schedule these
 * rebinds behind any pending kernel operations on any external BOs in the VM
 * or any BOs private to the VM. This is accomplished by the rebinds waiting on
 * the BOs' DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on
 * all BO slots (inflight execs are in the DMA_RESV_USAGE_BOOKKEEP slot for
 * both private and external BOs).
 *
 * Rebinds / dma-resv usage applies to non-compute mode VMs only; for compute
 * mode VMs we use preempt fences and a rebind worker (TODO: add link).
 *
 * There is no need to flow control the ring in the exec, as we write the ring
 * at submission time and set the DRM scheduler max job limit to SIZE_OF_RING /
 * MAX_JOB_SIZE. The DRM scheduler will then hold all jobs until space in the
 * ring is available.
 *
 * All of this results in a rather simple exec implementation.
 *
 * Flow
 * ~~~~
 *
 * .. code-block::
 *
 *    Parse input arguments
 *    Wait for any async VM bind passed as in-fences to start
 *    <----------------------------------------------------------------|
 *    Lock global VM lock in read mode                                 |
 *    Pin userptrs (also finds userptrs invalidated since last exec)   |
 *    Lock exec (VM dma-resv lock, external BOs dma-resv locks)        |
 *    Validate BOs that have been evicted                              |
 *    Create job                                                       |
 *    Rebind invalidated userptrs + evicted BOs (non-compute-mode)     |
 *    Add rebind fence dependency to job                               |
 *    Add job to VM dma-resv bookkeeping slot (non-compute mode)       |
 *    Add job to external BOs dma-resv write slots (non-compute mode)  |
 *    Check if any userptrs invalidated since pin --- Drop locks ------|
 *    Install in / out fences for job
 *    Submit job
 *    Unlock all
 */
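/*
 * Illustrative userspace sketch (not part of this file): chaining an exec
 * behind a VM bind with the shared in / out fence interface described in the
 * DOC above. Structure, flag and IOCTL names follow the Xe uAPI
 * (include/uapi/drm/xe_drm.h); syncobj creation, the bind op description and
 * error handling are omitted, and the handle / id variables are placeholders.
 *
 *	struct drm_xe_sync bind_out = {
 *		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *		.flags = DRM_XE_SYNC_FLAG_SIGNAL,	// out-fence of the bind
 *		.handle = syncobj_handle,
 *	};
 *	struct drm_xe_vm_bind bind = {
 *		.vm_id = vm_id,
 *		.num_syncs = 1,
 *		.syncs = (uintptr_t)&bind_out,
 *		// ... bind operation description ...
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 *
 *	struct drm_xe_sync exec_in = {
 *		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,	// no SIGNAL flag: in-fence
 *		.handle = syncobj_handle,
 *	};
 *	struct drm_xe_exec exec = {
 *		.exec_queue_id = queue_id,
 *		.num_syncs = 1,
 *		.syncs = (uintptr_t)&exec_in,
 *		.address = batch_addr,
 *		.num_batch_buffer = 1,
 *	};
 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);	// runs only after the bind
 */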
/*
 * Add validation and rebinding to the drm_exec locking loop, since both can
 * trigger eviction which may require sleeping dma_resv locks.
 */
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
	struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm);

	/* The fence slot added here is intended for the exec sched job. */
	return xe_vm_validate_rebind(vm, &vm_exec->exec, 1);
}
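/*
 * Purely illustrative sketch (an assumption, not code from this driver): how
 * a callback like xe_exec_fn() above is typically wired into the drm_gpuvm
 * locking loop. drm_gpuvm_exec_lock() takes the VM dma-resv lock and all
 * external BO dma-resv locks, retrying on contention, and calls .extra.fn
 * under those locks so validation / rebinding can sleep on them.
 * xe_exec_lock_sketch() is a hypothetical helper name.
 */
static int xe_exec_lock_sketch(struct xe_vm *vm)
{
	struct drm_gpuvm_exec vm_exec = {
		.vm = &vm->gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
		.num_fences = 1,	/* slot for the exec sched job fence */
		.extra.fn = xe_exec_fn,	/* validate + rebind inside the loop */
	};
	int err;

	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		return err;

	/* ... create, arm and push the job while the locks are held ... */

	drm_exec_fini(&vm_exec.exec);
	return 0;
}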
	if (xe_exec_queue_is_parallel(q)) {
		err = copy_from_user(addresses, addresses_user, sizeof(u64) *
				     q->width);
		if (err) {
			err = -EFAULT;
			goto err_syncs;
		}
	}

	group = q->hwe->hw_engine_group;
	mode = xe_hw_engine_group_find_exec_mode(q);
	if (mode == EXEC_MODE_DMA_FENCE) {
		err = xe_hw_engine_group_get_mode(group, mode, &previous_mode);
		if (err)
			goto err_syncs;
	}
retry:
	if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
		err = down_write_killable(&vm->lock);
		write_locked = true;
	} else {
		/* We don't allow execs while the VM is in error state */
		err = down_read_interruptible(&vm->lock);
		write_locked = false;
	}
	if (err)
		goto err_hw_exec_mode;

	if (write_locked) {
		err = xe_vm_userptr_pin(vm);
		downgrade_write(&vm->lock);
		write_locked = false;
		if (err)
			goto err_unlock_list;
	}
	if (!args->num_batch_buffer) {
		/* Zero-batch exec: pure synchronization point, no work submitted */
		err = xe_vm_lock(vm, true);
		if (err)
			goto err_unlock_list;

		if (!xe_vm_in_lr_mode(vm)) {
			struct dma_fence *fence;

			fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
			if (IS_ERR(fence)) {
				err = PTR_ERR(fence);
				xe_vm_unlock(vm);
				goto err_unlock_list;
			}
			for (i = 0; i < num_syncs; i++)
				xe_sync_entry_signal(&syncs[i], fence);
			xe_exec_queue_last_fence_set(q, vm, fence);
			dma_fence_put(fence);
		}

		xe_vm_unlock(vm);
		goto err_unlock_list;
	}
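	/*
	 * Illustrative userspace sketch (an assumption, not from this file):
	 * because a zero-batch exec only signals its out-fences once the
	 * in-fences and the queue's last fence have completed, it can serve
	 * as a pure synchronization point:
	 *
	 *	struct drm_xe_sync out = {
	 *		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
	 *		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
	 *		.handle = syncobj_handle,
	 *	};
	 *	struct drm_xe_exec sync_point = {
	 *		.exec_queue_id = queue_id,
	 *		.num_syncs = 1,
	 *		.syncs = (uintptr_t)&out,
	 *		.num_batch_buffer = 0,	// nothing to run
	 *	};
	 *	ioctl(fd, DRM_IOCTL_XE_EXEC, &sync_point);
	 */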
	/*
	 * It's OK to block interruptibly here with the vm lock held, since
	 * on task freezing during suspend / hibernate, the call will
	 * return -ERESTARTSYS and the IOCTL will be rerun.
	 */
	err = wait_for_completion_interruptible(&xe->pm_block);
	if (err)
		goto err_unlock_list;
	/* Wait behind rebinds */
	if (!xe_vm_in_lr_mode(vm)) {
		err = xe_sched_job_add_deps(job,
					    xe_vm_resv(vm),
					    DMA_RESV_USAGE_KERNEL);
		if (err)
			goto err_put_job;
	}

	for (i = 0; i < num_syncs && !err; i++)
		err = xe_sync_entry_add_deps(&syncs[i], job);
	if (err)
		goto err_put_job;

	if (!xe_vm_in_lr_mode(vm)) {
		err = xe_sched_job_last_fence_add_dep(job, vm);
		if (err)
			goto err_put_job;

		err = down_read_interruptible(&vm->userptr.notifier_lock);
		if (err)
			goto err_put_job;

		err = __xe_vm_userptr_needs_repin(vm);
		if (err)
			goto err_repin;
	}
	/*
	 * Point of no return: if we error after this point, just set an
	 * error on the job and let the DRM scheduler / backend clean up the
	 * job.
	 */
	xe_sched_job_arm(job);
	if (!xe_vm_in_lr_mode(vm))
		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec,
					 &job->drm.s_fence->finished,
					 DMA_RESV_USAGE_BOOKKEEP,
					 DMA_RESV_USAGE_BOOKKEEP);
for (i = 0; i < num_syncs; i++) {
xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
xe_sched_job_init_user_fence(job, &syncs[i]);
}
	if (xe_exec_queue_is_lr(q))
		q->ring_ops->emit_job(job);
	if (!xe_vm_in_lr_mode(vm))
		xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
xe_sched_job_push(job);
xe_vm_reactivate_rebind(vm);
if (!err && !xe_vm_in_lr_mode(vm)) {
spin_lock(&xe->ttm.lru_lock);
ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
spin_unlock(&xe->ttm.lru_lock);
}
if (mode == EXEC_MODE_LR)
xe_hw_engine_group_resume_faulting_lr_jobs(group);
err_repin:
	if (!xe_vm_in_lr_mode(vm))
		up_read(&vm->userptr.notifier_lock);
err_put_job:
	if (err)
		xe_sched_job_put(job);
err_exec:
	drm_exec_fini(exec);
err_unlock_list:
	up_read(&vm->lock);
	if (err == -EAGAIN && !skip_retry)
		goto retry;
err_hw_exec_mode:
	if (mode == EXEC_MODE_DMA_FENCE)
		xe_hw_engine_group_put(group);
err_syncs:
	while (num_syncs--)
		xe_sync_entry_cleanup(&syncs[num_syncs]);
	kfree(syncs);
kfree(syncs);
err_exec_queue:
xe_exec_queue_put(q);