for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) { struct drm_gem_object *obj;
if (bo->flags & BO_INVALID_FLAGS) {
DRM_ERROR("invalid flags: %x\n", bo->flags);
ret = -EINVAL; goto out_unlock;
}
submit->bos[i].flags = bo->flags; if (submit->flags & ETNA_SUBMIT_SOFTPIN) { if (bo->presumed < ETNAVIV_SOFTPIN_START_ADDRESS) {
DRM_ERROR("invalid softpin address\n");
ret = -EINVAL; goto out_unlock;
}
submit->bos[i].va = bo->presumed;
}
/* normally use drm_gem_object_lookup(), but for bulk lookup * all under single table_lock just hit object_idr directly:
*/
obj = idr_find(&file->object_idr, bo->handle); if (!obj) {
DRM_ERROR("invalid handle %u at index %u\n",
bo->handle, i);
ret = -EINVAL; goto out_unlock;
}
/* * Take a refcount on the object. The file table lock * prevents the object_idr's refcount on this being dropped.
*/
drm_gem_object_get(obj);
/*
 * Lock the reservation objects of all BOs on the submit list, using the
 * ww_mutex acquire context so the whole set is taken deadlock-free.
 *
 * On -EDEADLK all locks taken so far are dropped, the contended BO is
 * locked on the slow path, and the loop is retried. The slow-locked BO
 * index is remembered in slow_locked so that a later failure does not
 * unlock it twice before the retry loop has reached it.
 *
 * Returns 0 with every BO locked and the acquire context closed via
 * ww_acquire_done(), or a negative error code with no BOs left locked.
 */
static int submit_lock_objects(struct etnaviv_gem_submit *submit,
		struct ww_acquire_ctx *ticket)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		/* reached the BO locked on the slow path; it no longer
		 * needs a separate unlock on a subsequent failure */
		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(obj->resv, ticket);
			if (ret == -EALREADY)
				DRM_ERROR("BO at index %u already on submit list\n",
					  i);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(ticket);

	return 0;

fail:
	/* drop all locks taken up to (and including) the contended index;
	 * submit_unlock_object() is a no-op for BOs not marked BO_LOCKED */
	for (; i >= 0; i--)
		submit_unlock_object(submit, i);

	if (slow_locked > 0)
		submit_unlock_object(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct drm_gem_object *obj;

		obj = &submit->bos[contended].obj->base;

		/* we lost out in a seqno race, lock and retry.. */
		ret = dma_resv_lock_slow_interruptible(obj->resv, ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
/*
 * Prepare fencing for all BOs on the submit: reserve one fence slot in
 * each BO's reservation object and, unless the userspace submit opted
 * out with ETNA_SUBMIT_NO_IMPLICIT, pull the BO's implicit fences into
 * the scheduler job as dependencies (write BOs get the full set per
 * drm_sched_job_add_implicit_dependencies()).
 *
 * Must be called with all BO reservation locks held (see
 * submit_lock_objects()). Returns 0 on success or a negative error code.
 */
static int submit_fence_sync(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		struct dma_resv *robj = bo->obj->base.resv;

		ret = dma_resv_reserve_fences(robj, 1);
		if (ret)
			return ret;

		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
			continue;

		ret = drm_sched_job_add_implicit_dependencies(&submit->sched_job,
							      &bo->obj->base,
							      bo->flags & ETNA_SUBMIT_BO_WRITE);
		if (ret)
			return ret;
	}

	return ret;
}
staticvoid submit_attach_object_fences(struct etnaviv_gem_submit *submit)
{ int i;
for (i = 0; i < submit->nr_bos; i++) { struct drm_gem_object *obj = &submit->bos[i].obj->base; bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
/* process the reloc's and patch up the cmdstream as needed: */ staticint submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
u32 size, conststruct drm_etnaviv_gem_submit_reloc *relocs,
u32 nr_relocs)
{
u32 i, last_offset = 0;
u32 *ptr = stream; int ret;
/* Submits using softpin don't blend with relocs */ if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && nr_relocs != 0) return -EINVAL;
for (i = 0; i < nr_relocs; i++) { conststruct drm_etnaviv_gem_submit_reloc *r = relocs + i; struct etnaviv_gem_submit_bo *bo;
u32 off;
if (unlikely(r->flags)) {
DRM_ERROR("invalid reloc flags\n"); return -EINVAL;
}
for (i = 0; i < submit->nr_pmrs; i++) { conststruct drm_etnaviv_gem_submit_pmr *r = pmrs + i; struct etnaviv_gem_submit_bo *bo; int ret;
ret = submit_bo(submit, r->read_idx, &bo); if (ret) return ret;
/* at offset 0 a sequence number gets stored used for userspace sync */ if (r->read_offset == 0) {
DRM_ERROR("perfmon request: offset is 0"); return -EINVAL;
}
if (submit->cmdbuf.suballoc)
etnaviv_cmdbuf_free(&submit->cmdbuf);
if (submit->mmu_context)
etnaviv_iommu_context_put(submit->mmu_context);
if (submit->prev_mmu_context)
etnaviv_iommu_context_put(submit->prev_mmu_context);
for (i = 0; i < submit->nr_bos; i++) { struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
/* unpin all objects */ if (submit->bos[i].flags & BO_PINNED) {
etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
atomic_dec(&etnaviv_obj->gpu_active);
submit->bos[i].mapping = NULL;
submit->bos[i].flags &= ~BO_PINNED;
}
/* if the GPU submit failed, objects might still be locked */
submit_unlock_object(submit, i);
drm_gem_object_put(&etnaviv_obj->base);
}
wake_up_all(&submit->gpu->fence_event);
if (submit->out_fence) { /* * Remove from user fence array before dropping the reference, * so fence can not be found in lookup anymore.
*/
xa_erase(&submit->gpu->user_fences, submit->out_fence_id);
dma_fence_put(submit->out_fence);
}
if ((args->flags & ETNA_SUBMIT_SOFTPIN) &&
priv->mmu_global->version != ETNAVIV_IOMMU_V2) {
DRM_ERROR("softpin requested on incompatible MMU\n"); return -EINVAL;
}
if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
DRM_ERROR("submit arguments out of size limits\n"); return -EINVAL;
}
/* * Copy the command submission and bo array to kernel space in * one go, and do this outside of any locks.
*/
bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL); if (!bos || !relocs || !pmrs || !stream) {
ret = -ENOMEM; goto err_submit_cmds;
}
ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
args->nr_bos * sizeof(*bos)); if (ret) {
ret = -EFAULT; goto err_submit_cmds;
}
ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
args->nr_relocs * sizeof(*relocs)); if (ret) {
ret = -EFAULT; goto err_submit_cmds;
}
ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
args->nr_pmrs * sizeof(*pmrs)); if (ret) {
ret = -EFAULT; goto err_submit_cmds;
}
ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
args->stream_size); if (ret) {
ret = -EFAULT; goto err_submit_cmds;
}
if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC); if (out_fence_fd < 0) {
ret = out_fence_fd; goto err_submit_cmds;
}
}
ww_acquire_init(&ticket, &reservation_ww_class);
submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs); if (!submit) {
ret = -ENOMEM; goto err_submit_ww_acquire;
}
submit->pid = pid;
ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
ALIGN(args->stream_size, 8) + 8); if (ret) goto err_submit_put;
ret = submit_lock_objects(submit, &ticket); if (ret) goto err_submit_job;
ret = submit_fence_sync(submit); if (ret) goto err_submit_job;
ret = etnaviv_sched_push_job(submit); if (ret) goto err_submit_job;
submit_attach_object_fences(submit);
if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) { /* * This can be improved: ideally we want to allocate the sync * file before kicking off the GPU job and just attach the * fence to the sync file here, eliminating the ENOMEM * possibility at this stage.
*/
sync_file = sync_file_create(submit->out_fence); if (!sync_file) {
ret = -ENOMEM; /* * When this late error is hit, the submit has already * been handed over to the scheduler. At this point * the sched_job must not be cleaned up.
*/ goto err_submit_put;
}
fd_install(out_fence_fd, sync_file->file);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.