/* Retrieve the current fence attached to that point. It's * perfectly fine to get a NULL fence here, it just means there's * no fence attached to that point yet.
*/ if (!drm_syncobj_find_fence(file, handle, point, 0, &cur_fence))
sig_sync->fence = cur_fence;
dma_fence_unwrap_for_each(uf, &iter, f) { if (pvr_queue_fence_is_ufo_backed(uf))
native_fence_count++;
}
/* No need to unwrap the fence if it's fully non-native. */ if (!native_fence_count) return drm_sched_job_add_dependency(job, f);
dma_fence_unwrap_for_each(uf, &iter, f) { /* There's no dma_fence_unwrap_stop() helper cleaning up the refs * owned by dma_fence_unwrap(), so let's just iterate over all * entries without doing anything when something failed.
*/ if (err) continue;
if (pvr_queue_fence_is_ufo_backed(uf)) { struct drm_sched_fence *s_fence = to_drm_sched_fence(uf);
/* If this is a native dependency, we wait for the scheduled fence, * and we will let pvr_queue_run_job() issue FW waits.
*/
err = drm_sched_job_add_dependency(job,
dma_fence_get(&s_fence->scheduled));
} else {
err = drm_sched_job_add_dependency(job, dma_fence_get(uf));
}
}
dma_fence_put(f); return err;
}
int
pvr_sync_add_deps_to_job(struct pvr_file *pvr_file, struct drm_sched_job *job,
u32 sync_op_count, conststruct drm_pvr_sync_op *sync_ops, struct xarray *signal_array)
{ int err = 0;
if (!sync_op_count) return 0;
for (u32 i = 0; i < sync_op_count; i++) { struct pvr_sync_signal *sig_sync; struct dma_fence *fence;
if (sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL) continue;
err = pvr_check_sync_op(&sync_ops[i]); if (err) return err;
sig_sync = pvr_sync_signal_array_search(signal_array, sync_ops[i].handle,
sync_ops[i].value); if (sig_sync) { if (WARN_ON(!sig_sync->fence)) return -EINVAL;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.