/*
 * NOTE(review): this block appears to be a bad splice of two different
 * functions.  The prologue matches a request_sync()-style helper (pin the
 * timeline, opencode i915_request_add() so the timeline stays locked), but
 * from the redzone comment onward the code operates on `engine`, `vaddr`
 * and `saved` — none of which are declared here — and the trailing
 * `break`/`}` have no enclosing loop in view.  The declared `timeout` is
 * never used in the visible code, and `staticint` is a fused token
 * (lost whitespace).  This will not compile as-is; reconstruct against the
 * upstream i915 selftest sources before building.
 */
staticint request_sync(struct i915_request *rq)
{ struct intel_timeline *tl = i915_request_timeline(rq); long timeout; int err = 0;
/* Hold references so neither the timeline nor the request can be freed
 * while we submit and (presumably, in the missing tail) wait on it. */
intel_timeline_get(tl);
i915_request_get(rq);
/* Opencode i915_request_add() so we can keep the timeline locked. */
__i915_request_commit(rq);
/* Raise to barrier priority before queueing — presumably so the request
 * is scheduled ahead of other work; confirm against the i915 scheduler. */
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
__i915_request_queue_bh(rq);
/* * Note that execlists also applies a redzone which it checks on * context unpin when debugging. We are using the same location * and same poison value so that our checks overlap. Despite the * redundancy, we want to keep this little selftest so that we * get coverage of any and all submission backends, and we can * always extend this test to ensure we trick the HW into a * compromising position wrt to the various sections that need * to be written into the context state. * * TLDR; this overlaps with the execlists redzone.
 */
/* Poison the last page of the context image (the redzone location). */
vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
/* * Hide the old default state -- we lie about the context size * and get confused when the default state is smaller than * expected. For our do nothing request, inheriting the * active state is sufficient, we are only checking that we * don't use more than we planned.
 */
saved = fetch_and_zero(&engine->default_state);
/* Overlaps with the execlists redzone */
engine->context_size += I915_GTT_PAGE_SIZE;
err = __live_context_size(engine);
engine->context_size -= I915_GTT_PAGE_SIZE;
/* Restore the engine state we temporarily clobbered above. */
engine->default_state = saved;
intel_engine_pm_put(engine);
/* NOTE(review): `break` implies an enclosing loop that is not visible here. */
if (err) break;
}
return err;
}
/*
 * NOTE(review): this function body is incomplete in this chunk.  The
 * per-pass loop header (something like `for (pass = 0; pass <= 2; pass++)`)
 * that the `out_engine:` cleanup and the stray `}` at the end of the loop
 * belong to is missing, so `pass` is read without ever being assigned,
 * `saved_heartbeat` is declared but never used in the visible code, the
 * `goto err` targets a label that is not in view, and the function is cut
 * off after GEM_TRACE_DUMP() with no closing brace.  `staticint` and
 * `unsignedlong` are fused tokens (lost whitespace).  Reconstruct from
 * the upstream i915 selftest sources before building.
 */
staticint __live_active_context(struct intel_engine_cs *engine)
{ unsignedlong saved_heartbeat; struct intel_context *ce; int pass; int err;
/* * We keep active contexts alive until after a subsequent context * switch as the final write from the context-save will be after * we retire the final request. We track when we unpin the context, * under the presumption that the final pin is from the last request, * and instead of immediately unpinning the context, we add a task * to unpin the context from the next idle-barrier. * * This test makes sure that the context is kept alive until a * subsequent idle-barrier (emitted when the engine wakeref hits 0 * with no more outstanding requests). * * In GuC submission mode we don't use idle barriers and we instead * get a message from the GuC to signal that it is safe to unpin the * context from memory.
 */ if (intel_engine_uses_guc(engine)) return 0;
/* Precondition: the engine must start parked, or the idle-barrier
 * bookkeeping this test relies on would already be in flight. */
if (intel_engine_pm_is_awake(engine)) {
pr_err("%s is awake before starting %s!\n",
engine->name, __func__); return -EINVAL;
}
ce = intel_context_create(engine); if (IS_ERR(ce)) return PTR_ERR(ce);
/* Context will be kept active until after an idle-barrier. */ if (i915_active_is_idle(&ce->active)) {
pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
engine->name, pass);
err = -EINVAL; goto out_engine;
}
/* Holding the idle-barrier must also hold the engine wakeref awake. */
if (!intel_engine_pm_is_awake(engine)) {
pr_err("%s is asleep before idle-barrier\n",
engine->name);
err = -EINVAL; goto out_engine;
}
out_engine:
/* NOTE(review): `goto err` targets a label outside the visible chunk. */
intel_engine_pm_put(engine); if (err) goto err;
}
/* Now make sure our idle-barriers are flushed */
err = intel_engine_flush_barriers(engine); if (err) goto err;
/* Wait for the barrier and in the process wait for engine to park */
err = context_sync(engine->kernel_context); if (err) goto err;
/* After the barrier has run, the context tracking must have gone idle. */
if (!i915_active_is_idle(&ce->active)) {
pr_err("context is still active!");
err = -EINVAL;
}
intel_engine_pm_flush(engine);
/* If the engine failed to park after the idle-barriers, dump its state
 * for debugging before (presumably, in the missing tail) failing. */
if (intel_engine_pm_is_awake(engine)) { struct drm_printer p = drm_dbg_printer(&engine->i915->drm,
DRM_UT_DRIVER, NULL);
intel_engine_dump(engine, &p, "%s is still awake:%d after idle-barriers\n",
engine->name,
atomic_read(&engine->wakeref.count));
GEM_TRACE_DUMP();
/*
 * NOTE(review): truncated — the body ends mid-function: the `err_remote`
 * cleanup label targeted by the goto, the per-pass loop implied by the
 * `pass` declaration, and the closing brace are not in view, and the
 * declared `saved_heartbeat` is never used in the visible code.
 * `staticint`/`unsignedlong` are fused tokens (lost whitespace).
 * Reconstruct from the upstream i915 selftest sources before building.
 */
staticint __live_remote_context(struct intel_engine_cs *engine)
{ struct intel_context *local, *remote; unsignedlong saved_heartbeat; int pass; int err;
/* * Check that our idle barriers do not interfere with normal * activity tracking. In particular, check that operating * on the context image remotely (intel_context_prepare_remote_request), * which inserts foreign fences into intel_context.active, does not * clobber the idle-barrier. * * In GuC submission mode we don't use idle barriers.
 */ if (intel_engine_uses_guc(engine)) return 0;
/* Precondition: the engine must start parked (same as the other tests). */
if (intel_engine_pm_is_awake(engine)) {
pr_err("%s is awake before starting %s!\n",
engine->name, __func__); return -EINVAL;
}
/* Two contexts: `remote` is the one we poke from afar, `local` does work. */
remote = intel_context_create(engine); if (IS_ERR(remote)) return PTR_ERR(remote);
local = intel_context_create(engine); if (IS_ERR(local)) {
err = PTR_ERR(local); goto err_remote;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.