/* Wait for the request to start executing, that then waits for us */ while (READ_ONCE(sema[2]) == 0)
cpu_relax();
/* Run the request for a 100us, sampling timestamps before/after */
local_irq_disable();
write_semaphore(&sema[2], 0); while (READ_ONCE(sema[1]) == 0) /* wait for the gpu to catch up */
cpu_relax();
*dt = local_clock();
udelay(100);
*dt = local_clock() - *dt;
write_semaphore(&sema[2], 1);
local_irq_enable();
/*
 * NOTE(review): this block looks like an accidental merge of several
 * selftests (the timestamp sampler, a GuC busyness poll and the atomic
 * engine-pm phase loop): it uses start/busyness/unused/gt/p and
 * igt_atomic_phases without declaring them, so it cannot compile as-is.
 * Only the defects fixable locally are addressed here: "staticint" ->
 * "static int", and a missing final return in a non-void function.
 * The undeclared variables must be resolved against the original file.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __live_engine_timestamps(struct intel_engine_cs *engine)
{
	/* Per-iteration samples: CPU delta plus ring/context timestamps */
	u64 s_ring[COUNT], s_ctx[COUNT], st[COUNT], d_ring, d_ctx, dt;
	struct intel_context *ce;
	int i, err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* Collect COUNT paired CPU/engine timestamp measurements */
	for (i = 0; i < COUNT; i++) {
		err = __measure_timestamps(ce, &st[i], &s_ring[i], &s_ctx[i]);
		if (err)
			break;
	}
	intel_context_put(ce);
	if (err)
		return err;

	/*
	 * In GuC mode of submission, the busyness stats may get updated after
	 * the batch starts running. Poll for a change in busyness and timeout
	 * after 500 us.
	 *
	 * NOTE(review): the comment above says 500us but the check below
	 * compares against 10000000 (ktime is in ns, so 10ms) -- confirm
	 * which is intended. start/busyness/unused are not declared here.
	 */
	start = ktime_get();
	while (intel_engine_get_busy_time(engine, &unused) == busyness) {
		dt = ktime_get() - start;
		if (dt > 10000000) {
			pr_err("active wait timed out %lld\n", dt);
			ENGINE_TRACE(engine, "active wait time out %lld\n", dt);
			return -ETIME;
		}
	}

	/*
	 * Check we can call intel_engine_pm_put from any context. No
	 * failures are reported directly, but if we mess up lockdep should
	 * tell us.
	 */
	if (intel_gt_pm_wait_for_idle(gt)) {
		pr_err("Unable to flush GT pm before test\n");
		return -EBUSY;
	}

	for (p = igt_atomic_phases; p->name; p++) {
		/*
		 * Acquisition is always synchronous, except if we
		 * know that the engine is already awake, in which
		 * case we should use intel_engine_pm_get_if_awake()
		 * to atomically grab the wakeref.
		 *
		 * In practice,
		 *    intel_engine_pm_get();
		 *    intel_engine_pm_put();
		 * occurs in one thread, while simultaneously
		 *    intel_engine_pm_get_if_awake();
		 *    intel_engine_pm_put();
		 * occurs from atomic context in another.
		 */
		GEM_BUG_ON(intel_engine_pm_is_awake(engine));
		intel_engine_pm_get(engine);

		p->critical_section_begin();
		if (!intel_engine_pm_get_if_awake(engine))
			pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
			       engine->name, p->name);
		else
			/* releases only the if_awake reference taken above */
			intel_engine_pm_put_async(engine);
		/* releases the wakeref taken by intel_engine_pm_get() */
		intel_engine_pm_put_async(engine);
		p->critical_section_end();

		intel_engine_pm_flush(engine);

		if (intel_engine_pm_is_awake(engine)) {
			pr_err("%s is still awake after flushing pm\n",
			       engine->name);
			return -EINVAL;
		}

		/* gt wakeref is async (deferred to workqueue) */
		if (intel_gt_pm_wait_for_idle(gt)) {
			gt_err(gt, "GT failed to idle\n");
			return -EINVAL;
		}
	}

	/* Fix: non-void function previously fell off the end without a return */
	return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.