/* Apply the GT workarounds... */
intel_gt_apply_workarounds(gt); /* ...and determine whether they are sticking. */
intel_gt_verify_workarounds(gt, "init");
intel_gt_init_swizzling(gt);
/* * At least 830 can leave some of the unused rings * "active" (ie. head != tail) after resume which * will prevent c3 entry. Makes sure all unused rings * are totally idle.
*/
init_unused_rings(gt);
ret = i915_ppgtt_init_hw(gt); if (ret) {
gt_err(gt, "Enabling PPGTT failed (%d)\n", ret); goto out;
}
/* We can't enable contexts until all firmware is loaded */
ret = intel_uc_init_hw(>->uc); if (ret) {
gt_probe_error(gt, "Enabling uc failed (%d)\n", ret); goto out;
}
/*
 * Return the MMIO register holding the performance-limit reasons for @gt,
 * or INVALID_MMIO_REG when the platform predates it.
 */
i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt)
{
	/* GT0_PERF_LIMIT_REASONS is available only for Gen11+ */
	if (GRAPHICS_VER(gt->i915) < 11)
		return INVALID_MMIO_REG;
	/*
	 * NOTE(review): function appears truncated in this chunk — the
	 * Gen11+ return path (and closing brace) is missing; confirm
	 * against the full file.
	 */
	/* Clear any latched error state, then check whether it re-asserts. */
	intel_uncore_write(uncore, EIR, 0);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		gt_dbg(gt, "EIR stuck: 0x%08x, masking\n", eir);
		/* Mask the stuck error bits in EMR, then ack the interrupt. */
		intel_uncore_rmw(uncore, EMR, 0, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	/*
	 * For the media GT, this ring fault register is not replicated,
	 * so don't do multicast/replicated register read/write operation on it.
	 */
	/*
	 * NOTE(review): this chunk looks stitched together from more than one
	 * function — the fault-clear (rmw + posting read) and the fault-report
	 * (MCR read + gen8_report_fault) normally live in separate routines;
	 * verify against the full file.
	 */
	if (MEDIA_VER(i915) >= 13 && gt->type == GT_MEDIA) {
		intel_uncore_rmw(uncore, XELPMP_RING_FAULT_REG,
				 RING_FAULT_VALID, 0);
		/* Posting read forces the write to land before we proceed. */
		intel_uncore_posting_read(uncore,
					  XELPMP_RING_FAULT_REG);
		/*
		 * Although the fault register now lives in an MCR register range,
		 * the GAM registers are special and we only truly need to read
		 * the "primary" GAM instance rather than handling each instance
		 * individually. intel_gt_mcr_read_any() will automatically steer
		 * toward the primary instance.
		 */
		fault = intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
		if (fault & RING_FAULT_VALID)
			gen8_report_fault(gt, fault,
					  intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0),
					  intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1));
	}
	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */
	wmb();

	/* Coherent GGTT needs no further (chipset) flushing — done. */
	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
		return;
	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its defaults values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	/* Flush the default context image to memory, and enable powersaving. */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		err = -EIO;
		goto out;
	}

	/* Harvest the saved default state from each recorded request. */
	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct i915_request *rq;
		struct file *state;

		rq = requests[id];
		if (!rq)
			continue;

		/* A failed request means its context image is unusable. */
		if (rq->fence.error) {
			err = -EIO;
			goto out;
		}

		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
		if (!rq->context->state)
			continue;

		/* Keep a copy of the state's backing pages; free the obj */
		state = shmem_create_from_object(rq->context->state->obj);
		if (IS_ERR(state)) {
			err = PTR_ERR(state);
			goto out;
		}
		rq->engine->default_state = state;
	}

out:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	if (err)
		intel_gt_set_wedged(gt);

	/* Drop our references on every recorded request and its context. */
	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct intel_context *ce;
		struct i915_request *rq;

		rq = requests[id];
		if (!rq)
			continue;

		ce = rq->context;
		i915_request_put(rq);
		intel_context_put(ce);
	}

	return err;
}
	/* Fault-injection point for probe failure testing. */
	err = i915_inject_probe_error(gt->i915, -ENODEV);
	if (err)
		return err;

	intel_gt_init_workarounds(gt);

	/*
	 * This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
/* * If we unload the driver and wedge before the GSC worker is complete, * the worker will hit an error on its submission to the GSC engine and * then exit. This is hard to hit for a user, but it is reproducible * with skipping selftests. The error is handled gracefully by the * worker, so there are no functional issues, but we still end up with * an error message in dmesg, which is something we want to avoid as * this is a supported scenario. We could modify the worker to better * handle a wedging occurring during its execution, but that gets * complicated for a couple of reasons: * - We do want the error on runtime wedging, because there are * implications for subsystems outside of GT (i.e., PXP, HDCP), it's * only the error on driver unload that we want to silence. * - The worker is responsible for multiple submissions (GSC FW load, * HuC auth, SW proxy), so all of those will have to be adapted to * handle the wedged_on_fini scenario. * Therefore, it's much simpler to just wait for the worker to be done * before wedging on driver removal, also considering that the worker * will likely already be idle in the great majority of non-selftest * scenarios.
*/
intel_gsc_uc_flush_work(>->uc.gsc);
/* * Upon unregistering the device to prevent any new users, cancel * all in-flight requests so that we can quickly unbind the active * resources.
*/
intel_gt_set_wedged_on_fini(gt);
/* Scrub all HW state upon release */
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
intel_gt_reset_all_engines(gt);
}
	/*
	 * We always have at least one primary GT on any device
	 * and it has been already initialized early during probe
	 * in i915_driver_probe()
	 */
	gt->i915 = i915;
	gt->name = "Primary GT";
	gt->info.engine_mask = INTEL_INFO(i915)->platform_engine_mask;

	gt_dbg(gt, "Setting up %s\n", gt->name);
	ret = intel_gt_tile_setup(gt, phys_addr);
	if (ret)
		return ret;

	/* Platforms without extra GTs are done after the primary tile. */
	if (!HAS_EXTRA_GT_LIST(i915))
		return 0;

	/* Walk the NULL-name-terminated extra_gt_list, one GT per entry. */
	for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
	     gtdef->name != NULL;
	     i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
		/* drmm_kzalloc ties the allocation's lifetime to the drm device. */
		gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
		if (!gt) {
			ret = -ENOMEM;
			goto err;
		}
		/* NOTE(review): chunk ends mid-loop — remainder not visible here. */
/*
 * Pick the CPU mapping type for @obj on @gt: write-combined for local
 * memory or when the Wa_22016122933 workaround applies, otherwise
 * write-back on cache-coherent platforms (or when the caller insists),
 * falling back to write-combined.
 */
enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
					      struct drm_i915_gem_object *obj,
					      bool always_coherent)
{
	/*
	 * Wa_22016122933: always return I915_MAP_WC for Media
	 * version 13.0 when the object is on the Media GT
	 */
	if (i915_gem_object_is_lmem(obj) || intel_gt_needs_wa_22016122933(gt))
		return I915_MAP_WC;

	return (HAS_LLC(gt->i915) || always_coherent) ? I915_MAP_WB
						      : I915_MAP_WC;
}
	/*
	 * NOTE(review): chunk starts mid-function; "engine" and "ready" are
	 * presumably the local/parameter of __intel_gt_bind_context_set_ready()
	 * — confirm against the full file.
	 */
	/* Only record readiness when the engine exposes a context binder. */
	if (engine && engine->bind_context)
		engine->bind_context_ready = ready;
}
/**
 * intel_gt_bind_context_set_ready - Set the context binding as ready
 *
 * @gt: GT structure
 *
 * This function marks the binder context as ready.
 */
void intel_gt_bind_context_set_ready(struct intel_gt *gt)
{
	__intel_gt_bind_context_set_ready(gt, true);
}
/**
 * intel_gt_bind_context_set_unready - Set the context binding as unready
 * @gt: GT structure
 *
 * This function marks the binder context as not ready.
 */
/*
 * NOTE(review): the German text below is website boilerplate ("the
 * information on this web page was compiled to the best of our knowledge;
 * however neither completeness, correctness, nor quality of the provided
 * information is guaranteed. Remark: the coloured syntax display and the
 * measurement are still experimental") — evidently extraction residue,
 * not driver source. Confirm and remove.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */