/*
 * Haswell does have the CXT_SIZE register however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
/**
 * intel_engine_context_size() - return the size of the context for an engine
 * @gt: the gt
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case COMPUTE_CLASS:
		fallthrough;
	case RENDER_CLASS:
		switch (GRAPHICS_VER(gt->i915)) {
		default:
			MISSING_CASE(GRAPHICS_VER(gt->i915));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 12:
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(gt->i915))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = intel_uncore_read(uncore, CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
			/*
			 * There is a discrepancy here between the size reported
			 * by the register and the size of the context layout
			 * in the docs. Both are described as authoritative!
			 *
			 * The discrepancy is on the order of a few cachelines,
			 * but the total is under one page (4k), which is our
			 * minimum allocation anyway so it should all come
			 * out in the wash.
			 */
			cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
			gt_dbg(gt, "graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
			       GRAPHICS_VER(gt->i915), cxt_size * 64,
			       cxt_size - 1);
			return round_up(cxt_size * 64, PAGE_SIZE);
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		fallthrough;
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
	case OTHER_CLASS:
		if (GRAPHICS_VER(gt->i915) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}
/* Build a provisional debug-log name for @engine before its uABI name is known. */
static void __sprint_engine_name(struct intel_engine_cs *engine)
{
	/*
	 * Before we know what the uABI name for this engine will be,
	 * we still would like to keep track of this engine in the debug logs.
	 * We throw in a ' here as a reminder that this isn't its final name.
	 */
	GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
			     intel_engine_class_repr(engine->class),
			     engine->instance) >= sizeof(engine->name));
}
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{ /* * Though they added more rings on g4x/ilk, they did not add * per-engine HWSTAM until gen6.
*/ if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS) return;
static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
{
	/* Mask off all writes into the unknown HWSP */
	intel_engine_set_hwsp_writemask(engine, ~0u);
}
/* features common between engines sharing EUs */ if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) {
engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE;
engine->flags |= I915_ENGINE_HAS_EU_PRIORITY;
}
/* * Mid-thread pre-emption is not available in Gen12. Unfortunately, * some compute workloads run quite long threads. That means they get * reset due to not pre-empting in a timely manner. So, bump the * pre-emption timeout value to be much higher for compute engines.
*/ if (GRAPHICS_VER(i915) == 12 && (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE))
engine->props.preempt_timeout_ms = CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE;
/* Cap properties according to any system limits */ #define CLAMP_PROP(field) \ do { \
u64 clamp = intel_clamp_##field(engine, engine->props.field); \ if (clamp != engine->props.field) { \
drm_notice(&engine->i915->drm, \ "Warning, clamping %s to %lld to prevent overflow\n", \ #field, clamp); \
engine->props.field = clamp; \
} \
} while (0)
u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value)
{ /* * NB: The GuC API only supports 32bit values. However, the limit is further * reduced due to internal calculations which would otherwise overflow.
*/ if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt)))
value = min_t(u64, value, guc_policy_max_preempt_timeout_ms());
value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value)
{ /* * NB: The GuC API only supports 32bit values. However, the limit is further * reduced due to internal calculations which would otherwise overflow.
*/ if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt)))
value = min_t(u64, value, guc_policy_max_exec_quantum_ms());
value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
if (engine->class == VIDEO_DECODE_CLASS) { /* * HEVC support is present on first engine instance * before Gen11 and on all instances afterwards.
*/ if (GRAPHICS_VER(i915) >= 11 ||
(GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
engine->uabi_capabilities |=
I915_VIDEO_CLASS_CAPABILITY_HEVC;
/* * SFC block is present only on even logical engine * instances.
*/ if ((GRAPHICS_VER(i915) >= 11 &&
(engine->gt->info.vdbox_sfc_access &
BIT(engine->instance))) ||
(GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
} elseif (engine->class == VIDEO_ENHANCEMENT_CLASS) { if (GRAPHICS_VER(i915) >= 9 &&
engine->gt->info.sfc_mask & BIT(engine->instance))
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
}
}
/** * intel_engines_release() - free the resources allocated for Command Streamers * @gt: pointer to struct intel_gt
*/ void intel_engines_release(struct intel_gt *gt)
{ struct intel_engine_cs *engine; enum intel_engine_id id;
/* * Before we release the resources held by engine, we must be certain * that the HW is no longer accessing them -- having the GPU scribble * to or read from a page being used for something else causes no end * of fun. * * The GPU should be reset by this point, but assume the worst just * in case we aborted before completely initialising the engines.
*/
GEM_BUG_ON(intel_gt_pm_is_awake(gt)); if (!intel_gt_gpu_reset_clobbers_display(gt))
intel_gt_reset_all_engines(gt);
/* Decouple the backend; but keep the layout for late GPU resets */
for_each_engine(engine, gt, id) { if (!engine->release) continue;
/* * In Gen11, only even numbered logical VDBOXes are hooked * up to an SFC (Scaler & Format Converter) unit. * In Gen12, Even numbered physical instance always are connected * to an SFC. Odd numbered physical instances have SFC only if * previous even instance is fused off. * * Starting with Xe_HP, there's also a dedicated SFC_ENABLE field * in the fuse register that tells us whether a specific SFC is present.
*/ if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0) returnfalse; elseif (MEDIA_VER(i915) >= 12) return (physical_vdbox % 2 == 0) ||
!(BIT(physical_vdbox - 1) & vdbox_mask); elseif (MEDIA_VER(i915) == 11) return logical_vdbox % 2 == 0;
/* * On newer platforms the fusing register is called 'enable' and has * enable semantics, while on older platforms it is called 'disable' * and bits have disable semantices.
*/
media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE); if (MEDIA_VER_FULL(i915) < IP_VER(12, 55))
media_fuse = ~media_fuse;
ccs_mask = intel_slicemask_from_xehp_dssmask(info->sseu.compute_subslice_mask,
ss_per_ccs); /* * If all DSS in a quadrant are fused off, the corresponding CCS * engine is not available for use.
*/
for_each_clear_bit(i, &ccs_mask, I915_MAX_CCS) {
info->engine_mask &= ~BIT(_CCS(i));
gt_dbg(gt, "ccs%u fused off\n", i);
}
}
/* * Determine which engines are fused off in our particular hardware. * Note that we have a catch-22 situation where we need to be able to access * the blitter forcewake domain to read the engine fuses, but at the same time * we need to know which engines are available on the system to know which * forcewake domains are present. We solve this by initializing the forcewake * domains based on the full engine mask in the platform capabilities before * calling this function and pruning the domains for fused-off engines * afterwards.
*/ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
{ struct intel_gt_info *info = >->info;
/* * The only use of the GSC CS is to load and communicate with the GSC * FW, so we have no use for it if we don't have the FW. * * IMPORTANT: in cases where we don't have the GSC FW, we have a * catch-22 situation that breaks media C6 due to 2 requirements: * 1) once turned on, the GSC power well will not go to sleep unless the * GSC FW is loaded. * 2) to enable idling (which is required for media C6) we need to * initialize the IDLE_MSG register for the GSC CS and do at least 1 * submission, which will wake up the GSC power well.
*/ if (__HAS_ENGINE(info->engine_mask, GSC0) && !intel_uc_wants_gsc_uc(>->uc)) {
gt_notice(gt, "No GSC FW selected, disabling GSC CS and media C6\n");
info->engine_mask &= ~BIT(GSC0);
}
/* * Do not create the command streamer for CCS slices beyond the first. * All the workload submitted to the first engine will be shared among * all the slices. * * Once the user will be allowed to customize the CCS mode, then this * check needs to be removed.
*/ if (IS_DG2(gt->i915)) {
u8 first_ccs = __ffs(CCS_MASK(gt));
/* * Store the number of active cslices before * changing the CCS engine configuration
*/
gt->ccs.cslices = CCS_MASK(gt);
/* Mask off all the CCS engine */
info->engine_mask &= ~GENMASK(CCS3, CCS0); /* Put back in the first CCS engine */
info->engine_mask |= BIT(_CCS(first_ccs));
}
if (i915_inject_probe_failure(i915)) return -ENODEV;
for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
setup_logical_ids(gt, logical_ids, class);
for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
u8 instance = intel_engines[i].instance;
if (intel_engines[i].class != class ||
!HAS_ENGINE(gt, i)) continue;
err = intel_engine_setup(gt, i,
logical_ids[instance]); if (err) goto cleanup;
mask |= BIT(i);
}
}
/* * Catch failures to update intel_engines table when the new engines * are added to the driver by a warning and disabling the forgotten * engines.
*/ if (drm_WARN_ON(&i915->drm, mask != engine_mask))
gt->info.engine_mask = mask;
if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt)) /* * On g33, we cannot place HWS above 256MiB, so * restrict its pinning to the low mappable arena. * Though this restriction is not documented for * gen4, gen5, or byt, they also behave similarly * and hang if the HWS is placed at the top of the * GTT. To generalise, it appears that all !llc * platforms have issues with us placing the HWS * above the mappable region (even though we never * actually map it).
*/
flags = PIN_MAPPABLE; else
flags = PIN_HIGH;
/* * Though the HWS register does support 36bit addresses, historically * we have had hangs and corruption reported due to wild writes if * the HWS is placed above 4G. We only allow objects to be allocated * in GFP_DMA32 for i965, and no earlier physical address users had * access to more than 4G.
*/
obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); if (IS_ERR(obj)) {
gt_err(engine->gt, "Failed to allocate status page\n"); return PTR_ERR(obj);
}
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); if (IS_ERR(vma)) {
ret = PTR_ERR(vma); goto err_put;
}
i915_gem_ww_ctx_init(&ww, true);
retry:
ret = i915_gem_object_lock(obj, &ww); if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915))
ret = pin_ggtt_status_page(engine, &ww, vma); if (ret) goto err;
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr); goto err_unpin;
}
/* * New platforms should not be added with catch-all-newer (>=) * condition so that any later platform added triggers the below warning * and in turn mandates a human cross-check of whether the invalidation * flows have compatible semantics. * * For instance with the 11.00 -> 12.00 transition three out of five * respective engine registers were moved to masked type. Then after the * 12.00 -> 12.50 transition multi cast handling is required too.
*/
if (gt_WARN_ONCE(engine->gt, !num, "Platform does not implement TLB invalidation!")) return -ENODEV;
if (gt_WARN_ON_ONCE(engine->gt, class >= num ||
(!regs[class].reg.reg &&
!regs[class].mcr_reg.reg))) return -ERANGE;
reg = regs[class];
if (regs == xelpmp_regs && class == OTHER_CLASS) { /* * There's only a single GSC instance, but it uses register bit * 1 instead of either 0 or OTHER_GSC_INSTANCE.
*/
GEM_WARN_ON(instance != OTHER_GSC_INSTANCE);
val = 1;
} elseif (regs == gen8_regs && class == VIDEO_DECODE_CLASS && instance == 1) {
reg.reg = GEN8_M2TCR;
val = 0;
} else {
val = instance;
}
/* * Give our perma-pinned kernel timelines a separate lockdep class, * so that we can use them from within the normal user timelines * should we need to inject GPU operations during their request * construction.
*/
lockdep_set_class_and_name(&ce->timeline->mutex, key, name);
/* * MI_UPDATE_GTT can insert up to 511 PTE entries and there could be multiple * bind requests at a time so get a bigger ring.
*/ return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K,
I915_GEM_HWS_GGTT_BIND_ADDR,
&kernel, "ggtt_bind_context");
}
/* * engine_init_common - initialize engine state which might require hw access * @engine: Engine to initialize. * * Initializes @engine@ structure members shared between legacy and execlists * submission modes which do require hardware access. * * Typcally done at later stages of submission mode specific engine setup. * * Returns zero on success or an error code on failure.
*/ staticint engine_init_common(struct intel_engine_cs *engine)
{ struct intel_context *ce, *bce = NULL; int ret;
engine->set_default_submission(engine);
/* * We may need to do things with the shrinker which * require us to immediately switch back to the default * context. This can cause a problem as pinning the * default context also requires GTT space which may not * be available. To avoid this we always pin the default * context.
*/
ce = create_kernel_context(engine); if (IS_ERR(ce)) return PTR_ERR(ce); /* * Create a separate pinned context for GGTT update with blitter engine * if a platform require such service. MI_UPDATE_GTT works on other * engines as well but BCS should be less busy engine so pick that for * GGTT updates.
*/ if (i915_ggtt_require_binder(engine->i915) && engine->id == BCS0) {
bce = create_ggtt_bind_context(engine); if (IS_ERR(bce)) {
ret = PTR_ERR(bce); goto err_ce_context;
}
}
ret = measure_breadcrumb_dw(ce); if (ret < 0) goto err_bce_context;
err = setup(engine); if (err) {
intel_engine_cleanup_common(engine); return err;
}
/* The backend should now be responsible for cleanup */
GEM_BUG_ON(engine->release == NULL);
err = engine_init_common(engine); if (err) return err;
intel_engine_add_user(engine);
}
return 0;
}
/** * intel_engine_cleanup_common - cleans up the engine state created by * the common initializers. * @engine: Engine to cleanup. * * This cleans up everything created by the common helpers.
*/ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));
/** * intel_engine_resume - re-initializes the HW state of the engine * @engine: Engine to resume. * * Returns zero on success or an error code on failure.
*/ int intel_engine_resume(struct intel_engine_cs *engine)
{
intel_engine_apply_workarounds(engine);
intel_engine_apply_whitelist(engine);
/* * If we are doing a normal GPU reset, we can take our time and allow * the engine to quiesce. We've stopped submission to the engine, and * if we wait long enough an innocent context should complete and * leave the engine idle. So they should not be caught unaware by * the forthcoming GPU reset (which usually follows the stop_cs)!
*/ return READ_ONCE(engine->props.stop_timeout_ms);
}
staticint __intel_engine_stop_cs(struct intel_engine_cs *engine, int fast_timeout_us, int slow_timeout_ms)
{ struct intel_uncore *uncore = engine->uncore; const i915_reg_t mode = RING_MI_MODE(engine->mmio_base); int err;
/* * Wa_22011802037: Prior to doing a reset, ensure CS is * stopped, set ring stop bit and prefetch disable bit to halt CS
*/ if (intel_engine_reset_needs_wa_22011802037(engine->gt))
intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
_MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
/* A final mmio read to let GPU writes be hopefully flushed to memory */
intel_uncore_posting_read_fw(uncore, mode); return err;
}
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{ int err = 0;
if (GRAPHICS_VER(engine->i915) < 3) return -ENODEV;
ENGINE_TRACE(engine, "\n"); /* * TODO: Find out why occasionally stopping the CS times out. Seen * especially with gem_eio tests. * * Occasionally trying to stop the cs times out, but does not adversely * affect functionality. The timeout is set as a config parameter that * defaults to 100ms. In most cases the follow up operation is to wait * for pending MI_FORCE_WAKES. The assumption is that this timeout is * sufficient for any pending MI_FORCEWAKEs to complete. Once root * caused, the caller must check and handle the return from this * function.
*/ if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);
/* * Sometimes we observe that the idle flag is not * set even though the ring is empty. So double * check before giving up.
*/ if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
(ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
err = -ETIMEDOUT;
}
/* Wait for the forcewake request(s) in @fw_mask to be acknowledged by GPM. */
static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
{
	int ret;

	/* Ensure GPM receives fw up/down after CS is stopped */
	udelay(1);

	/* Wait for forcewake request to complete in GPM */
	ret = __intel_wait_for_register_fw(gt->uncore,
					   GEN9_PWRGT_DOMAIN_STATUS,
					   fw_mask, fw_mask, 5000, 0, NULL);

	/* Ensure CS receives fw ack from GPM */
	udelay(1);

	if (ret)
		GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
}
/*
 * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
 * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
 * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
 * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
 * are concerned only with the gt reset here, we use a logical OR of pending
 * forcewakeups from all reset domains and then wait for them to complete by
 * querying PWRGT_DOMAIN_STATUS.
 */
void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine)
{
	u32 fw_pending = __cs_pending_mi_force_wakes(engine);

	if (fw_pending)
		__gpm_wait_for_fw_complete(engine->gt, fw_pending);
}
/* NB: please notice the memset */ void intel_engine_get_instdone(conststruct intel_engine_cs *engine, struct intel_instdone *instdone)
{ struct drm_i915_private *i915 = engine->i915; struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base; int slice; int subslice; int iter;
memset(instdone, 0, sizeof(*instdone));
if (GRAPHICS_VER(i915) >= 8) {
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (I915_SELFTEST_ONLY(!engine->mmio_base)) returntrue;
if (!intel_engine_pm_get_if_awake(engine)) returntrue;
/* First check that no commands are left in the ring */ if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
(ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
idle = false;
/* No bit for gen2, so assume the CS parser is idle */ if (GRAPHICS_VER(engine->i915) > 2 &&
!(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
idle = false;
local_bh_disable(); if (tasklet_trylock(t)) { /* Must wait for any GPU reset in progress. */ if (__tasklet_is_enabled(t))
t->callback(t);
tasklet_unlock(t);
}
local_bh_enable();
/* Synchronise and wait for the tasklet on another CPU */ if (sync)
tasklet_unlock_wait(t);
}
/** * intel_engine_is_idle() - Report if the engine has finished process all work * @engine: the intel_engine_cs * * Return true if there are no requests pending, nothing left to be submitted * to hardware, and that the engine is idle.
*/ bool intel_engine_is_idle(struct intel_engine_cs *engine)
{ /* More white lies, if wedged, hw state is inconsistent */ if (intel_gt_is_wedged(engine->gt)) returntrue;
if (!intel_engine_pm_is_awake(engine)) returntrue;
/* Waiting to drain ELSP? */
intel_synchronize_hardirq(engine->i915);
intel_engine_flush_submission(engine);
/* ELSP is empty, but there are ready requests? E.g. after reset */ if (!i915_sched_engine_is_empty(engine->sched_engine)) returnfalse;
/* Ring stopped? */ return ring_is_idle(engine);
}
/* * If the driver is wedged, HW state may be very inconsistent and * report that it is still busy, even though we have stopped using it.
*/ if (intel_gt_is_wedged(gt)) returntrue;
/* Already parked (and passed an idleness test); must still be idle */ if (!READ_ONCE(gt->awake)) returntrue;
for_each_engine(engine, gt, id) { if (!intel_engine_is_idle(engine)) returnfalse;
}
returntrue;
}
bool intel_engine_irq_enable(struct intel_engine_cs *engine)
{ if (!engine->irq_enable) returnfalse;
/* * Even though we are holding the engine->sched_engine->lock here, there * is no control over the submission queue per-se and we are * inspecting the active state at a random point in time, with an * unknown queue. Play safe and make sure the timeline remains valid. * (Only being used for pretty printing, one extra kref shouldn't * cause a camel stampede!)
*/
rcu_read_lock();
tl = rcu_dereference(rq->timeline); if (!kref_get_unless_zero(&tl->kref))
tl = NULL;
rcu_read_unlock();
return tl;
}
staticint print_ring(char *buf, int sz, struct i915_request *rq)
{ int len = 0;
if (!i915_request_signaled(rq)) { struct intel_timeline *tl = get_timeline(rq);
/* * No need for an engine->irq_seqno_barrier() before the seqno reads. * The GPU is still running so requests are still executing and any * hardware reads will be out of date by the time they are reported. * But the intention here is just to report an instantaneous snapshot * so that's fine.
*/
intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);
drm_printf(m, "\tRequests:\n");
if (hung_rq)
engine_dump_request(hung_rq, m, "\t\thung"); elseif (hung_ce)
drm_printf(m, "\t\tGot hung ce but no hung rq!\n");
if (intel_uc_uses_guc_submission(&engine->gt->uc))
intel_guc_dump_active_requests(engine, hung_rq, m); else
intel_execlists_dump_active_requests(engine, hung_rq, m);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.