/* Table must be set to invalid values for entries not used */ for (i = 0; i < GUC_MAX_ENGINE_CLASSES; ++i) for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; ++j)
info_map_write(info_map, mapping_table[i][j],
GUC_MAX_INSTANCES_PER_CLASS);
/*
 * The save/restore register list must be pre-calculated to a temporary
 * buffer before it can be copied inside the ADS.
 */
struct temp_regset {
	/*
	 * ptr to the section of the storage for the engine currently being
	 * worked on
	 */
	struct guc_mmio_reg *registers;
	/* ptr to the base of the allocated storage for all engines */
	struct guc_mmio_reg *storage;
	/* number of storage entries consumed so far across all engines */
	u32 storage_used;
	/* total capacity of storage — presumably in entries, TODO confirm */
	u32 storage_max;
};
/* * The mmio list is built using separate lists within the driver. * It's possible that at some point we may attempt to add the same * register more than once. Do not consider this an error; silently * move on if the register is already in the list.
*/ if (bsearch(&entry, regset->registers, count, sizeof(entry), guc_mmio_reg_cmp)) return 0;
slot = __mmio_reg_add(regset, &entry); if (IS_ERR(slot)) return PTR_ERR(slot);
while (slot-- > regset->registers) {
GEM_BUG_ON(slot[0].offset == slot[1].offset); if (slot[1].offset > slot[0].offset) break;
/* * The GuC doesn't have a default steering, so we need to explicitly * steer all registers that need steering. However, we do not keep track * of all the steering ranges, only of those that have a chance of using * a non-default steering from the i915 pov. Instead of adding such * tracking, it is easier to just program the default steering for all * regs that don't need a non-default one.
*/
intel_gt_mcr_get_nonterminated_steering(gt, reg, &group, &inst);
flags |= GUC_REGSET_STEERING(group, inst);
/* * Each engine's registers point to a new start relative to * storage
*/
regset->registers = regset->storage + regset->storage_used;
ret |= GUC_MMIO_REG_ADD(gt, regset, RING_MODE_GEN7(base), true);
ret |= GUC_MMIO_REG_ADD(gt, regset, RING_HWS_PGA(base), false);
ret |= GUC_MMIO_REG_ADD(gt, regset, RING_IMR(base), false);
if ((engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) &&
CCS_MASK(engine->gt))
ret |= GUC_MMIO_REG_ADD(gt, regset, GEN12_RCU_MODE, true);
/* * some of the WA registers are MCR registers. As it is safe to * use MCR form for non-MCR registers, for code simplicity, all * WA registers are added with MCR form.
*/ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
ret |= GUC_MCR_REG_ADD(gt, regset, wa->mcr_reg, wa->masked_reg);
/* Be extra paranoid and include all whitelist registers. */ for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
ret |= GUC_MMIO_REG_ADD(gt, regset,
RING_FORCE_TO_NONPRIV(base, i), false);
/* add in local MOCS registers */ for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
ret |= GUC_MCR_REG_ADD(gt, regset, XEHP_LNCFCMOCS(i), false); else
ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
if (GRAPHICS_VER(engine->i915) >= 12) {
ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL0)), false);
ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL1)), false);
ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL2)), false);
ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL3)), false);
ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL4)), false);
ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL5)), false);
ret |= GUC_MCR_REG_ADD(gt, regset, MCR_REG(i915_mmio_reg_offset(EU_PERF_CNTL6)), false);
}
return ret ? -1 : 0;
}
/*
 * Build the MMIO save/restore regset for every engine into a temporary
 * buffer and record, per engine, how many entries that engine added.
 *
 * NOTE(review): the declaration read "staticlong" (missing space between
 * storage class and return type) — fixed to "static long".
 */
static long guc_mmio_reg_state_create(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct temp_regset temp_set = {};
	long total = 0;
	long ret;

	for_each_engine(engine, gt, id) {
		/* snapshot so the delta after init is this engine's count */
		u32 used = temp_set.storage_used;

		ret = guc_mmio_regset_init(&temp_set, engine);
		if (ret < 0)
			goto fail_regset_init;

		guc->ads_regset_count[id] = temp_set.storage_used - used;
		total += guc->ads_regset_count[id];
	}
/* The GSC engine is an instance (6) of OTHER_CLASS */ if (gt->engine[GSC0])
info_map_write(info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS],
BIT(gt->engine[GSC0]->instance));
}
/* * Reserve the memory for the golden contexts and point GuC at it but * leave it empty for now. The context data will be filled in later * once there is something available to put there. * * Note that the HWSP and ring context are not included. * * Note also that the storage must be pinned in the GGTT, so that the * address won't change after GuC has been told where to find it. The * GuC will also validate that the LRC base + size fall within the * allowed GGTT range.
*/ if (!iosys_map_is_null(&guc->ads_map)) {
offset = guc_ads_golden_ctxt_offset(guc);
addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
offsetof(struct __guc_ads_blob, system_info));
} else {
memset(&local_info, 0, sizeof(local_info));
iosys_map_set_vaddr(&info_map, &local_info);
fill_engine_enable_masks(gt, &info_map);
}
/* * This interface is slightly confusing. We need to pass the * base address of the full golden context and the size of just * the engine state, which is the section of the context image * that starts after the execlists context. This is required to * allow the GuC to restore just the engine state when a * watchdog reset occurs. * We calculate the engine state size by removing the size of * what comes before it in the context image (which is identical * on all engines).
*/
ads_blob_write(guc, ads.eng_state_size[guc_class],
real_size - LRC_SKIP_SIZE(gt->i915));
ads_blob_write(guc, ads.golden_context_lrca[guc_class],
addr_ggtt);
addr_ggtt += alloc_size;
}
/* Make sure current size matches what we calculated previously */ if (guc->ads_golden_ctxt_size)
GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
if (!intel_uc_uses_guc_submission(>->uc)) return;
GEM_BUG_ON(iosys_map_is_null(&guc->ads_map));
/* * Go back and fill in the golden context data now that it is * available.
*/
offset = guc_ads_golden_ctxt_offset(guc);
addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
guc_class = engine_class_to_guc_class(engine_class); if (!ads_blob_read(guc, system_info.engine_enabled_masks[guc_class])) continue;
engine = find_engine_state(gt, engine_class); if (!engine) {
guc_err(guc, "No engine state recorded for class %d!\n",
engine_class);
ads_blob_write(guc, ads.eng_state_size[guc_class], 0);
ads_blob_write(guc, ads.golden_context_lrca[guc_class], 0); continue;
}
/* first, set aside the first page for a capture_list with zero descriptors */
total_size = PAGE_SIZE; if (ads_is_mapped) { if (!intel_guc_capture_getnullheader(guc, &ptr, &size))
iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
null_ggtt = ads_ggtt + capture_offset;
capture_offset += PAGE_SIZE;
}
for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) { for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
u32 engine_mask = guc_get_capture_engine_mask(&info_map, j);
/* null list if we dont have said engine or list */ if (!engine_mask) { if (ads_is_mapped) {
ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
} continue;
} if (intel_guc_capture_getlistsize(guc, i,
GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
j, &size)) { if (ads_is_mapped)
ads_blob_write(guc, ads.capture_class[i][j], null_ggtt); goto engine_instance_list;
}
total_size += size; if (ads_is_mapped) { if (total_size > guc->ads_capture_size ||
intel_guc_capture_getlist(guc, i,
GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
j, &ptr)) {
ads_blob_write(guc, ads.capture_class[i][j], null_ggtt); continue;
}
ads_blob_write(guc, ads.capture_class[i][j], ads_ggtt +
capture_offset);
iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
capture_offset += size;
}
engine_instance_list: if (intel_guc_capture_getlistsize(guc, i,
GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
j, &size)) { if (ads_is_mapped)
ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt); continue;
}
total_size += size; if (ads_is_mapped) { if (total_size > guc->ads_capture_size ||
intel_guc_capture_getlist(guc, i,
GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
j, &ptr)) {
ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt); continue;
}
ads_blob_write(guc, ads.capture_instance[i][j], ads_ggtt +
capture_offset);
iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
capture_offset += size;
}
} if (intel_guc_capture_getlistsize(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &size)) { if (ads_is_mapped)
ads_blob_write(guc, ads.capture_global[i], null_ggtt); continue;
}
total_size += size; if (ads_is_mapped) { if (total_size > guc->ads_capture_size ||
intel_guc_capture_getlist(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0,
&ptr)) {
ads_blob_write(guc, ads.capture_global[i], null_ggtt); continue;
}
ads_blob_write(guc, ads.capture_global[i], ads_ggtt + capture_offset);
iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
capture_offset += size;
}
}
if (guc->ads_capture_size && guc->ads_capture_size != PAGE_ALIGN(total_size))
guc_warn(guc, "ADS capture alloc size changed from %d to %d\n",
guc->ads_capture_size, PAGE_ALIGN(total_size));
/* Lists for error capture debug */
guc_capture_prep_lists(guc);
/* ADS */
ads_blob_write(guc, ads.scheduler_policies, base +
offsetof(struct __guc_ads_blob, policies));
ads_blob_write(guc, ads.gt_system_info, base +
offsetof(struct __guc_ads_blob, system_info));
/* MMIO save/restore list */
guc_mmio_reg_state_init(guc);
/* Workaround KLV list */
guc_waklv_init(guc);
/* Private Data */
ads_blob_write(guc, ads.private_data, base +
guc_ads_private_data_offset(guc));
i915_gem_object_flush_map(guc->ads_vma->obj);
}
/**
 * intel_guc_ads_create() - allocates and initializes GuC ADS.
 * @guc: intel_guc struct
 *
 * GuC needs memory block (Additional Data Struct), where it will store
 * some data. Allocate and initialize such memory block for GuC use.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_guc_ads_create(struct intel_guc *guc)
{
	void *ads_blob;
	u32 size;
	int ret;

	GEM_BUG_ON(guc->ads_vma);

	/*
	 * Create reg state size dynamically on system memory to be copied to
	 * the final ads blob on gt init/reset
	 */
	ret = guc_mmio_reg_state_create(guc);
	if (ret < 0)
		return ret;
	guc->ads_regset_size = ret;

	/* Likewise the golden contexts: */
	ret = guc_prep_golden_context(guc);
	if (ret < 0)
		return ret;
	guc->ads_golden_ctxt_size = ret;

	/* Likewise the capture lists: */
	ret = guc_capture_prep_lists(guc);
	if (ret < 0)
		return ret;
	guc->ads_capture_size = ret;

	/* And don't forget the workaround KLVs: */
	ret = guc_prep_waklv(guc);
	if (ret < 0)
		return ret;
	guc->ads_waklv_size = ret;

	/*
	 * Now the total size can be determined: all section sizes above must
	 * be recorded before the blob size is computed.
	 */
	size = guc_ads_blob_size(guc);

	ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma,
					     &ads_blob);
	if (ret)
		return ret;

	/* lmem mappings are I/O memory, so pick the matching iosys-map init */
	if (i915_gem_object_is_lmem(guc->ads_vma->obj))
		iosys_map_set_vaddr_iomem(&guc->ads_map, (void __iomem *)ads_blob);
	else
		iosys_map_set_vaddr(&guc->ads_map, ads_blob);

	__guc_ads_init(guc);

	return 0;
}
/* Late ADS init step: fill in the golden context data (see comment below). */
void intel_guc_ads_init_late(struct intel_guc *guc)
{
	/*
	 * The golden context setup requires the saved engine state from
	 * __engines_record_defaults(). However, that requires engines to be
	 * operational which means the ADS must already have been configured.
	 * Fortunately, the golden context state is not needed until a hang
	 * occurs, so it can be filled in during this late init phase.
	 */
	guc_init_golden_context(guc);
}
/** * intel_guc_ads_reset() - prepares GuC Additional Data Struct for reuse * @guc: intel_guc struct * * GuC stores some data in ADS, which might be stale after a reset. * Reinitialize whole ADS in case any part of it was corrupted during * previous GuC run.
*/ void intel_guc_ads_reset(struct intel_guc *guc)
{ if (!guc->ads_vma) return;
/*
 * NOTE(review): the German text that previously followed here ("Die
 * Informationen auf dieser Webseite wurden ...") is unrelated web-page
 * residue — a site disclaimer stating that the information was compiled
 * carefully to the best of the authors' knowledge, but that neither
 * completeness, correctness nor quality is guaranteed, and that the syntax
 * highlighting and measurement are still experimental. It is not part of
 * this source file and should be removed.
 */