/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */
/* * On Gen11+, the value written to the register is passes as a payload * to the FW. However, the FW currently treats all values the same way * (H2G interrupt), so we can just write the value that the HW expects * on older gens.
*/
intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}
/* * Wa_14012197797 * Wa_22011391025 * * The same WA bit is used for both and 22011391025 is applicable to * all DG2. * * Platforms post DG2 prevent this issue in hardware by stalling * submissions. With this flag GuC will schedule as to avoid such * stalls.
*/ if (IS_DG2(gt->i915) ||
(CCS_MASK(gt) && GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)))
flags |= GUC_WA_DUAL_QUEUE;
/* Wa_22011802037: graphics version 11/12 */ if (intel_engine_reset_needs_wa_22011802037(gt))
flags |= GUC_WA_PRE_PARSER;
/* * Wa_14018913170: Applicable to all platforms supported by i915 so * don't bother testing for all X/Y/Z platforms explicitly.
*/ if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0))
flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
/*
 * Dump the GuC parameter block that will be handed to the firmware.
 *
 * NOTE(review): the original comment claimed this initialises the parameter
 * block, but the visible body only logs guc->params; the GUC_CTL_* value
 * computation is presumably elsewhere (or was lost from this chunk) --
 * confirm against the full file.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		guc_dbg(guc, "param[%2d] = %#x\n", i, params[i]);
}
/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);

	/* Scratch 0 is used for MMIO-based H2G signalling; clear it first. */
	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	/*
	 * Balance the forcewake_get above; the original chunk was truncated
	 * here and leaked the forcewake reference (and lacked the closing
	 * brace entirely).
	 */
	intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
}
int intel_guc_init(struct intel_guc *guc)
{ int ret;
ret = intel_uc_fw_init(&guc->fw); if (ret) goto out;
ret = intel_guc_log_create(&guc->log); if (ret) goto err_fw;
ret = intel_guc_capture_init(guc); if (ret) goto err_log;
ret = intel_guc_ads_create(guc); if (ret) goto err_capture;
GEM_BUG_ON(!guc->ads_vma);
ret = intel_guc_ct_init(&guc->ct); if (ret) goto err_ads;
if (intel_guc_submission_is_used(guc)) { /* * This is stuff we need to have available at fw load time * if we are planning to enable submission later
*/
ret = intel_guc_submission_init(guc); if (ret) goto err_ct;
}
if (intel_guc_slpc_is_used(guc)) {
ret = intel_guc_slpc_init(&guc->slpc); if (ret) goto err_submission;
}
/* now that everything is perma-pinned, initialize the parameters */
guc_init_params(guc);
/* * This function implements the MMIO based host to GuC interface.
 */ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
u32 *response_buf, u32 response_buf_size)
{ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
u32 header; int i; int ret;
/*
 * Write the request dwords into the GuC send scratch registers.
 * NOTE(review): `retry:` is presumably re-entered when the GuC answers
 * BUSY/RETRY; that handling is not visible in this chunk -- confirm
 * against the full file.
 */
retry: for (i = 0; i < len; i++)
intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);
/* Posting read of the last written register flushes the writes to HW. */
intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));
/* Ring the doorbell so the firmware processes the request. */
intel_guc_notify(guc);
/* * No GuC command should ever take longer than 10ms. * Fast commands should still complete in 10us.
*/
/*
 * Poll register 0 until its ORIGIN field reads GUC, i.e. the firmware
 * has overwritten our request with its reply header (10us fast wait,
 * 10ms total timeout; result returned through &header).
 */
ret = __intel_wait_for_register_fw(uncore,
guc_send_reg(guc, 0),
GUC_HXG_MSG_0_ORIGIN,
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
GUC_HXG_ORIGIN_GUC),
10, 10, &header); if (unlikely(ret)) {
timeout:
guc_err(guc, "mmio request %#x: no reply %x\n",
request[0], header); goto out;
/*
 * NOTE(review): the `out:` label (and, upstream, the send-mutex /
 * forcewake exit path) is not present in this chunk -- the function
 * appears truncated; confirm against the full file.
 */
}
/*
 * Reply received: either copy up to send_regs.count dwords back into the
 * caller's buffer, or extract DATA0 from the reply header.
 */
if (response_buf) { int count = min(response_buf_size, guc->send_regs.count);
GEM_BUG_ON(!count);
response_buf[0] = header;
for (i = 1; i < count; i++)
response_buf[i] = intel_uncore_read(uncore,
guc_send_reg(guc, i));
/* Use number of copied dwords as our return value */
ret = count;
} else { /* Use data from the GuC response as our return value */
ret = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}
/* Make sure to handle only enabled messages */
msg = payload[0] & guc->msg_enabled_mask;
if (msg & INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)
guc_err(guc, "Received early crash dump notification!\n"); if (msg & INTEL_GUC_RECV_MSG_EXCEPTION)
guc_err(guc, "Received early exception notification!\n");
if (msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED | INTEL_GUC_RECV_MSG_EXCEPTION))
queue_work(system_unbound_wq, &guc->dead_guc_worker);
return 0;
}
/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return: non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	/*
	 * The chunk was truncated here: the action was built but never sent
	 * and the function never returned. Restored per upstream.
	 */
	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc: the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_CLIENT_SOFT_RESET,
	};
	int ret;

	/* Nothing to tell a GuC that was never brought up. */
	if (!intel_guc_is_ready(guc))
		return 0;

	if (intel_guc_submission_is_used(guc)) {
		/* Let any pending dead-GuC handling finish first. */
		flush_work(&guc->dead_guc_worker);

		/*
		 * This H2G MMIO command tears down the GuC in two steps.
		 * First it will generate a G2H CTB for every active context
		 * indicating a reset. In practice the i915 shouldn't ever get
		 * a G2H as suspend should only be called when the GPU is
		 * idle. Next, it tears down the CTBs and this H2G MMIO
		 * command completes.
		 *
		 * Don't abort on a failure code from the GuC. Keep going and
		 * do the clean up in sanitize() and re-initialisation on
		 * resume and hopefully the error here won't be problematic.
		 */
		ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action),
					  NULL, 0);
		if (ret)
			guc_err(guc, "suspend: RESET_CLIENT action failed with %pe\n",
				ERR_PTR(ret));
	}

	/* Signal that the GuC isn't running. */
	intel_guc_sanitize(guc);

	return 0;
}
/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc: the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	/*
	 * NB: this can be called even when GuC submission is disabled, e.g.
	 * when GuC is enabled for HuC authentication only. Any code added
	 * here in future must therefore cope with doing nothing when
	 * submission is off (mirroring intel_guc_suspend).
	 */
	return 0;
}
/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to memory location of the FW itself (WOPCM)
 * or other parts of the HW. The driver must take care not to place objects that
 * the GuC is going to access in these reserved ranges. The layout of the GuC
 * address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address  +===> +====================+ <== GuC ggtt_pin_bias
 *   Space    ^    |                    |
 *     |      |    |                    |
 *     |    GuC    |        GuC         |
 *     |   WOPCM   |       WOPCM        |
 *     |    Size   |                    |
 *     |      |    |                    |
 *     v      v    |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
 * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */
/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned lifetime, so we allocate
 * both some backing storage and a range inside the Global GTT. We must pin
 * it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because that
 * range is reserved inside GuC.
 *
 * Return: A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	/*
	 * NOTE(review): the object-creation code was missing from this chunk,
	 * leaving @obj uninitialized at first use. Restored per upstream:
	 * GuC-accessible buffers live in lmem when available, shmem otherwise.
	 */
	if (HAS_LMEM(gt->i915))
		obj = i915_gem_object_create_lmem(gt->i915, size,
						  I915_BO_ALLOC_CPU_CLEAR |
						  I915_BO_ALLOC_CONTIGUOUS |
						  I915_BO_ALLOC_PM_EARLY);
	else
		obj = i915_gem_object_create_shmem(gt->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Wa_22016122933: For Media version 13.0, all Media GT shared
	 * memory needs to be mapped as WC on CPU side and UC (PAT
	 * index 2) on GPU side.
	 */
	if (intel_gt_needs_wa_22016122933(gt))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

	/* Original had ">->ggtt->vm" -- HTML-entity damage for "&gt->...". */
	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	/* Keep the pin above ggtt_pin_bias, i.e. out of the WOPCM range. */
	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_ggtt_pin(vma, NULL, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}
/**
 * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 * @out_vma: return variable for the allocated vma pointer
 * @out_vaddr: return variable for the obj mapping
 *
 * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
 * object with I915_MAP_WB.
 *
 * Return: 0 if successful, a negative errno code otherwise.
 */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/*
	 * NOTE(review): the original chunk spliced the body of a different
	 * function (upstream intel_guc_write_barrier, which referenced an
	 * undeclared `gt`) in place of the mapping logic, and never set the
	 * out-params or returned. Restored per upstream; confirm the
	 * coherent-map-type helper name matches this tree.
	 */
	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
						 intel_gt_coherent_map_type(guc_to_gt(guc),
									    vma->obj, true));
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	*out_vma = vma;
	*out_vaddr = vaddr;

	return 0;
}
/*
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit noch
 * Qualität der bereitgestellten Informationen zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */