do {
err = guc_action_vf_reset(guc); if (!err || err != -ETIMEDOUT) break;
} while (--retry);
if (unlikely(err))
xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err)); return err;
}
/** * xe_gt_sriov_vf_reset - Reset GuC VF internal state. * @gt: the &xe_gt * * It requires functional `GuC MMIO based communication`_. * * Return: 0 on success or a negative error code on failure.
*/ int xe_gt_sriov_vf_reset(struct xe_gt *gt)
{ if (!xe_device_uc_enabled(gt_to_xe(gt))) return -ENODEV;
switch (xe->info.platform) { case XE_TIGERLAKE ... XE_PVC: /* 1.1 this is current baseline for Xe driver */
ver->branch = 0;
ver->major = 1;
ver->minor = 1; break; default: /* 1.2 has support for the GMD_ID KLV */
ver->branch = 0;
ver->major = 1;
ver->minor = 2; break;
}
}
/*
 * vf_wanted_guc_version - Fill @ver with the GuC ABI version this VF
 * wants to negotiate; currently identical to the minimum supported one.
 */
static void vf_wanted_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver)
{
	/* for now it's the same as minimum */
	vf_minimum_guc_version(gt, ver);
}
/* select wanted version - prefer previous (if any) */ if (guc_version->major || guc_version->minor) {
wanted = *guc_version;
old = true;
} else {
vf_wanted_guc_version(gt, &wanted);
xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY);
/* First time we handshake, so record the minimum wanted */
gt->sriov.vf.wanted_guc_version = wanted;
}
err = guc_action_match_version(guc, &wanted, guc_version); if (unlikely(err)) goto fail;
if (old) { /* we don't support interface version change */ if (MAKE_GUC_VER_STRUCT(*guc_version) != MAKE_GUC_VER_STRUCT(wanted)) {
xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
guc_version->branch, guc_version->major,
guc_version->minor, guc_version->patch);
xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
wanted.branch, wanted.major,
wanted.minor, wanted.patch);
err = -EREMCHG; goto fail;
} else { /* version is unchanged, no need to re-verify it */ return 0;
}
}
/* there's no fallback on major version. */ if (guc_version->major != wanted.major) {
err = -ENOPKG; goto unsupported;
}
/* check against minimum version supported by us */
vf_minimum_guc_version(gt, &wanted);
xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY); if (MAKE_GUC_VER_STRUCT(*guc_version) < MAKE_GUC_VER_STRUCT(wanted)) {
err = -ENOKEY; goto unsupported;
}
xe_gt_sriov_dbg(gt, "using GuC interface version %u.%u.%u.%u\n",
guc_version->branch, guc_version->major,
guc_version->minor, guc_version->patch);
return 0;
unsupported:
xe_gt_sriov_err(gt, "Unsupported GuC version %u.%u.%u.%u (%pe)\n",
guc_version->branch, guc_version->major,
guc_version->minor, guc_version->patch,
ERR_PTR(err));
fail:
xe_gt_sriov_err(gt, "Unable to confirm GuC version %u.%u (%pe)\n",
wanted.major, wanted.minor, ERR_PTR(err));
/* try again with *any* just to query which version is supported */ if (!guc_action_match_version_any(guc, &wanted))
xe_gt_sriov_notice(gt, "GuC reports interface version %u.%u.%u.%u\n",
wanted.branch, wanted.major, wanted.minor, wanted.patch); return err;
}
/**
 * xe_gt_sriov_vf_bootstrap - Query and setup GuC ABI interface version.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 * It requires functional `GuC MMIO based communication`_.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
{
	int err;

	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		return -ENODEV;

	/* first bring the GuC VF state back to a known clean state ... */
	err = vf_reset_guc_state(gt);
	if (unlikely(err))
		return err;

	/* ... then negotiate the ABI version to be used */
	err = vf_handshake_with_guc(gt);
	if (unlikely(err))
		return err;

	return 0;
}
/**
 * xe_gt_sriov_vf_guc_versions - Minimum required and found GuC ABI versions
 * @gt: the &xe_gt
 * @wanted: pointer to the xe_uc_fw_version to be filled with the wanted version
 * @found: pointer to the xe_uc_fw_version to be filled with the found version
 *
 * This function is for VF use only and it can only be used after successful
 * version handshake with the GuC.
 */
void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
				 struct xe_uc_fw_version *wanted,
				 struct xe_uc_fw_version *found)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);

	if (wanted)
		*wanted = gt->sriov.vf.wanted_guc_version;

	/*
	 * NOTE(review): the @found handling and closing brace were missing
	 * from this chunk; reconstructed to match the kernel-doc above -
	 * confirm against the original source.
	 */
	if (found)
		*found = gt->sriov.vf.guc_version;
}
/** * xe_gt_sriov_vf_gmdid - Query GMDID over MMIO. * @gt: the &xe_gt * * This function is for VF use only. * * Return: value of GMDID KLV on success or 0 on failure.
*/
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt)
{ constchar *type = xe_gt_is_media_type(gt) ? "media" : "graphics"; struct xe_guc *guc = >->uc.guc;
u32 value; int err;
/**
 * xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_query_config(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	err = vf_get_ggtt_info(gt);
	if (unlikely(err))
		return err;

	/* LMEM is assigned only to the primary GT on discrete devices */
	if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
		err = vf_get_lmem_info(gt);
		if (unlikely(err))
			return err;
	}

	err = vf_get_submission_cfg(gt);
	if (unlikely(err))
		return err;

	if (has_gmdid(xe))
		vf_cache_gmdid(gt);

	return 0;
}
/**
 * xe_gt_sriov_vf_guc_ids - VF GuC context IDs configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: number of GuC context IDs assigned to VF.
 */
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	/* valid only after a successful GuC version handshake */
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.num_ctxs);

	return gt->sriov.vf.self_config.num_ctxs;
}
/**
 * xe_gt_sriov_vf_lmem - VF LMEM configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: size of the LMEM assigned to VF.
 */
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	/* valid only after a successful GuC version handshake */
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.lmem_size);

	return gt->sriov.vf.self_config.lmem_size;
}
/**
 * xe_gt_sriov_vf_ggtt - VF GGTT configuration.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: size of the GGTT assigned to VF.
 */
u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	/* valid only after a successful GuC version handshake */
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);

	return gt->sriov.vf.self_config.ggtt_size;
}
/**
 * xe_gt_sriov_vf_ggtt_base - VF GGTT base offset.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: base offset of the GGTT assigned to VF.
 */
u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
	/* valid only after a successful GuC version handshake */
	xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
	/* note: asserts on ggtt_size (not base) - base 0 may be legitimate */
	xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);

	return gt->sriov.vf.self_config.ggtt_base;
}
/** * xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration * @gt: the &xe_gt struct instance * * This function is for VF use only. * * Return: The shift value; could be negative
*/
s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt)
{ struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config;
/*
 * NOTE(review): the lines below are the error-exit tail of a PF ABI
 * handshake function (likely vf_handshake_with_pf()) whose header and
 * body are missing from this chunk; 'major', 'minor', 'err' and 'xe'
 * are declared in the missing part. Restore the full function from the
 * original source before building.
 */
failed:
	xe_gt_sriov_err(gt, "Unable to confirm VF/PF ABI version %u.%u (%pe)\n",
			major, minor, ERR_PTR(err));
	/* a failed handshake leaves the PF connection unusable - drop it */
	vf_disconnect_pf(xe); return err;
}
/**
 * xe_gt_sriov_vf_connect - Establish connection with the PF driver.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_connect(struct xe_gt *gt)
{
	int err;

	err = vf_handshake_with_pf(gt);
	if (unlikely(err))
		goto failed;

	return 0;

failed:
	xe_gt_sriov_err(gt, "Failed to get version info (%pe)\n", ERR_PTR(err));
	return err;
}
/**
 * xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
 *   or just mark that a GuC is ready for it.
 * @gt: the &xe_gt struct instance linked to target GuC
 *
 * This function shall be called only by VF.
 */
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_VF(xe));

	set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
	/*
	 * We need to be certain that if all flags were set, at least one
	 * thread will notice that and schedule the recovery.
	 */
	smp_mb__after_atomic();

	xe_gt_sriov_info(gt, "ready for recovery after migration\n");
	xe_sriov_vf_start_migration_recovery(xe);
}
/**
 * xe_gt_sriov_vf_query_runtime - Query SR-IOV runtime data.
 * @gt: the &xe_gt
 *
 * This function is for VF use only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt)
{
	int err;

	err = vf_query_runtime_info(gt);
	if (unlikely(err))
		goto failed;

	/* dump the obtained runtime data on debug builds only */
	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
		vf_show_runtime_info(gt);

	return 0;

failed:
	xe_gt_sriov_err(gt, "Failed to get runtime info (%pe)\n",
			ERR_PTR(err));
	return err;
}
/** * xe_gt_sriov_vf_read32 - Get a register value from the runtime data. * @gt: the &xe_gt * @reg: the register to read * * This function is for VF use only. * This function shall be called after VF has connected to PF. * This function is dedicated for registers that VFs can't read directly. * * Return: register value obtained from the PF or 0 if not found.
*/
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
{
u32 addr = xe_mmio_adjusted_addr(>->mmio, reg.addr); struct vf_runtime_reg *rr;
/**
 * xe_gt_sriov_vf_write32 - Handle a write to an inaccessible register.
 * @gt: the &xe_gt
 * @reg: the register to write
 * @val: value to write
 *
 * This function is for VF use only.
 * Currently it will trigger a WARN if running on debug build.
 */
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
{
	/* fixed mangled '>->mmio' (lost '&' before 'gt->mmio') */
	u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr);

	/*
	 * In the future, we may want to handle selected writes to inaccessible
	 * registers in some custom way, but for now let's just log a warning
	 * about such attempt, as likely we might be doing something wrong.
	 */
	xe_gt_WARN(gt, IS_ENABLED(CONFIG_DRM_XE_DEBUG),
		   "VF is trying to write %#x to an inaccessible register %#x+%#x\n",
		   val, reg.addr, addr - reg.addr);
}
/**
 * xe_gt_sriov_vf_print_config - Print VF self config.
 * @gt: the &xe_gt
 * @p: the &drm_printer
 *
 * This function is for VF use only.
 */
void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
{
/*
 * NOTE(review): '>->sriov' below is a mangled '&gt->sriov' (lost '&'),
 * and the function body past these declarations is missing from this
 * chunk - restore both from the original source before building.
 */
{ struct xe_gt_sriov_vf_selfconfig *config = >->sriov.vf.self_config; struct xe_device *xe = gt_to_xe(gt); char buf[10];
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.