if (disable_work_sync(>->sriov.pf.workers.restart)) {
xe_gt_sriov_dbg_verbose(gt, "pending restart disabled!\n"); /* release an rpm reference taken on the worker's behalf */
xe_pm_runtime_put(gt_to_xe(gt));
}
}
/**
 * xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
 * @gt: the &xe_gt to initialize
 *
 * Early initialization of the PF data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
{
	int err;

	err = pf_alloc_metadata(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_service_init(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_control_init(gt);
	if (err)
		return err;

	/* all early PF sub-initializers succeeded */
	return 0;
}
/**
 * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
 * @gt: the &xe_gt to initialize
 *
 * Late one-time initialization of the PF data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_init(struct xe_gt *gt)
{
	int err;

	err = xe_gt_sriov_pf_config_init(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_migration_init(gt);
	if (err)
		return err;

	/* all late PF sub-initializers succeeded */
	return 0;
}
/**
 * xe_gt_sriov_pf_init_hw - Initialize SR-IOV hardware support.
 * @gt: the &xe_gt to initialize
 *
 * On some platforms the PF must explicitly enable VF's access to the GGTT.
 */
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
	/* only platforms that require it get the GGTT guest-update enable */
	if (pf_needs_enable_ggtt_guest_update(gt_to_xe(gt)))
		pf_enable_ggtt_guest_update(gt);
}
staticvoid pf_clear_vf_scratch_regs(struct xe_gt *gt, unsignedint vfid)
{
u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt)); struct xe_reg scratch; int n, count;
if (xe_gt_is_media_type(gt)) {
count = MED_VF_SW_FLAG_COUNT; for (n = 0; n < count; n++) {
scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride);
xe_mmio_write32(>->mmio, scratch, 0);
}
} else {
count = VF_SW_FLAG_COUNT; for (n = 0; n < count; n++) {
scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride);
xe_mmio_write32(>->mmio, scratch, 0);
}
}
}
/**
 * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	/* wipe the VF's scratch registers as part of its HW sanitization */
	pf_clear_vf_scratch_regs(gt, vfid);
}
/**
 * xe_gt_sriov_pf_stop_prepare() - Prepare to stop SR-IOV support.
 * @gt: the &xe_gt
 *
 * This function can only be called on the PF.
 */
void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
{
	/* make sure no restart worker fires after we start shutting down */
	pf_cancel_restart(gt);
}
/* take an rpm reference on behalf of the worker */
xe_pm_runtime_get_noresume(xe);
if (!queue_work(xe->sriov.wq, >->sriov.pf.workers.restart)) {
xe_gt_sriov_dbg(gt, "restart already in queue!\n");
xe_pm_runtime_put(xe);
}
}
/**
 * xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
 * @gt: the &xe_gt
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
	/* the actual restart runs asynchronously from a worker */
	pf_queue_restart(gt);
}
/** * xe_gt_sriov_pf_wait_ready() - Wait until per-GT PF SR-IOV support is ready. * @gt: the &xe_gt * * This function can only be called on PF. * * Return: 0 on success or a negative error code on failure.
*/ int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt)
{ /* don't wait if there is another ongoing reset */ if (xe_guc_read_stopped(>->uc.guc)) return -EBUSY;
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt.
Es wird jedoch weder Vollständigkeit noch Richtigkeit noch Qualität der bereitgestellten
Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.