/* Return: size of the state in dwords or a negative error code on failure */ staticint pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsignedint vfid)
{ int ret;
ret = guc_action_vf_save_restore(>->uc.guc, vfid, GUC_PF_OPCODE_VF_SAVE, 0, 0); return ret ?: -ENODATA;
}
/* Return: number of state dwords saved or a negative error code on failure */ staticint pf_send_guc_save_vf_state(struct xe_gt *gt, unsignedint vfid, void *buff, size_t size)
{ constint ndwords = size / sizeof(u32); struct xe_tile *tile = gt_to_tile(gt); struct xe_device *xe = tile_to_xe(tile); struct xe_guc *guc = >->uc.guc; struct xe_bo *bo; int ret;
bo = xe_bo_create_pin_map(xe, tile, NULL,
ALIGN(size, PAGE_SIZE),
ttm_bo_type_kernel,
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
XE_BO_FLAG_GGTT_INVALIDATE); if (IS_ERR(bo)) return PTR_ERR(bo);
xe_map_memcpy_to(xe, &bo->vmap, 0, buff, size);
ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_RESTORE,
xe_bo_ggtt_addr(bo), ndwords); if (!ret)
ret = -ENODATA; elseif (ret > ndwords)
ret = -EPROTO;
ret = pf_send_guc_query_vf_state_size(gt, vfid); if (ret < 0) goto fail;
size = ret * sizeof(u32);
xe_gt_sriov_dbg_verbose(gt, "VF%u state size is %d dwords (%zu bytes)\n", vfid, ret, size);
ret = pf_alloc_guc_state(gt, snapshot, size); if (ret < 0) goto fail;
ret = pf_send_guc_save_vf_state(gt, vfid, snapshot->guc.buff, size); if (ret < 0) goto fail;
size = ret * sizeof(u32);
xe_gt_assert(gt, size);
xe_gt_assert(gt, size <= snapshot->guc.size);
snapshot->guc.size = size;
pf_dump_guc_state(gt, snapshot); return 0;
fail:
xe_gt_sriov_dbg(gt, "Unable to save VF%u state (%pe)\n", vfid, ERR_PTR(ret));
pf_free_guc_state(gt, snapshot); return ret;
}
/** * xe_gt_sriov_pf_migration_save_guc_state() - Take a GuC VF state snapshot. * @gt: the &xe_gt * @vfid: the VF identifier * * This function is for PF only. * * Return: 0 on success or a negative error code on failure.
*/ int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsignedint vfid)
{ int err;
fail:
xe_gt_sriov_dbg(gt, "Failed to restore VF%u GuC state (%pe)\n", vfid, ERR_PTR(ret)); return ret;
}
/**
 * xe_gt_sriov_pf_migration_restore_guc_state() - Restore a GuC VF state.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid)
{
	int ret;

	/* serialize against concurrent save/restore and debugfs access */
	mutex_lock(pf_migration_mutex(gt));
	ret = pf_restore_vf_guc_state(gt, vfid);
	mutex_unlock(pf_migration_mutex(gt));

	return ret;
}
#ifdef CONFIG_DEBUG_FS
/**
 * xe_gt_sriov_pf_migration_read_guc_state() - Read a GuC VF state.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @buf: the user space buffer to read to
 * @count: the maximum number of bytes to read
 * @pos: the current position in the buffer
 *
 * This function is for PF only.
 *
 * This function reads up to @count bytes from the saved VF GuC state buffer
 * at offset @pos into the user space address starting at @buf.
 *
 * Return: the number of bytes read or a negative error code on failure.
 */
ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
						char __user *buf, size_t count, loff_t *pos)
{
	struct xe_gt_sriov_state_snapshot *snapshot;
	ssize_t ret;

	mutex_lock(pf_migration_mutex(gt));
	snapshot = pf_pick_vf_snapshot(gt, vfid);
	if (snapshot->guc.size)
		ret = simple_read_from_buffer(buf, count, pos, snapshot->guc.buff,
					      snapshot->guc.size);
	else
		ret = -ENODATA;
	mutex_unlock(pf_migration_mutex(gt));

	return ret;
}
/** * xe_gt_sriov_pf_migration_write_guc_state() - Write a GuC VF state. * @gt: the &xe_gt * @vfid: the VF identifier * @buf: the user space buffer with GuC VF state * @size: the size of GuC VF state (in bytes) * * This function is for PF only. * * This function reads @size bytes of the VF GuC state stored at user space * address @buf and writes it into a internal VF state buffer. * * Return: the number of bytes used or a negative error code on failure.
*/
ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsignedint vfid, constchar __user *buf, size_t size)
{ struct xe_gt_sriov_state_snapshot *snapshot;
loff_t pos = 0;
ssize_t ret;
mutex_lock(pf_migration_mutex(gt));
snapshot = pf_pick_vf_snapshot(gt, vfid);
ret = pf_alloc_guc_state(gt, snapshot, size); if (!ret) {
ret = simple_write_to_buffer(snapshot->guc.buff, size, &pos, buf, size); if (ret < 0)
pf_free_guc_state(gt, snapshot); else
pf_dump_guc_state(gt, snapshot);
}
mutex_unlock(pf_migration_mutex(gt));
return ret;
} #endif/* CONFIG_DEBUG_FS */
staticbool pf_check_migration_support(struct xe_gt *gt)
{ /* GuC 70.25 with save/restore v2 is required */
xe_gt_assert(gt, GUC_FIRMWARE_VER(>->uc.guc) >= MAKE_GUC_VER(70, 25, 0));
/* XXX: for now this is for feature enabling only */ return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
}
/** * xe_gt_sriov_pf_migration_init() - Initialize support for VF migration. * @gt: the &xe_gt * * This function is for PF only. * * Return: 0 on success or a negative error code on failure.
*/ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
{ struct xe_device *xe = gt_to_xe(gt); int err;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.