/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Author: Huang Rui *
*/
/* Allocate the 4 KiB local-frame-buffer backing for the PSP KM ring.
 * On allocation failure the recorded ring size is reset to 0 so callers
 * can tell no ring memory is held.
 */
static int psp_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
{
	int ret = 0;
	struct psp_ring *ring;
	struct amdgpu_device *adev = psp->adev;

	ring = &psp->km_ring;
	ring->ring_type = ring_type;

	/* allocate 4k Page of Local Frame Buffer memory for ring */
	ring->ring_size = 0x1000;
	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->firmware.rbuf,
				      &ring->ring_mem_mc_addr,
				      (void **)&ring->ring_mem);
	if (ret) {
		ring->ring_size = 0;
		return ret;
	}

	return 0;
}
/* * Due to DF Cstate management centralized to PMFW, the firmware * loading sequence will be updated as below: * - Load KDB * - Load SYS_DRV * - Load tOS * - Load PMFW * - Setup TMR * - Load other non-psp fw * - Load ASD * - Load XGMI/RAS/HDCP/DTM TA if any * * This new sequence is required for * - Arcturus and onwards
*/
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;

	/* Under SRIOV the host driver owns Cstate management. */
	if (amdgpu_sriov_vf(adev)) {
		psp->pmfw_centralized_cstate_management = false;
		return;
	}

	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 7):
		psp->pmfw_centralized_cstate_management = true;
		break;
	default:
		psp->pmfw_centralized_cstate_management = false;
		break;
	}
}
/*
 * NOTE(review): this region appears corrupted by extraction. The
 * psp_init_sriov_microcode() signature below is immediately followed by
 * statements that belong to other routines (runtime-db boot-config query,
 * two-stage memory-training setup, fw_pri/fence/cmd buffer allocation and
 * register polling loops), and several locals (boot_cfg_entry,
 * mem_training_ctx, i, val, reg_index, ...) have no visible declaration.
 * Left byte-identical; restore from the upstream file before relying on it.
 */
staticint psp_init_sriov_microcode(struct psp_context *psp)
{ struct amdgpu_device *adev = psp->adev; char ucode_prefix[30]; int ret = 0;
/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
/* Query the boot-config entry from the PSP runtime database (if any). */
memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry)); if (psp_get_runtime_db_entry(adev,
PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
&boot_cfg_entry)) {
psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask; if ((psp->boot_cfg_bitmask) &
BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) { /* If psp runtime database exists, then * only enable two stage memory training * when TWO_STAGE_DRAM_TRAINING bit is set * in runtime database
*/
mem_training_ctx->enable_mem_training = true;
}
} else { /* If psp runtime database doesn't exist or is * invalid, force enable two stage memory training
*/
mem_training_ctx->enable_mem_training = true;
}
/* Run cold-boot memory training when enabled above. */
if (mem_training_ctx->enable_mem_training) {
ret = psp_memory_training_init(psp); if (ret) {
dev_err(adev->dev, "Failed to initialize memory training!\n"); return ret;
}
ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); if (ret) {
dev_err(adev->dev, "Failed to process memory training!\n"); return ret;
}
}
/* Allocate PSP private firmware, fence and command buffers. */
ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
(amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
&psp->fw_pri_bo,
&psp->fw_pri_mc_addr,
&psp->fw_pri_buf); if (ret) return ret;
ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&psp->fence_buf_bo,
&psp->fence_buf_mc_addr,
&psp->fence_buf); if (ret) goto failed1;
ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
(void **)&psp->cmd_buf_mem); if (ret) goto failed2;
/* usec-granularity register poll (fragment of a wait helper). */
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32(reg_index); if (check_changed) { if (val != reg_val) return 0;
} else { if ((val & mask) == reg_val) return 0;
}
udelay(1);
}
/* msec-granularity register poll variant. */
for (i = 0; i < msec_timeout; i++) {
val = RREG32(reg_index); if ((val & mask) == reg_val) return 0;
msleep(1);
}
return -ETIME;
}
staticconstchar *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
{ switch (cmd_id) { case GFX_CMD_ID_LOAD_TA: return"LOAD_TA"; case GFX_CMD_ID_UNLOAD_TA: return"UNLOAD_TA"; case GFX_CMD_ID_INVOKE_CMD: return"INVOKE_CMD"; case GFX_CMD_ID_LOAD_ASD: return"LOAD_ASD"; case GFX_CMD_ID_SETUP_TMR: return"SETUP_TMR"; case GFX_CMD_ID_LOAD_IP_FW: return"LOAD_IP_FW"; case GFX_CMD_ID_DESTROY_TMR: return"DESTROY_TMR"; case GFX_CMD_ID_SAVE_RESTORE: return"SAVE_RESTORE_IP_FW"; case GFX_CMD_ID_SETUP_VMR: return"SETUP_VMR"; case GFX_CMD_ID_DESTROY_VMR: return"DESTROY_VMR"; case GFX_CMD_ID_PROG_REG: return"PROG_REG"; case GFX_CMD_ID_GET_FW_ATTESTATION: return"GET_FW_ATTESTATION"; case GFX_CMD_ID_LOAD_TOC: return"ID_LOAD_TOC"; case GFX_CMD_ID_AUTOLOAD_RLC: return"AUTOLOAD_RLC"; case GFX_CMD_ID_BOOT_CFG: return"BOOT_CFG"; case GFX_CMD_ID_CONFIG_SQ_PERFMON: return"CONFIG_SQ_PERFMON"; case GFX_CMD_ID_FB_FW_RESERV_ADDR: return"FB_FW_RESERV_ADDR"; case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR: return"FB_FW_RESERV_EXT_ADDR"; default: return"UNKNOWN CMD";
}
}
/*
 * NOTE(review): headerless fragment -- this looks like the middle of
 * psp_cmd_submit_buf() (fence submit, fence poll with RAS-interrupt
 * bailout, response-status handling) merged with the tail of a
 * TOC-load helper (tmr_size readback). The `exit` label referenced by
 * `gotoexit` is not visible. Left byte-identical.
 */
index = atomic_inc_return(&psp->fence_value);
ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); if (ret) {
atomic_dec(&psp->fence_value); gotoexit;
}
/* Poll the fence buffer until PSP writes back the expected index. */
amdgpu_device_invalidate_hdp(psp->adev, NULL); while (*((unsignedint *)psp->fence_buf) != index) { if (--timeout == 0) break; /* * Shouldn't wait for timeout when err_event_athub occurs, * because gpu reset thread triggered and lock resource should * be released for psp resume sequence.
*/
ras_intr = amdgpu_ras_intr_triggered(); if (ras_intr) break;
usleep_range(10, 100);
amdgpu_device_invalidate_hdp(psp->adev, NULL);
}
/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
/* In some cases, psp response status is not 0 even there is no * problem while the command is submitted. Some version of PSP FW * doesn't write 0 to that field. * So here we would like to only print a warning instead of an error * during psp initialization to avoid breaking hw_init and it doesn't * return -EINVAL.
*/ if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { if (ucode)
dev_warn(psp->adev->dev, "failed to load ucode %s(0x%X) ",
amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); if (psp_err_warn(psp))
dev_warn(
psp->adev->dev, "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
psp->cmd_buf_mem->cmd_id,
psp->cmd_buf_mem->resp.status); /* If any firmware (including CAP) load fails under SRIOV, it should * return failure to stop the VF from initializing. * Also return failure in case of timeout
*/ if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
ret = -EINVAL; gotoexit;
}
}
/* Record the TMR address PSP placed the ucode at. */
if (ucode) {
ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
}
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
if (!ret)
*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
release_psp_cmd_buf(psp);
return ret;
}
/* Set up Trusted Memory Region */
/*
 * NOTE(review): truncated -- the function below has no closing brace and
 * runs into psp_skip_tmr(); the TMR allocation that upstream performs
 * after the TOC load is missing. Left byte-identical.
 */
static int psp_tmr_init(struct psp_context *psp)
{
	int ret = 0;
	int tmr_size;
	void *tmr_buf;
	void **pptr;
	/*
	 * According to HW engineer, they prefer the TMR address be "naturally
	 * aligned" , e.g. the start address be an integer divide of TMR size.
	 *
	 * Note: this memory need be reserved till the driver
	 * uninitializes.
	 */
	tmr_size = PSP_TMR_SIZE(psp->adev);
	/* For ASICs support RLC autoload, psp will parse the toc
	 * and calculate the total size of TMR needed
	 */
	if (!amdgpu_sriov_vf(psp->adev) &&
	    psp->toc.start_addr &&
	    psp->toc.size_bytes &&
	    psp->fw_pri_buf) {
		ret = psp_load_toc(psp, &tmr_size);
		if (ret) {
			dev_err(psp->adev->dev, "Failed to load toc\n");
			return ret;
		}
	}
static bool psp_skip_tmr(struct psp_context *psp)
{
switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
return true;
default:
return false;
}
}
/*
 * NOTE(review): truncated -- the function below has no closing brace and
 * runs into psp_tmr_unload(); the trailing command-buffer release and
 * return are missing. Left byte-identical.
 */
static int psp_tmr_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;
	/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
	 * Already set up by host driver.
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;
	cmd = acquire_psp_cmd_buf(psp);
	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
	if (psp->tmr_bo)
		dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
			 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
/*
 * NOTE(review): corrupted -- psp_tmr_unload() below is merged with a
 * fragment of a firmware-reservation query helper (the `mp0_ip_ver`
 * switch, reserv_addr/reserv_size locals and `mode` have no visible
 * declarations here). Left byte-identical.
 */
static int psp_tmr_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;
	/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
	 * as TMR is not loaded at all
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;
	switch (mp0_ip_ver) {
	case IP_VERSION(14, 0, 2):
		if (adev->psp.sos.fw_version < 0x3b0e0d)
			return 0;
		break;
	case IP_VERSION(14, 0, 3):
		if (adev->psp.sos.fw_version < 0x3a0e14)
			return 0;
		break;
	default:
		return 0;
	}
	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
	if (ret)
		return ret;
	ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
	if (ret)
		return ret;
	/* The reserved region is expected to sit at the top of VRAM. */
	if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
		dev_warn(adev->dev, "reserve fw region is not valid!\n");
		return 0;
	}
	dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	release_psp_cmd_buf(psp);
	return ret;
}
/*
 * NOTE(review): truncated -- the function below has no body beyond its
 * guard clauses and no closing brace; it runs into psp_reg_program().
 * Left byte-identical.
 */
static int psp_asd_initialize(struct psp_context *psp)
{
	int ret;
	/* If PSP version doesn't match ASD version, asd loading will be failed.
	 * add workaround to bypass it for sriov now.
	 * TODO: add version check to make it common
	 */
	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
		return 0;
	/* bypass asd if display hardware is not available */
	if (!amdgpu_device_has_display_hardware(psp->adev) &&
	    amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
		return 0;
/*
 * NOTE(review): corrupted -- psp_reg_program() below loses its tail
 * (cmd buffer release / return) and is merged with a fragment of the
 * XGMI TA initialization path. Left byte-identical.
 */
int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
		uint32_t value)
{
	struct psp_gfx_cmd_resp *cmd;
	int ret = 0;
	if (reg >= PSP_REG_LAST)
		return -EINVAL;
	cmd = acquire_psp_cmd_buf(psp);
	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	if (ret)
		dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
	/* XGMI TA fragment: allocate shared buffer on first use, then load. */
	if (!psp->xgmi_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
		if (ret)
			return ret;
	}
	/* Load XGMI TA */
	ret = psp_ta_load(psp, &psp->xgmi_context.context);
	if (!ret)
		psp->xgmi_context.context.initialized = true;
	else
		return ret;
/*
* Chips that support extended topology information require the driver to
* reflect topology information in the opposite direction. This is
* because the TA has already exceeded its link record limit and if the
* TA holds bi-directional information, the driver would have to do
* multiple fetches instead of just two.
*/
/* Patch the reverse (peer -> us) entry in the peer device's cached XGMI
 * topology so both directions agree, since the TA only reports one
 * direction when extended topology data is in use.
 */
static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
					   struct psp_xgmi_node_info node_info)
{
	uint64_t self_node_id = psp->adev->gmc.xgmi.node_id;
	struct amdgpu_device *peer_adev;
	struct amdgpu_hive_info *hive;

	hive = amdgpu_get_xgmi_hive(psp->adev);
	if (WARN_ON(!hive))
		return;

	/* Locate the peer described by node_info within the hive. */
	list_for_each_entry(peer_adev, &hive->device_list, gmc.xgmi.head) {
		struct psp_xgmi_topology_info *peer_top;
		int idx;

		if (peer_adev->gmc.xgmi.node_id != node_info.node_id)
			continue;

		peer_top = &peer_adev->psp.xgmi_context.top_info;
		for (idx = 0; idx < peer_top->num_nodes; idx++) {
			if (peer_top->nodes[idx].node_id != self_node_id)
				continue;

			peer_top->nodes[idx].num_hops = node_info.num_hops;
			/*
			 * prevent 0 num_links value re-reflection since reflection
			 * criteria is based on num_hops (direct or indirect).
			 */
			if (node_info.num_links)
				peer_top->nodes[idx].num_links = node_info.num_links;
			break;
		}
		break;
	}

	amdgpu_put_xgmi_hive(hive);
}
/*
 * NOTE(review): corrupted -- `xgmi_cmd` is used below without any visible
 * assignment (upstream points it at the XGMI shared buffer first), and the
 * peer-link query only shows the `ta_port_num_support` branch while
 * `link_info_output` is read but never populated. Left byte-identical;
 * restore from the upstream file before relying on it.
 */
int psp_xgmi_get_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology,
			       bool get_extended_data)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
	int i;
	int ret;
	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;
	/* Fill in the shared memory with topology information as input */
	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;
	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}
	/* Invoke xgmi ta to get the topology information */
	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
	if (ret)
		return ret;
	/* Read the output topology information from the shared memory */
	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
	for (i = 0; i < topology->num_nodes; i++) {
		/* extended data will either be 0 or equal to non-extended data */
		if (topology_info_output->nodes[i].num_hops)
			topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
		/* non-extended data gets everything here so no need to update */
		if (!get_extended_data) {
			topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
			topology->nodes[i].is_sharing_enabled =
					topology_info_output->nodes[i].is_sharing_enabled;
			topology->nodes[i].sdma_engine =
					topology_info_output->nodes[i].sdma_engine;
		}
	}
	/* Invoke xgmi ta again to get the link information */
	if (psp_xgmi_peer_link_info_supported(psp)) {
		struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
		struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
		bool requires_reflection =
			(psp->xgmi_context.supports_extended_data &&
			 get_extended_data) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
				IP_VERSION(13, 0, 6) ||
			amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
				IP_VERSION(13, 0, 14);
		bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
				psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
		/* popluate the shared output buffer rather than the cmd input buffer
		 * with node_ids as the input for GET_PEER_LINKS command execution.
		 * This is required for GET_PEER_LINKS per xgmi ta implementation.
		 * The same requirement for GET_EXTEND_PEER_LINKS command.
		 */
		if (ta_port_num_support) {
			link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
			for (i = 0; i < topology->num_nodes; i++)
				link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
			ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
			if (ret)
				return ret;
			for (i = 0; i < topology->num_nodes; i++) {
				uint8_t node_num_links = ta_port_num_support ?
						link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
				/* accumulate num_links on extended data */
				if (get_extended_data) {
					topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
				} else {
					topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
							topology->nodes[i].num_links : node_num_links;
				}
				/* popluate the connected port num info if supported and available */
				if (ta_port_num_support && topology->nodes[i].num_links) {
					memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
					       sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
				}
				/* reflect the topology information for bi-directionality */
				if (requires_reflection && topology->nodes[i].num_hops)
					psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
			}
		}
	return 0;
}
/*
 * NOTE(review): corrupted -- psp_xgmi_set_topology_info() below loses its
 * body after the validation guard and is merged with the tail of a RAS
 * status-reporting switch that belongs to another function. Left
 * byte-identical.
 */
int psp_xgmi_set_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	int i;
	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;
	/* RAS status-to-warning switch fragment (ras_cmd not declared here). */
	switch (ras_cmd->ras_status) {
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported ip\n");
		break;
	case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: cmd failed due to unsupported error injection\n");
		break;
	case TA_RAS_STATUS__SUCCESS:
		break;
	case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
		if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
			dev_warn(psp->adev->dev,
				 "RAS WARNING: Inject error to critical region is not allowed\n");
		break;
	default:
		dev_warn(psp->adev->dev,
			 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
		break;
	}
}
/*
 * NOTE(review): corrupted -- `ras_cmd` is used below without any visible
 * assignment, and err_out unlocks psp->ras_context.mutex although no lock
 * is visible in this fragment (upstream takes the mutex and points ras_cmd
 * at the RAS shared buffer first). Left byte-identical.
 */
static int psp_ras_send_cmd(struct psp_context *psp,
		enum ras_command cmd_id, void *in, void *out)
{
	struct ta_ras_shared_memory *ras_cmd;
	uint32_t cmd = cmd_id;
	int ret = 0;
	/* Copy the per-command input payload into the shared buffer. */
	switch (cmd) {
	case TA_RAS_COMMAND__ENABLE_FEATURES:
	case TA_RAS_COMMAND__DISABLE_FEATURES:
		memcpy(&ras_cmd->ras_in_message,
			in, sizeof(ras_cmd->ras_in_message));
		break;
	case TA_RAS_COMMAND__TRIGGER_ERROR:
		memcpy(&ras_cmd->ras_in_message.trigger_error,
			in, sizeof(ras_cmd->ras_in_message.trigger_error));
		break;
	case TA_RAS_COMMAND__QUERY_ADDRESS:
		memcpy(&ras_cmd->ras_in_message.address,
			in, sizeof(ras_cmd->ras_in_message.address));
		break;
	default:
		dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
		ret = -EINVAL;
		goto err_out;
	}
	ras_cmd->cmd_id = cmd;
	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	/* Copy the per-command output payload back to the caller. */
	switch (cmd) {
	case TA_RAS_COMMAND__TRIGGER_ERROR:
		if (!ret && out)
			memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
		break;
	case TA_RAS_COMMAND__QUERY_ADDRESS:
		if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
			ret = -EINVAL;
		else if (out)
			memcpy(out,
				&ras_cmd->ras_out_message.address,
				sizeof(ras_cmd->ras_out_message.address));
		break;
	default:
		break;
	}
err_out:
	mutex_unlock(&psp->ras_context.mutex);
	return ret;
}
/*
 * NOTE(review): truncated -- only the signature and locals of
 * psp_ras_invoke() remain; the body is missing and the text runs into
 * psp_ras_enable_features(). Left byte-identical.
 */
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;
/* Enable or disable RAS features via the RAS TA.
 * Returns -EINVAL when the TA is not initialized, info is NULL, or the
 * command fails; 0 on success.
 */
int psp_ras_enable_features(struct psp_context *psp,
		union ta_ras_cmd_input *info, bool enable)
{
	enum ras_command cmd_id;

	if (!info || !psp->ras_context.context.initialized)
		return -EINVAL;

	if (enable)
		cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
	else
		cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;

	/* Any send failure is reported uniformly as -EINVAL. */
	return psp_ras_send_cmd(psp, cmd_id, info, NULL) ? -EINVAL : 0;
}
/* Unload the RAS TA and tear down its context; no-op under SRIOV or when
 * the TA was never initialized.
 */
int psp_ras_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev) ||
	    !psp->ras_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->ras_context.context);
	psp->ras_context.context.initialized = false;
	mutex_destroy(&psp->ras_context.mutex);

	return ret;
}
/*
 * NOTE(review): corrupted -- after the shared-buffer allocation the body
 * jumps into a fragment of the error-injection path (`ras_status`, `info`
 * have no visible declarations) and the upstream TA load / mutex init are
 * missing. Left byte-identical.
 */
int psp_ras_initialize(struct psp_context *psp)
{
	int ret;
	uint32_t boot_cfg = 0xFF;
	struct amdgpu_device *adev = psp->adev;
	struct ta_ras_shared_memory *ras_cmd;
	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(adev))
		return 0;
	if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
	    !adev->psp.ras_context.context.bin_desc.start_addr) {
		dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
		return 0;
	}
	/* Reconcile the GECC setting in boot config with the module params. */
	if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
		/* query GECC enablement status from boot config
		 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
		 */
		ret = psp_boot_config_get(adev, &boot_cfg);
		if (ret)
			dev_warn(adev->dev, "PSP get boot config failed\n");
		if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
		    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
			dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
			dev_warn(adev->dev,
				 "To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
		} else {
			if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
			    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
				if (boot_cfg == 1) {
					dev_info(adev->dev, "GECC is enabled\n");
				} else {
					/* enable GECC in next boot cycle if it is disabled
					 * in boot config, or force enable GECC if failed to
					 * get boot configuration
					 */
					ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
					if (ret)
						dev_warn(adev->dev, "PSP set boot config failed\n");
					else
						dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
				}
			} else {
				if (!boot_cfg) {
					if (!adev->ras_default_ecc_enabled &&
					    amdgpu_ras_enable != 1 &&
					    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
						dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
					else
						dev_info(adev->dev, "GECC is disabled\n");
				} else {
					/* disable GECC in next boot cycle if ras is
					 * disabled by module parameter amdgpu_ras_enable
					 * and/or amdgpu_ras_mask, or boot_config_get call
					 * is failed
					 */
					ret = psp_boot_config_set(adev, 0);
					if (ret)
						dev_warn(adev->dev, "PSP set boot config failed\n");
					else
						dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
				}
			}
		}
	}
	if (!psp->ras_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
		if (ret)
			return ret;
	}
	/* Error-injection fragment from another function begins here. */
	ret = psp_ras_send_cmd(psp,
			TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
	if (ret)
		return -EINVAL;
	/* If err_event_athub occurs error inject was successful, however
	 * return status from TA is no long reliable
	 */
	if (amdgpu_ras_intr_triggered())
		return 0;
	if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
		return -EACCES;
	else if (ras_status)
		return -EINVAL;
	return 0;
}
/* Translate a RAS error address via the RAS TA.
 * Returns -EINVAL when the TA is not initialized or either argument is
 * NULL; otherwise forwards psp_ras_send_cmd()'s result.
 */
int psp_ras_query_address(struct psp_context *psp,
			  struct ta_ras_query_address_input *addr_in,
			  struct ta_ras_query_address_output *addr_out)
{
	if (!addr_in || !addr_out ||
	    !psp->ras_context.context.initialized)
		return -EINVAL;

	return psp_ras_send_cmd(psp, TA_RAS_COMMAND__QUERY_ADDRESS,
				addr_in, addr_out);
}
// ras end
// HDCP start
/* Load the HDCP TA and mark its context initialized.
 * No-op under SRIOV, when display hardware is harvested, or when the
 * optional TA ucode is absent.
 */
static int psp_hdcp_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	/* bypass hdcp initialization if dmu is harvested */
	if (amdgpu_sriov_vf(psp->adev) ||
	    !amdgpu_device_has_display_hardware(psp->adev))
		return 0;

	if (!psp->hdcp_context.context.bin_desc.size_bytes ||
	    !psp->hdcp_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
		return 0;
	}

	/* Allocate the TA shared buffer once. */
	if (!psp->hdcp_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
		if (ret)
			return ret;
	}

	ret = psp_ta_load(psp, &psp->hdcp_context.context);
	if (ret)
		return ret;

	psp->hdcp_context.context.initialized = true;
	mutex_init(&psp->hdcp_context.mutex);

	return 0;
}
/*
 * NOTE(review): truncated -- only the guard clauses of psp_hdcp_invoke()
 * remain; the TA invocation and closing brace are missing and the text
 * runs into psp_hdcp_terminate(). Left byte-identical.
 */
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;
	if (!psp->hdcp_context.context.initialized)
		return 0;
/* Unload the HDCP TA; no-op under SRIOV or when it was never loaded. */
static int psp_hdcp_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev) ||
	    !psp->hdcp_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->hdcp_context.context);
	psp->hdcp_context.context.initialized = false;

	return ret;
}
// HDCP end
// DTM start
/* Load the DTM TA and mark its context initialized.
 * No-op under SRIOV, when display hardware is harvested, or when the
 * optional TA ucode is absent.
 */
static int psp_dtm_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	/* bypass dtm initialization if dmu is harvested */
	if (amdgpu_sriov_vf(psp->adev) ||
	    !amdgpu_device_has_display_hardware(psp->adev))
		return 0;

	if (!psp->dtm_context.context.bin_desc.size_bytes ||
	    !psp->dtm_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
		return 0;
	}

	/* Allocate the TA shared buffer once. */
	if (!psp->dtm_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
		if (ret)
			return ret;
	}

	ret = psp_ta_load(psp, &psp->dtm_context.context);
	if (ret)
		return ret;

	psp->dtm_context.context.initialized = true;
	mutex_init(&psp->dtm_context.mutex);

	return 0;
}
/*
 * NOTE(review): truncated -- only the guard clauses of psp_dtm_invoke()
 * remain; the TA invocation and closing brace are missing and the text
 * runs into psp_dtm_terminate(). Left byte-identical.
 */
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;
	if (!psp->dtm_context.context.initialized)
		return 0;
/* Unload the DTM TA; no-op under SRIOV or when it was never loaded. */
static int psp_dtm_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev) ||
	    !psp->dtm_context.context.initialized)
		return 0;

	ret = psp_ta_unload(psp, &psp->dtm_context.context);
	psp->dtm_context.context.initialized = false;

	return ret;
}
// DTM end
// RAP start
/* Load the RAP TA and run its INITIALIZE command. On command failure the
 * TA is terminated and its shared buffer freed so nothing is left loaded.
 */
static int psp_rap_initialize(struct psp_context *psp)
{
	enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->rap_context.context.bin_desc.size_bytes ||
	    !psp->rap_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
		return 0;
	}

	/* Allocate the TA shared buffer once. */
	if (!psp->rap_context.context.mem_context.shared_buf) {
		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
		if (ret)
			return ret;
	}

	ret = psp_ta_load(psp, &psp->rap_context.context);
	if (ret)
		return ret;

	psp->rap_context.context.initialized = true;
	mutex_init(&psp->rap_context.mutex);

	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
	if (ret || status != TA_RAP_STATUS__SUCCESS) {
		psp_rap_terminate(psp);
		/* free rap shared memory */
		psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);

		dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
			 ret, status);

		return ret;
	}

	return 0;
}
/* Unload the RAP TA if it was loaded; returns the unload result. */
static int psp_rap_terminate(struct psp_context *psp)
{
	int ret = 0;

	if (psp->rap_context.context.initialized) {
		ret = psp_ta_unload(psp, &psp->rap_context.context);
		psp->rap_context.context.initialized = false;
	}

	return ret;
}
/*
 * NOTE(review): truncated -- only the guard clauses of psp_rap_invoke()
 * remain; the invocation body and closing brace are missing and the text
 * runs into psp_securedisplay_initialize(). Left byte-identical.
 */
int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
{
	struct ta_rap_shared_memory *rap_cmd;
	int ret = 0;
	if (!psp->rap_context.context.initialized)
		return 0;
	/* Only these two RAP commands are accepted. */
	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
		return -EINVAL;
/* securedisplay start */
/*
 * NOTE(review): truncated -- the function below is missing its closing
 * brace and the upstream post-load TA query; `securedisplay_cmd` is
 * declared but never used in the visible portion. Left byte-identical.
 */
static int psp_securedisplay_initialize(struct psp_context *psp)
{
	int ret;
	struct ta_securedisplay_cmd *securedisplay_cmd;
	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;
	/* bypass securedisplay initialization if dmu is harvested */
	if (!amdgpu_device_has_display_hardware(psp->adev))
		return 0;
	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
	    !psp->securedisplay_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev,
			 "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
		return 0;
	}
	if (!psp->securedisplay_context.context.initialized) {
		ret = psp_ta_init_shared_buf(psp,
					     &psp->securedisplay_context.context.mem_context);
		if (ret)
			return ret;
	}
	ret = psp_ta_load(psp, &psp->securedisplay_context.context);
	if (!ret && !psp->securedisplay_context.context.resp_status) {
		psp->securedisplay_context.context.initialized = true;
		mutex_init(&psp->securedisplay_context.mutex);
	} else {
		/* don't try again */
		psp->securedisplay_context.context.bin_desc.size_bytes = 0;
		return ret;
	}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.