/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. *
*/
/*
 * Point the SMC indirect-access window at a byte address in SMC SRAM so
 * that a following access to mmSMC_IND_DATA_11 reads/writes that word.
 *
 * Returns 0 on success, -EINVAL if @smc_addr is not 4-byte aligned or the
 * addressed dword would fall outside the [0, @limit) SRAM area.
 */
static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t limit)
{
	/* The data port transfers whole dwords, so the address must be aligned. */
	PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)),
			"SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)),
			"SMC addr is beyond the SMC RAM area.", return -EINVAL);

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, smc_addr);
	/* on ci, SMC_IND_ACCESS_CNTL is different */
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
	return 0;
}
/*
 * Copy @byte_count bytes from @src into SMC SRAM starting at
 * @smc_start_address (which must be 4-byte aligned and, together with the
 * byte count, lie below @limit).
 *
 * Whole dwords are written big-endian (MSB first) through the indirect
 * data port.  A trailing partial dword is merged with the existing SRAM
 * contents via read-modify-write so bytes beyond the copy are preserved.
 *
 * Returns 0 on success or a negative error code from the address setup.
 */
int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	int result;
	uint32_t data = 0;
	uint32_t original_data;
	uint32_t addr = 0;
	uint32_t extra_shift;

	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)),
			"SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)),
			"SMC address is beyond the SMC RAM area.", return -EINVAL);

	addr = smc_start_address;

	while (byte_count >= 4) {
		/* Bytes are written into the SMC address space with the MSB first. */
		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];

		result = smu7_set_smc_sram_address(hwmgr, addr, limit);
		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {
		/* Partial trailing dword: keep the untouched low bytes intact. */
		data = 0;

		result = smu7_set_smc_sram_address(hwmgr, addr, limit);
		if (0 != result)
			return result;

		original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* Bytes are written into the SMC address space with the MSB first. */
			data = (0x100 * data) + *src++;
			byte_count--;
		}

		data <<= extra_shift;
		data |= (original_data & ~((~0UL) << extra_shift));

		/* The read above advanced the auto-increment index; reset it. */
		result = smu7_set_smc_sram_address(hwmgr, addr, limit);
		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
	}

	return 0;
}
/*
 * Map a powerplay UCODE_ID_* firmware identifier onto the corresponding
 * CGS_UCODE_ID_* value used by the common graphics services layer.
 * Unknown identifiers map to CGS_UCODE_ID_MAXIMUM.
 */
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
{
	switch (fw_type) {
	case UCODE_ID_SMU:
		return CGS_UCODE_ID_SMU;
	case UCODE_ID_SMU_SK:
		return CGS_UCODE_ID_SMU_SK;
	case UCODE_ID_SDMA0:
		return CGS_UCODE_ID_SDMA0;
	case UCODE_ID_SDMA1:
		return CGS_UCODE_ID_SDMA1;
	case UCODE_ID_CP_CE:
		return CGS_UCODE_ID_CP_CE;
	case UCODE_ID_CP_PFP:
		return CGS_UCODE_ID_CP_PFP;
	case UCODE_ID_CP_ME:
		return CGS_UCODE_ID_CP_ME;
	case UCODE_ID_CP_MEC:
		return CGS_UCODE_ID_CP_MEC;
	case UCODE_ID_CP_MEC_JT1:
		return CGS_UCODE_ID_CP_MEC_JT1;
	case UCODE_ID_CP_MEC_JT2:
		return CGS_UCODE_ID_CP_MEC_JT2;
	case UCODE_ID_RLC_G:
		return CGS_UCODE_ID_RLC_G;
	case UCODE_ID_MEC_STORAGE:
		return CGS_UCODE_ID_STORAGE;
	default:
		return CGS_UCODE_ID_MAXIMUM;
	}
}
/*
 * Read one dword from SMC SRAM at @smc_addr into @value.
 * On failure *value is set to 0; the error from the address setup is
 * returned unchanged (0 on success).
 */
int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
{
	int rc;

	rc = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);
	if (rc) {
		*value = 0;
		return rc;
	}

	*value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
	return 0;
}
/*
 * Write one dword @value into SMC SRAM at @smc_addr.
 * Returns 0 on success or the error code from the address setup; nothing
 * is written if the address is invalid.
 */
int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
{
	int result;

	result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);

	if (!result)
		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, value);

	return result;
}
/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
	uint32_t ret;

	/*
	 * Poll the UcodeLoadStatus soft register until the bit for this
	 * firmware type is set, i.e. the SMU reports the load complete.
	 * NOTE(review): body restored from the upstream smu7 SMU manager —
	 * confirm against the project tree.
	 */
	ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
					smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, UcodeLoadStatus),
					fw_type, fw_type);

	return ret;
}
int smu7_init(struct pp_hwmgr *hwmgr)
{ struct smu7_smumgr *smu_data; int r; /* Allocate memory for backend private data */
smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
smu_data->header_buffer.data_size =
((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
/* Allocate FW image data structure and header buffer and
* send the header buffer address to SMU */
r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
smu_data->header_buffer.data_size,
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&smu_data->header_buffer.handle,
&smu_data->header_buffer.mc_addr,
&smu_data->header_buffer.kaddr);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.