/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. *
*/
/* * Helper function to query gpu virtualization capability * * @adev: amdgpu_device pointer * * Return true if gpu virtualization is supported or false if not
*/ bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev)
{
u32 fw_cap;
/*
 * convert_atom_mem_type_to_vram_type - translate an ATOM BIOS memory
 * type value into the driver's AMDGPU_VRAM_TYPE_* enumeration.
 *
 * @adev: amdgpu_device pointer; only the AMD_IS_APU flag is consulted,
 *        since APUs and dGPUs use different ATOM memory-type namespaces
 * @atom_mem_type: raw memory type value read from the ATOM data tables
 *
 * Return: the matching AMDGPU_VRAM_TYPE_* value, or
 * AMDGPU_VRAM_TYPE_UNKNOWN if the value is not recognized.
 */
static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		/* APU carveout: DDR/LPDDR system memory types. */
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
			break;
		case Ddr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR5;
			break;
		case LpDdr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		/* Discrete GPU: dedicated GDDR/HBM VRAM types. */
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
		case ATOM_DGPU_VRAM_TYPE_HBM2E:
		case ATOM_DGPU_VRAM_TYPE_HBM3:
			/* HBM2/HBM2E/HBM3 are all reported as generic HBM */
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM3E:
			vram_type = AMDGPU_VRAM_TYPE_HBM3E;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, int *vram_width, int *vram_type, int *vram_vendor)
{ struct amdgpu_mode_info *mode_info = &adev->mode_info; int index, i = 0;
u16 data_offset, size; union igp_info *igp_info; union vram_info *vram_info; union umc_info *umc_info; union vram_module *vram_module;
u8 frev, crev;
u8 mem_type;
u8 mem_vendor;
u32 mem_channel_number;
u32 mem_channel_width;
u32 module_id;
if (adev->flags & AMD_IS_APU)
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
integratedsysteminfo); else { switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1):
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info); break; default:
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info);
}
} if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size,
&frev, &crev, &data_offset)) { if (adev->flags & AMD_IS_APU) {
igp_info = (union igp_info *)
(mode_info->atom_context->bios + data_offset); switch (frev) { case 1: switch (crev) { case 11: case 12:
mem_channel_number = igp_info->v11.umachannelnumber; if (!mem_channel_number)
mem_channel_number = 1;
mem_type = igp_info->v11.memorytype; if (mem_type == LpDdr5MemType)
mem_channel_width = 32; else
mem_channel_width = 64; if (vram_width)
*vram_width = mem_channel_number * mem_channel_width; if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); break; default: return -EINVAL;
} break; case 2: switch (crev) { case 1: case 2:
mem_channel_number = igp_info->v21.umachannelnumber; if (!mem_channel_number)
mem_channel_number = 1;
mem_type = igp_info->v21.memorytype; if (mem_type == LpDdr5MemType)
mem_channel_width = 32; else
mem_channel_width = 64; if (vram_width)
*vram_width = mem_channel_number * mem_channel_width; if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); break; case 3:
mem_channel_number = igp_info->v23.umachannelnumber; if (!mem_channel_number)
mem_channel_number = 1;
mem_type = igp_info->v23.memorytype; if (mem_type == LpDdr5MemType)
mem_channel_width = 32; else
mem_channel_width = 64; if (vram_width)
*vram_width = mem_channel_number * mem_channel_width; if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); break; default: return -EINVAL;
} break; default: return -EINVAL;
}
} else { switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1):
umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
/* * Return true if vbios enabled ecc by default, if umc info table is available * or false if ecc is not enabled or umc info table is not available
*/ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{ struct amdgpu_mode_info *mode_info = &adev->mode_info; int index;
u16 data_offset, size; union umc_info *umc_info;
u8 frev, crev; bool mem_ecc_enabled = false;
u8 umc_config;
u32 umc_config1;
adev->ras_default_ecc_enabled = false;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
umc_info);
/**
 * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
 * @adev: amdgpu_device pointer
 * @i2c_address: pointer to u8; if not NULL, will contain
 *	the RAS EEPROM address if the function returns true
 *
 * Return true if VBIOS supports RAS EEPROM address reporting,
 * else return false. If true and @i2c_address is not NULL,
 * will contain the RAS ROM address.
 */
bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
				      u8 *i2c_address)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev,
					  &data_offset)) {
		/* support firmware_info 3.4 + */
		if ((frev == 3 && crev >= 4) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			/* The ras_rom_i2c_slave_addr should ideally
			 * be a 19-bit EEPROM address, which would be
			 * used as is by the driver; see top of
			 * amdgpu_eeprom.c.
			 *
			 * When this is the case, 0 is of course a
			 * valid RAS EEPROM address, in which case,
			 * we'll drop the first "if (firm...)" and only
			 * leave the check for the pointer.
			 *
			 * The reason this works right now is because
			 * ras_rom_i2c_slave_addr contains the EEPROM
			 * device type qualifier 1010b in the top 4
			 * bits.
			 */
			if (firmware_info->v34.ras_rom_i2c_slave_addr) {
				if (i2c_address)
					*i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
				return true;
			}
		}
	}

	return false;
}
/* Overlay of the ATOM smu_info data-table revisions the driver parses;
 * presumably the frev/crev values from the table header select which
 * member view is valid — TODO confirm against the parsing code.
 */
union smu_info { struct atom_smu_info_v3_1 v31; struct atom_smu_info_v4_0 v40;
};
/* * Helper function to query two stage mem training capability * * @adev: amdgpu_device pointer * * Return true if two stage mem training is supported or false if not
*/ bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev)
{
u32 fw_cap;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.