/* * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. *
*/
/* This region is read-only and reserved from system use */
discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC); if (discv_regn) {
memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
memunmap(discv_regn); return 0;
}
staticint amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
uint8_t *binary)
{ bool sz_valid = true;
uint64_t vram_size; int i, ret = 0;
u32 msg;
if (!amdgpu_sriov_vf(adev)) { /* It can take up to two second for IFWI init to complete on some dGPUs, * but generally it should be in the 60-100ms range. Normally this starts * as soon as the device gets power so by the time the OS loads this has long * completed. However, when a card is hotplugged via e.g., USB4, we need to * wait for this to complete. Once the C2PMSG is updated, we can * continue.
*/
for (i = 0; i < 2000; i++) {
msg = RREG32(mmMP0_SMN_C2PMSG_33); if (msg & 0x80000000) break;
msleep(1);
}
}
staticvoid amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{ /* * So far, apply this quirk only on those Navy Flounder boards which * have a bad harvest table of VCN config.
*/ if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) { switch (adev->pdev->revision) { case 0xC1: case 0xC2: case 0xC3: case 0xC5: case 0xC7: case 0xCF: case 0xDF:
adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1; break; default: break;
}
}
}
/* Read from file if it is the preferred option */
fw_name = amdgpu_discovery_get_fw_name(adev); if (fw_name != NULL) {
drm_dbg(&adev->ddev, "use ip discovery information from file");
r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name); if (r) goto out;
} else {
drm_dbg(&adev->ddev, "use ip discovery information from memory");
r = amdgpu_discovery_read_binary_from_mem(
adev, adev->mman.discovery_bin); if (r) goto out;
}
/* check the ip discovery binary signature */ if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
dev_err(adev->dev, "get invalid ip discovery binary signature\n");
r = -EINVAL; goto out;
}
/* scan harvest bit of all IP data structures */ for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
ip = (struct ip *)(adev->mman.discovery_bin +
ip_offset);
inst = ip->number_instance;
hw_id = le16_to_cpu(ip->hw_id); if (amdgpu_discovery_validate_ip(adev, inst, hw_id)) goto next_ip;
for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) { /* Here we satisfy the condition that, at + size <= PAGE_SIZE.
*/ if (at + 12 > PAGE_SIZE) break;
res = sysfs_emit_at(buf, at, "0x%08X\n",
ip_hw_instance->base_addr[ii]); if (res <= 0) break;
at += res;
}
/* If there are more ip_die_entry attrs, other than the number of IPs, * we can make this intro an array of attrs, and then initialize * ip_die_entry_attrs in a loop.
*/ staticstruct ip_die_entry_attribute num_ips_attr =
__ATTR_RO(num_ips);
/* Until a uniform way is figured, get mask based on hwid */ switch (hw_id) { case VCN_HWID: /* VCN vs UVD+VCE */ if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
harvest = ((1 << inst) & adev->vcn.inst_mask) == 0; break; case DMU_HWID: if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
harvest = 0x1; break; case UMC_HWID: /* TODO: It needs another parsing; for now, ignore.*/ break; case GC_HWID:
harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0; break; case SDMA0_HWID:
harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0; break; default: break;
}
/* Find all IPs of a given HW ID, and add their instance to * #die/#hw_id/#instance/<attributes>
*/ for (ii = 0; ii < HW_ID_MAX; ii++) { struct ip_hw_id *ip_hw_id = NULL;
size_t ip_offset = _ip_offset;
DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
/* We have a hw_id match; register the hw * block if not yet registered.
*/ if (!ip_hw_id) {
ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL); if (!ip_hw_id) return -ENOMEM;
ip_hw_id->hw_id = ii;
kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
res = kset_register(&ip_hw_id->hw_id_kset); if (res) {
DRM_ERROR("Couldn't register ip_hw_id kset");
kfree(ip_hw_id); return res;
} if (hw_id_names[ii]) {
res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
&ip_hw_id->hw_id_kset.kobj,
hw_id_names[ii]); if (res) {
DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
hw_id_names[ii],
kobject_name(&ip_die_entry->ip_kset.kobj));
}
}
}
/* Now register its instance.
*/
ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
base_addr,
ip->num_base_address),
GFP_KERNEL); if (!ip_hw_instance) {
DRM_ERROR("no memory for ip_hw_instance"); return -ENOMEM;
}
ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
ip_hw_instance->num_instance = ip->instance_number;
ip_hw_instance->major = ip->major;
ip_hw_instance->minor = ip->minor;
ip_hw_instance->revision = ip->revision;
ip_hw_instance->harvest =
amdgpu_discovery_get_harvest_info(
adev, ip_hw_instance->hw_id,
ip_hw_instance->num_instance);
ip_hw_instance->num_base_addresses = ip->num_base_address;
for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) { if (reg_base_64)
ip_hw_instance->base_addr[kk] =
lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF; else
ip_hw_instance->base_addr[kk] = ip->base_address[kk];
}
for (k = 0; k < num_base_address; k++) { /* * convert the endianness of base addresses in place, * so that we don't need to convert them when accessing adev->reg_offset.
*/ if (ihdr->base_addr_64_bit) /* Truncate the 64bit base address from ip discovery * and only store lower 32bit ip base in reg_offset[]. * Bits > 32 follows ASIC specific format, thus just * discard them and handle it within specific ASIC. * By this way reg_offset[] and related helpers can * stay unchanged. * The base address is in dwords, thus clear the * highest 2 bits to store.
*/
ip->base_address[k] =
lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF; else
ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
}
for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) { if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
hw_id_map[hw_ip] != 0) {
DRM_DEBUG("set register base offset for %s\n",
hw_id_names[le16_to_cpu(ip->hw_id)]);
adev->reg_offset[hw_ip][ip->instance_number] =
ip->base_address; /* Instance support is somewhat inconsistent. * SDMA is a good example. Sienna cichlid has 4 total * SDMA instances, each enumerated separately (HWIDs * 42, 43, 68, 69). Arcturus has 8 total SDMA instances, * but they are enumerated as multiple instances of the * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another * example. On most chips there are multiple instances * with the same HWID.
*/
staticint amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{ struct binary_header *bhdr; union vcn_info *vcn_info;
u16 offset; int v;
if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n"); return -EINVAL;
}
/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES * but that may change in the future with new GPUs so keep this * check for defensive purposes.
*/ if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
dev_err(adev->dev, "invalid vcn instances\n"); return -EINVAL;
}
switch (le16_to_cpu(vcn_info->v1.header.version_major)) { case 1: /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES * so this won't overflow.
*/ for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
adev->vcn.inst[v].vcn_codec_disable_mask =
le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
} break; default:
dev_err(adev->dev, "Unhandled VCN info table %d.%d\n",
le16_to_cpu(vcn_info->v1.header.version_major),
le16_to_cpu(vcn_info->v1.header.version_minor)); return -EINVAL;
} return 0;
}
nhdr = (struct nps_info_header *)(nps_data); if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data,
le32_to_cpu(nhdr->size_bytes),
checksum)) {
dev_err(adev->dev, "nps data refresh, checksum mismatch\n"); return -EINVAL;
}
return 0;
}
/**
 * amdgpu_discovery_get_nps_info - fetch NPS (memory partition) ranges
 * @adev: amdgpu device pointer
 * @nps_type: out - NPS mode reported by the table
 * @ranges: out - kvcalloc'ed array of memory ranges; caller owns and must
 *          kvfree() it
 * @range_cnt: out - number of entries in @ranges
 * @refresh: re-read the table from hardware instead of using the cached
 *           discovery binary
 *
 * Returns 0 on success, negative errno on failure.
 */
int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
				  uint32_t *nps_type,
				  struct amdgpu_gmc_memrange **ranges,
				  int *range_cnt, bool refresh)
{
	struct amdgpu_gmc_memrange *mem_ranges;
	struct binary_header *bhdr;
	union nps_info *nps_info;
	union nps_info nps_data;
	u16 offset;
	int i, r;

	if (!nps_type || !range_cnt || !ranges)
		return -EINVAL;

	if (refresh) {
		r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
		if (r)
			return r;
		nps_info = &nps_data;
	} else {
		if (!adev->mman.discovery_bin) {
			dev_err(adev->dev,
				"fetch mem range failed, ip discovery uninitialized\n");
			return -EINVAL;
		}

		/* Bug fix: nps_info was dereferenced without ever being
		 * assigned on this path, and the else-branch was left
		 * unclosed.  Locate the NPS_INFO table in the cached
		 * discovery binary.
		 * NOTE(review): lookup restored from the discovery-table
		 * layout used elsewhere in this file — confirm against
		 * upstream.
		 */
		bhdr = (struct binary_header *)adev->mman.discovery_bin;
		offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
		if (!offset)
			return -ENOENT;

		nps_info = (union nps_info *)(adev->mman.discovery_bin + offset);
	}

	switch (le16_to_cpu(nps_info->v1.header.version_major)) {
	case 1:
		mem_ranges = kvcalloc(nps_info->v1.count, sizeof(*mem_ranges),
				      GFP_KERNEL);
		if (!mem_ranges)
			return -ENOMEM;
		*nps_type = nps_info->v1.nps_type;
		*range_cnt = nps_info->v1.count;
		for (i = 0; i < *range_cnt; i++) {
			mem_ranges[i].base_address =
				nps_info->v1.instance_info[i].base_address;
			mem_ranges[i].limit_address =
				nps_info->v1.instance_info[i].limit_address;
			mem_ranges[i].nid_mask = -1;
			mem_ranges[i].flags = 0;
		}
		*ranges = mem_ranges;
		break;
	default:
		dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
			le16_to_cpu(nps_info->v1.header.version_major),
			le16_to_cpu(nps_info->v1.header.version_minor));
		return -EINVAL;
	}

	return 0;
}
/**
 * amdgpu_discovery_set_common_ip_blocks - register the SOC "common" IP block
 * @adev: amdgpu device pointer
 *
 * Selects the common IP block implementation based on the GC IP version and
 * adds it to the device's IP block list.
 *
 * Returns 0 on success, -EINVAL for an unknown GC version.
 */
static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
{
	/* what IP to use for this? */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add common ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}
/**
 * amdgpu_discovery_set_gmc_ip_blocks - register the GMC (memory controller)
 * IP block
 * @adev: amdgpu device pointer
 *
 * Selects the GMC IP block implementation based on the GC IP version and
 * adds it to the device's IP block list.
 *
 * Returns 0 on success, -EINVAL for an unknown GC version.
 */
static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
{
	/* use GC or MMHUB IP version */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gmc ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}
/**
 * amdgpu_discovery_set_ih_ip_blocks - register the interrupt handler IP block
 * @adev: amdgpu device pointer
 *
 * Selects the IH implementation from the OSSSYS IP version and adds it to
 * the device's IP block list.
 *
 * Returns 0 on success, -EINVAL for an unknown OSSSYS version.
 */
static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 3, 0):
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		break;
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(4, 4, 5):
		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 1):
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
		break;
	case IP_VERSION(6, 1, 0):
		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
		break;
	case IP_VERSION(7, 0, 0):
		amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}
/**
 * amdgpu_discovery_set_psp_ip_blocks - register the PSP (security processor)
 * IP block
 * @adev: amdgpu device pointer
 *
 * Selects the PSP implementation from the MP0 IP version and adds it to the
 * device's IP block list.
 *
 * Returns 0 on success, -EINVAL for an unknown MP0 version.
 */
static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(13, 0, 12):
	case IP_VERSION(13, 0, 14):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		break;
	case IP_VERSION(13, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
	case IP_VERSION(14, 0, 5):
		amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, MP0_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}
/**
 * amdgpu_discovery_set_smu_ip_blocks - register the SMU (power management)
 * IP block
 * @adev: amdgpu device pointer
 *
 * Selects the SMU implementation from the MP1 IP version and adds it to the
 * device's IP block list.  Note two version-level special cases: MP1 11.0.2
 * uses the legacy powerplay block except on Arcturus, and MP1 11.0.8 only
 * registers an SMU block on Cyan Skillfish 2 APUs.
 *
 * Returns 0 on success, -EINVAL for an unknown MP1 version.
 */
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(13, 0, 14):
	case IP_VERSION(13, 0, 12):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
	case IP_VERSION(14, 0, 4):
	case IP_VERSION(14, 0, 5):
		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, MP1_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}
if (!amdgpu_device_has_dc_support(adev)) return 0;
#ifdefined(CONFIG_DRM_AMD_DC) if (amdgpu_ip_version(adev, DCE_HWIP, 0)) { switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): case IP_VERSION(2, 0, 2): case IP_VERSION(2, 0, 0): case IP_VERSION(2, 0, 3): case IP_VERSION(2, 1, 0): case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 2): case IP_VERSION(3, 0, 3): case IP_VERSION(3, 0, 1): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): case IP_VERSION(3, 1, 4): case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): case IP_VERSION(3, 2, 0): case IP_VERSION(3, 2, 1): case IP_VERSION(3, 5, 0): case IP_VERSION(3, 5, 1): case IP_VERSION(3, 6, 0): case IP_VERSION(4, 1, 0): /* TODO: Fix IP version. DC code expects version 4.0.1 */ if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);
if (amdgpu_sriov_vf(adev))
amdgpu_discovery_set_sriov_display(adev); else
amdgpu_device_ip_block_add(adev, &dm_ip_block); break; default:
dev_err(adev->dev, "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
amdgpu_ip_version(adev, DCE_HWIP, 0)); return -EINVAL;
}
} elseif (amdgpu_ip_version(adev, DCI_HWIP, 0)) { switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) { case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): case IP_VERSION(12, 1, 0): if (amdgpu_sriov_vf(adev))
amdgpu_discovery_set_sriov_display(adev); else
amdgpu_device_ip_block_add(adev, &dm_ip_block); break; default:
dev_err(adev->dev,
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.27 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.