/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. *
*/
if (data->registry_data.didt_support) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport); if (data->registry_data.sq_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); if (data->registry_data.db_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping); if (data->registry_data.td_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping); if (data->registry_data.tcp_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); if (data->registry_data.dbr_ramping_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping); if (data->registry_data.edc_didt_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable); if (data->registry_data.gc_didt_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC); if (data->registry_data.psm_didt_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
}
if (data->registry_data.power_containment_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
if (table_info->tdp_table->usClockStretchAmount &&
data->registry_data.clock_stretcher_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
if (data->registry_data.enable_pkg_pwr_tracking_feature)
data->smu_features[GNLD_PPT].supported = true;
if (data->registry_data.enable_tdc_limit_feature)
data->smu_features[GNLD_TDC].supported = true;
if (data->registry_data.thermal_support)
data->smu_features[GNLD_THERMAL].supported = true;
if (data->registry_data.fan_control_support)
data->smu_features[GNLD_FAN_CONTROL].supported = true;
if (data->registry_data.fw_ctf_enabled)
data->smu_features[GNLD_FW_CTF].supported = true;
if (data->registry_data.avfs_support)
data->smu_features[GNLD_AVFS].supported = true;
if (data->registry_data.led_dpm_enabled)
data->smu_features[GNLD_LED_DISPLAY].supported = true;
if (data->registry_data.vr1hot_enabled)
data->smu_features[GNLD_VR1HOT].supported = true;
if (data->registry_data.vr0hot_enabled)
data->smu_features[GNLD_VR0HOT].supported = true;
ret = smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetSmuVersion,
&hwmgr->smu_version); if (ret) return ret;
/* ACG firmware has major version 5 */ if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
data->smu_features[GNLD_ACG].supported = true; if (data->registry_data.didt_support)
data->smu_features[GNLD_DIDT].supported = true;
PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
/* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */ for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd; if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id) break;
}
PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count, "Can't find requested voltage id in vdd_dep_on_socclk table!", return -EINVAL);
/* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0), "Invalid VDDC value", result = -EINVAL;);
/* the voltage should not be zero nor equal to leakage ID */ if (vddc != 0 && vddc != vv_id) {
data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
data->vddc_leakage.count++;
}
}
}
return 0;
}
/**
 * vega10_patch_with_vdd_leakage - Replace a virtual leakage voltage ID with
 * its measured value.
 * @hwmgr: the address of the powerplay hardware manager
 * @voltage: pointer to the voltage value, patched in place
 * @leakage_table: table mapping leakage voltage IDs to actual voltages
 *
 * Scans the leakage table for an entry whose leakage ID equals *voltage and,
 * on a match, overwrites *voltage with the corresponding actual voltage.
 * If the value still looks like a virtual leakage ID afterwards, a notice
 * is logged.
 */
static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
		uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
{
	uint32_t idx = 0;

	/* Search for a leakage voltage ID (0xff01 ~ 0xff08) matching *voltage. */
	while (idx < leakage_table->count) {
		if (leakage_table->leakage_id[idx] == *voltage) {
			/* Patch with the actual leakage voltage. */
			*voltage = leakage_table->actual_voltage[idx];
			break;
		}
		idx++;
	}

	/* Anything above ATOM_VIRTUAL_VOLTAGE_ID0 is an unpatched virtual ID. */
	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
		pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
}
/** * vega10_patch_lookup_table_with_leakage - Patch voltage lookup table by EVV leakages. * * @hwmgr: the address of the powerplay hardware manager. * @lookup_table: pointer to voltage lookup table * @leakage_table: pointer to leakage table * return: always 0
*/ staticint vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
phm_ppt_v1_voltage_lookup_table *lookup_table, struct vega10_leakage_voltage *leakage_table)
{
uint32_t i;
for (i = 0; i < lookup_table->count; i++)
vega10_patch_with_vdd_leakage(hwmgr,
&lookup_table->entries[i].us_vdd, leakage_table);
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table, "VDD dependency on SCLK table is missing. This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, "VDD dependency on SCLK table is empty. This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table, "VDD dependency on MCLK table is missing. This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, "VDD dependency on MCLK table is empty. This table is mandatory", return -EINVAL);
/* need to set voltage control types before EVV patching */
data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;
/* VDDCI_MEM */ if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) { if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
}
data->config_telemetry = config_telemetry;
vega10_set_features_platform_caps(hwmgr);
result = vega10_init_dpm_defaults(hwmgr); if (result) return result;
#ifdef PPLIB_VEGA10_EVV_SUPPORT /* Get leakage voltage based on leakage ID. */
PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr), "Get EVV Voltage Failed. Abort Driver loading!", return -1); #endif
/* Patch our voltage dependency table with actual leakage voltage * We need to perform leakage translation before it's used by other functions
*/
vega10_complete_dependency_tables(hwmgr);
/* Parse pptable data read from VBIOS */
vega10_set_private_data_based_on_pptable(hwmgr);
hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
hwmgr->platform_descriptor.clockStep.engineClock = 500;
hwmgr->platform_descriptor.clockStep.memoryClock = 500;
data->total_active_cus = adev->gfx.cu_info.number; if (!hwmgr->not_vf) return result;
/**
 * vega10_trim_voltage_table - Remove repeated voltage values and create table
 * with unique values.
 * @hwmgr: the address of the powerplay hardware manager
 * @vol_table: the pointer to changing voltage table
 * return: 0 in success
 *
 * NOTE(review): in this chunk the loop body reads dep_table, which is neither
 * a parameter nor a local, and vvalue/found/table are declared but never
 * used — the body appears garbled by extraction (it looks like a fragment of
 * a get-voltage-table routine). Verify against the complete source.
 */
static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	uint32_t i, j;
	uint16_t vvalue;
	bool found = false;
	struct pp_atomfwctrl_voltage_table *table;

	/* Copy each dependency-table VDDC value into the voltage table;
	 * smio_low is not used on Vega10 and is cleared. */
	for (i = 0; i < vol_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].vddc;
		vol_table->entries[i].smio_low = 0;
	}

	return 0;
}
/* ---- Voltage Tables ----
 * If the voltage table holds more entries than the SMC state table can
 * accept, drop the lowest entries and keep only the highest
 * @max_vol_steps values, shifting the tail of the table to the front.
 */
static void vega10_trim_voltage_table_to_fit_state_table(
		struct pp_hwmgr *hwmgr,
		uint32_t max_vol_steps,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	unsigned int dst, src;

	/* Nothing to do when the table already fits. */
	if (vol_table->count <= max_vol_steps)
		return;

	/* Keep the higher entries: copy the last max_vol_steps slots down. */
	src = vol_table->count - max_vol_steps;
	for (dst = 0; dst < max_vol_steps; dst++, src++)
		vol_table->entries[dst] = vol_table->entries[src];

	vol_table->count = max_vol_steps;
}
/**
 * vega10_construct_voltage_tables - Create the MVDD, VDDCI and VDDC voltage
 * tables.
 * @hwmgr: the address of the powerplay hardware manager
 * return: always 0
 *
 * Each table is only built when its voltage-control mode requires it, and
 * every table is then trimmed so it fits the 16 entries the SMC state table
 * can hold.
 */
static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	int ret;

	if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
	    data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		ret = vega10_get_mvdd_voltage_table(hwmgr,
				table_info->vdd_dep_on_mclk,
				&data->mvdd_voltage_table);
		PP_ASSERT_WITH_CODE(!ret,
				"Failed to retrieve MVDDC table!",
				return ret);
	}

	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		ret = vega10_get_vddci_voltage_table(hwmgr,
				table_info->vdd_dep_on_mclk,
				&data->vddci_voltage_table);
		PP_ASSERT_WITH_CODE(!ret,
				"Failed to retrieve VDDCI_MEM table!",
				return ret);
	}

	if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
	    data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		ret = vega10_get_vdd_voltage_table(hwmgr,
				table_info->vdd_dep_on_sclk,
				&data->vddc_voltage_table);
		PP_ASSERT_WITH_CODE(!ret,
				"Failed to retrieve VDDCR_SOC table!",
				return ret);
	}

	/* The SMC state table holds at most 16 voltage entries per rail;
	 * on overflow the assert's recovery code trims the table in place. */
	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
			"Too many voltage values for VDDC. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &data->vddc_voltage_table));
	PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
			"Too many voltage values for VDDCI. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &data->vddci_voltage_table));
	PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
			"Too many voltage values for MVDD. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &data->mvdd_voltage_table));

	return 0;
}
/*
 * vega10_init_dpm_state
 * Reset all Soft Min/Max and Hard Min/Max levels of a DPM state to 0xff
 * (i.e. unrestricted).
 *
 * @dpm_state: - the address of the DPM Table to initialize.
 * return: None.
 */
static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
{
	dpm_state->soft_max_level = 0xff;
	dpm_state->soft_min_level = 0xff;
	dpm_state->hard_max_level = 0xff;
	dpm_state->hard_min_level = 0xff;
}
PP_ASSERT_WITH_CODE(bios_pcie_table->count, "Incorrect number of PCIE States from VBIOS!", return -1);
for (i = 0; i < NUM_LINK_LEVELS; i++) { if (data->registry_data.pcieSpeedOverride)
pcie_table->pcie_gen[i] =
data->registry_data.pcieSpeedOverride; else
pcie_table->pcie_gen[i] =
bios_pcie_table->entries[i].gen_speed;
/* * This function is to initialize all DPM state tables * for SMU based on the dependency table. * Dynamic state patching function will then trim these * state tables to the allowed range based * on the power policy or external client requests, * such as UVD request, etc.
*/ staticint vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{ struct vega10_hwmgr *data = hwmgr->backend; struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable); struct vega10_single_dpm_table *dpm_table;
uint32_t i;
PP_ASSERT_WITH_CODE(dep_soc_table, "SOCCLK dependency table is missing. This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1, "SOCCLK dependency table is empty. This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(dep_gfx_table, "GFXCLK dependency table is missing. This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1, "GFXCLK dependency table is empty. This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(dep_mclk_table, "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1, "MCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
/* Initialize Sclk DPM table based on allow Sclk values */
dpm_table = &(data->dpm_table.soc_table);
vega10_setup_default_single_dpm_table(hwmgr,
dpm_table,
dep_soc_table);
/* Initialize Mclk DPM table based on allow Mclk values */
data->dpm_table.mem_table.count = 0;
dpm_table = &(data->dpm_table.mem_table);
vega10_setup_default_single_dpm_table(hwmgr,
dpm_table,
dep_mclk_table); if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
hwmgr->platform_descriptor.overdriveLimit.memoryClock =
dpm_table->dpm_levels[dpm_table->count-1].value;
vega10_init_dpm_state(&(dpm_table->dpm_state));
data->dpm_table.eclk_table.count = 0;
dpm_table = &(data->dpm_table.eclk_table); for (i = 0; i < dep_mm_table->count; i++) { if (i == 0 || dpm_table->dpm_levels
[dpm_table->count - 1].value <=
dep_mm_table->entries[i].eclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].eclk;
dpm_table->dpm_levels[dpm_table->count].enabled = i == 0;
dpm_table->count++;
}
}
vega10_init_dpm_state(&(dpm_table->dpm_state));
data->dpm_table.vclk_table.count = 0;
data->dpm_table.dclk_table.count = 0;
dpm_table = &(data->dpm_table.vclk_table); for (i = 0; i < dep_mm_table->count; i++) { if (i == 0 || dpm_table->dpm_levels
[dpm_table->count - 1].value <=
dep_mm_table->entries[i].vclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].vclk;
dpm_table->dpm_levels[dpm_table->count].enabled = i == 0;
dpm_table->count++;
}
}
vega10_init_dpm_state(&(dpm_table->dpm_state));
dpm_table = &(data->dpm_table.dclk_table); for (i = 0; i < dep_mm_table->count; i++) { if (i == 0 || dpm_table->dpm_levels
[dpm_table->count - 1].value <=
dep_mm_table->entries[i].dclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].dclk;
dpm_table->dpm_levels[dpm_table->count].enabled = i == 0;
dpm_table->count++;
}
}
vega10_init_dpm_state(&(dpm_table->dpm_state));
/* Assume there is no headless Vega10 for now */
dpm_table = &(data->dpm_table.dcef_table);
vega10_setup_default_single_dpm_table(hwmgr,
dpm_table,
dep_dcef_table);
/* Zero out the saved copy of the CUSTOM profile * This will be checked when trying to set the profile * and will require that new values be passed in
*/
data->custom_profile_mode[0] = 0;
data->custom_profile_mode[1] = 0;
data->custom_profile_mode[2] = 0;
data->custom_profile_mode[3] = 0;
/* save a copy of the default DPM table */
memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct vega10_dpm_table));
return 0;
}
/* * vega10_populate_ulv_state * Function to provide parameters for Utral Low Voltage state to SMC. * * @hwmgr: - the address of the hardware manager. * return: Always 0.
*/ staticint vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
{ struct vega10_hwmgr *data = hwmgr->backend; struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
hwmgr,
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
lclock, ÷rs), "Failed to get LCLK clock settings from VBIOS!", return -1);
for (i = 0; i < NUM_LINK_LEVELS; i++) { if (pp_table->PcieGenSpeed[i] > pcie_gen)
pp_table->PcieGenSpeed[i] = pcie_gen;
if (pp_table->PcieLaneCount[i] > pcie_width)
pp_table->PcieLaneCount[i] = pcie_width;
}
if (data->registry_data.pcie_dpm_key_disabled) { for (i = 0; i < NUM_LINK_LEVELS; i++) {
pp_table->PcieGenSpeed[i] = pcie_gen;
pp_table->PcieLaneCount[i] = pcie_width;
}
}
return 0;
}
/*
 * vega10_populate_smc_link_levels - Fill the SMC PCIE link-level table from
 * the driver's PCIE table, then pad the remaining SMC levels with copies of
 * the last populated entry so that all NUM_LINK_LEVELS slots are valid.
 */
static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &data->smc_state_table.pp_table;
	struct vega10_pcie_table *pcie_table = &data->dpm_table.pcie_table;
	int ret = -1;
	uint32_t level, last;

	/* One SMC link level per entry of the driver PCIE table. */
	for (level = 0; level < pcie_table->count; level++) {
		pp_table->PcieGenSpeed[level] = pcie_table->pcie_gen[level];
		pp_table->PcieLaneCount[level] = pcie_table->pcie_lane[level];
		ret = vega10_populate_single_lclk_level(hwmgr,
				pcie_table->lclk[level],
				&pp_table->LclkDid[level]);
		if (ret) {
			pr_info("Populate LClock Level %d Failed!\n", level);
			return ret;
		}
	}

	/* Replicate the last real entry into the unused SMC slots. */
	for (last = level - 1; level < NUM_LINK_LEVELS; level++) {
		pp_table->PcieGenSpeed[level] = pcie_table->pcie_gen[last];
		pp_table->PcieLaneCount[level] = pcie_table->pcie_lane[last];
		ret = vega10_populate_single_lclk_level(hwmgr,
				pcie_table->lclk[last],
				&pp_table->LclkDid[level]);
		if (ret) {
			pr_info("Populate LClock Level %d Failed!\n", level);
			return ret;
		}
	}

	return ret;
}
/** * vega10_populate_single_gfx_level - Populates single SMC GFXSCLK structure * using the provided engine clock * * @hwmgr: the address of the hardware manager * @gfx_clock: the GFX clock to use to populate the structure. * @current_gfxclk_level: location in PPTable for the SMC GFXCLK structure. * @acg_freq: ACG frequenty to return (MHz)
*/ staticint vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
uint32_t *acg_freq)
{ struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable); struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk; struct vega10_hwmgr *data = hwmgr->backend; struct pp_atomfwctrl_clock_dividers_soc15 dividers;
uint32_t gfx_max_clock =
hwmgr->platform_descriptor.overdriveLimit.engineClock;
uint32_t i = 0;
if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock; else { for (i = 0; i < dep_on_sclk->count; i++) { if (dep_on_sclk->entries[i].clk == gfx_clock) break;
}
PP_ASSERT_WITH_CODE(dep_on_sclk->count > i, "Cannot find gfx_clk in SOC_VDD-GFX_CLK!", return -EINVAL);
}
PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
gfx_clock, ÷rs), "Failed to get GFX Clock settings from VBIOS!", return -EINVAL);
/* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
current_gfxclk_level->FbMult =
cpu_to_le32(dividers.ulPll_fb_mult); /* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
current_gfxclk_level->SsFbMult =
cpu_to_le32(dividers.ulPll_ss_fbsmult);
current_gfxclk_level->SsSlewFrac =
cpu_to_le16(dividers.usPll_ss_slew_frac);
current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
/** * vega10_populate_single_soc_level - Populates single SMC SOCCLK structure * using the provided clock. * * @hwmgr: the address of the hardware manager. * @soc_clock: the SOC clock to use to populate the structure. * @current_soc_did: DFS divider to pass back to caller * @current_vol_index: index of current VDD to pass back to caller * return: 0 on success
*/ staticint vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
uint32_t soc_clock, uint8_t *current_soc_did,
uint8_t *current_vol_index)
{ struct vega10_hwmgr *data = hwmgr->backend; struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable); struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc; struct pp_atomfwctrl_clock_dividers_soc15 dividers;
uint32_t i;
if (hwmgr->od_enabled) {
dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *)
&data->odn_dpm_table.vdd_dep_on_socclk; for (i = 0; i < dep_on_soc->count; i++) { if (dep_on_soc->entries[i].clk >= soc_clock) break;
}
} else {
dep_on_soc = table_info->vdd_dep_on_socclk; for (i = 0; i < dep_on_soc->count; i++) { if (dep_on_soc->entries[i].clk == soc_clock) break;
}
}
PP_ASSERT_WITH_CODE(dep_on_soc->count > i, "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table", return -EINVAL);
PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
soc_clock, ÷rs), "Failed to get SOC Clock settings from VBIOS!", return -EINVAL);
/** * vega10_populate_all_graphic_levels - Populates all SMC SCLK levels' structure * based on the trimmed allowed dpm engine clock states * * @hwmgr: the address of the hardware manager
*/ staticint vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{ struct vega10_hwmgr *data = hwmgr->backend; struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
PPTable_t *pp_table = &(data->smc_state_table.pp_table); struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); int result = 0;
uint32_t i, j;
for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_gfx_level(hwmgr,
dpm_table->dpm_levels[i].value,
&(pp_table->GfxclkLevel[i]),
&(pp_table->AcgFreqTable[i])); if (result) return result;
}
j = i - 1; while (i < NUM_GFXCLK_DPM_LEVELS) {
result = vega10_populate_single_gfx_level(hwmgr,
dpm_table->dpm_levels[j].value,
&(pp_table->GfxclkLevel[i]),
&(pp_table->AcgFreqTable[i])); if (result) return result;
i++;
}
dpm_table = &(data->dpm_table.soc_table); for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_soc_level(hwmgr,
dpm_table->dpm_levels[i].value,
&(pp_table->SocclkDid[i]),
&(pp_table->SocDpmVoltageIndex[i])); if (result) return result;
}
j = i - 1; while (i < NUM_SOCCLK_DPM_LEVELS) {
result = vega10_populate_single_soc_level(hwmgr,
dpm_table->dpm_levels[j].value,
&(pp_table->SocclkDid[i]),
&(pp_table->SocDpmVoltageIndex[i])); if (result) return result;
i++;
}
if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
} else { for (i = 0; i < dep_on_mclk->count; i++) { if (dep_on_mclk->entries[i].clk == mem_clock) break;
}
PP_ASSERT_WITH_CODE(dep_on_mclk->count > i, "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!", return -EINVAL);
}
PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, ÷rs), "Failed to get UCLK settings from VBIOS!", return -1);
/** * vega10_populate_all_memory_levels - Populates all SMC MCLK levels' structure * based on the trimmed allowed dpm memory clock states. * * @hwmgr: the address of the hardware manager. * return: PP_Result_OK on success.
*/ staticint vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{ struct vega10_hwmgr *data = hwmgr->backend;
PPTable_t *pp_table = &(data->smc_state_table.pp_table); struct vega10_single_dpm_table *dpm_table =
&(data->dpm_table.mem_table); int result = 0;
uint32_t i, j;
for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_memory_level(hwmgr,
dpm_table->dpm_levels[i].value,
&(pp_table->MemVid[i]),
&(pp_table->UclkLevel[i]),
&(pp_table->MemSocVoltageIndex[i])); if (result) return result;
}
j = i - 1; while (i < NUM_UCLK_DPM_LEVELS) {
result = vega10_populate_single_memory_level(hwmgr,
dpm_table->dpm_levels[j].value,
&(pp_table->MemVid[i]),
&(pp_table->UclkLevel[i]),
&(pp_table->MemSocVoltageIndex[i])); if (result) return result;
i++;
}
for (i = 0; i < DSPCLK_COUNT; i++) {
PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i), "Failed to populate Clock in DisplayClockTable!", return -1);
}
PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
eclock, ÷rs), "Failed to get ECLK clock settings from VBIOS!", return -1);
*current_eclk_did = (uint8_t)dividers.ulDid;
for (i = 0; i < dep_table->count; i++) { if (dep_table->entries[i].eclk == eclock)
/*
 * NOTE(review): the original file ended mid-statement here and trailed off
 * into non-source text — a German website disclaimer, apparently extraction
 * residue (translation: "The information on this website was compiled
 * carefully to the best of our knowledge; however, neither completeness,
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental."). Restore the remainder of this function from the
 * canonical source.
 */