/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. *
*/
/* * DO NOT use these for err/warn/info/debug messages. * Use dev_err, dev_warn, dev_info and dev_dbg instead. * They are more MGPU friendly.
*/ #undef pr_err #undef pr_warn #undef pr_info #undef pr_debug
// Registers related to GFXOFF // addressBlock: smuio_smuio_SmuSmuioDec // base address: 0x5a000 #define mmSMUIO_GFX_MISC_CNTL 0x00c5 #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
ret = smu_cmn_get_metrics_table(smu,
NULL, false); if (ret) return ret;
switch (member) { case METRICS_CURR_GFXCLK:
*value = metrics->Current.GfxclkFrequency; break; case METRICS_AVERAGE_SOCCLK:
*value = metrics->Current.SocclkFrequency; break; case METRICS_AVERAGE_VCLK:
*value = metrics->Current.VclkFrequency; break; case METRICS_AVERAGE_DCLK:
*value = metrics->Current.DclkFrequency; break; case METRICS_CURR_UCLK:
*value = metrics->Current.MemclkFrequency; break; case METRICS_AVERAGE_GFXACTIVITY:
*value = metrics->Current.GfxActivity; break; case METRICS_AVERAGE_VCNACTIVITY:
*value = metrics->Current.UvdActivity; break; case METRICS_AVERAGE_SOCKETPOWER:
*value = (metrics->Average.CurrentSocketPower << 8) /
1000; break; case METRICS_CURR_SOCKETPOWER:
*value = (metrics->Current.CurrentSocketPower << 8) /
1000; break; case METRICS_TEMPERATURE_EDGE:
*value = metrics->Current.GfxTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; case METRICS_TEMPERATURE_HOTSPOT:
*value = metrics->Current.SocTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; case METRICS_THROTTLER_STATUS:
*value = metrics->Current.ThrottlerStatus; break; case METRICS_VOLTAGE_VDDGFX:
*value = metrics->Current.Voltage[2]; break; case METRICS_VOLTAGE_VDDSOC:
*value = metrics->Current.Voltage[1]; break; case METRICS_AVERAGE_CPUCLK:
memcpy(value, &metrics->Current.CoreFrequency[0],
smu->cpu_core_num * sizeof(uint16_t)); break; default:
*value = UINT_MAX; break;
}
return ret;
}
/*
 * vangogh_common_get_smu_metrics_data - fetch one metrics field, dispatching
 * on the SMC firmware interface version.
 *
 * @smu:    SMU context
 * @member: which metrics field to fetch
 * @value:  output location for the metric value
 *
 * Returns 0 on success or a negative error code from the selected handler.
 *
 * Fixes: fused "staticint" token and the missing "return ret;" /
 * closing brace that were lost in the mangled source.
 */
static int vangogh_common_get_smu_metrics_data(struct smu_context *smu,
					       MetricsMember_t member,
					       uint32_t *value)
{
	int ret = 0;

	/* if_version < 0x3 selects the legacy metrics-table layout */
	if (smu->smc_fw_if_version < 0x3)
		ret = vangogh_get_legacy_smu_metrics_data(smu, member, value);
	else
		ret = vangogh_get_smu_metrics_data(smu, member, value);

	return ret;
}
staticint vangogh_init_smc_tables(struct smu_context *smu)
{ int ret = 0;
ret = vangogh_tables_init(smu); if (ret) return ret;
ret = vangogh_allocate_dpm_context(smu); if (ret) return ret;
#ifdef CONFIG_X86 /* AMD x86 APU only */
smu->cpu_core_num = topology_num_cores_per_package(); #else
smu->cpu_core_num = 4; #endif
return smu_v11_0_init_smc_tables(smu);
}
/*
 * vangogh_dpm_set_vcn_enable - power VCN up or down via SMC messages.
 *
 * @smu:    SMU context
 * @enable: true to power up VCN, false to power it down
 * @inst:   VCN instance (unused by the message here; single message
 *          covers the block — NOTE(review): confirm against callers)
 *
 * Returns 0 on success or the SMC transport error.
 *
 * Fixes: fused "staticint"; dropped the redundant "if (ret) return ret;"
 * that immediately preceded "return ret;" (behavior unchanged).
 */
static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable, int inst)
{
	int ret = 0;

	if (enable) {
		/* vcn dpm on is a prerequisite for vcn power gate messages */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
	}

	return ret;
}
/*
 * vangogh_dpm_set_jpeg_enable - power the JPEG block up or down via SMC
 * messages.
 *
 * @smu:    SMU context
 * @enable: true to power up JPEG, false to power it down
 *
 * Returns 0 on success or the SMC transport error.
 *
 * Fixes: fused "staticint"; dropped the redundant "if (ret) return ret;"
 * that immediately preceded "return ret;" (behavior unchanged).
 */
static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (enable)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
	else
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);

	return ret;
}
staticbool vangogh_is_dpm_running(struct smu_context *smu)
{ struct amdgpu_device *adev = smu->adev; int ret = 0;
uint64_t feature_enabled;
/* we need to re-init after suspend so return false */ if (adev->in_suspend) returnfalse;
ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
switch (clk_type) { case SMU_MCLK: case SMU_UCLK: case SMU_FCLK:
feature_id = SMU_FEATURE_DPM_FCLK_BIT; break; case SMU_GFXCLK: case SMU_SCLK:
feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; break; case SMU_SOCCLK:
feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; break; case SMU_VCLK: case SMU_DCLK:
feature_id = SMU_FEATURE_VCN_DPM_BIT; break; default: returntrue;
}
if (!smu_cmn_feature_is_enabled(smu, feature_id)) returnfalse;
if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) { switch (clk_type) { case SMU_MCLK: case SMU_UCLK:
clock_limit = smu->smu_table.boot_values.uclk; break; case SMU_FCLK:
clock_limit = smu->smu_table.boot_values.fclk; break; case SMU_GFXCLK: case SMU_SCLK:
clock_limit = smu->smu_table.boot_values.gfxclk; break; case SMU_SOCCLK:
clock_limit = smu->smu_table.boot_values.socclk; break; case SMU_VCLK:
clock_limit = smu->smu_table.boot_values.vclk; break; case SMU_DCLK:
clock_limit = smu->smu_table.boot_values.dclk; break; default:
clock_limit = 0; break;
}
/* clock in Mhz unit */ if (min)
*min = clock_limit / 100; if (max)
*max = clock_limit / 100;
return 0;
} if (max) {
ret = vangogh_get_profiling_clk_mask(smu,
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
&vclk_mask,
&dclk_mask,
&mclk_mask,
&fclk_mask,
&soc_mask); if (ret) goto failed;
switch (clk_type) { case SMU_UCLK: case SMU_MCLK:
ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max); if (ret) goto failed; break; case SMU_SOCCLK:
ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max); if (ret) goto failed; break; case SMU_FCLK:
ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max); if (ret) goto failed; break; case SMU_VCLK:
ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max); if (ret) goto failed; break; case SMU_DCLK:
ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max); if (ret) goto failed; break; default:
ret = -EINVAL; goto failed;
}
} if (min) {
ret = vangogh_get_profiling_clk_mask(smu,
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK,
NULL,
NULL,
&mclk_mask,
&fclk_mask,
&soc_mask); if (ret) goto failed;
vclk_mask = dclk_mask = 0;
switch (clk_type) { case SMU_UCLK: case SMU_MCLK:
ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min); if (ret) goto failed; break; case SMU_SOCCLK:
ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min); if (ret) goto failed; break; case SMU_FCLK:
ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min); if (ret) goto failed; break; case SMU_VCLK:
ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min); if (ret) goto failed; break; case SMU_DCLK:
ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min); if (ret) goto failed; break; default:
ret = -EINVAL; goto failed;
}
}
failed: return ret;
}
for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { /* * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT * Not all profile modes are supported on vangogh.
*/
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
i);
if (workload_type < 0) continue;
size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
}
return size;
}
/*
 * vangogh_set_power_profile_mode - notify the PMFW of the active workload
 * profile mask.
 *
 * @smu:                   SMU context
 * @workload_mask:         driver-side workload mask (used in the error log)
 * @custom_params:         custom profile parameters (unused here)
 * @custom_params_max_idx: bound for @custom_params (unused here)
 *
 * Returns 0 on success or the SMC transport error.
 *
 * NOTE(review): backend_workload_mask is initialized to 0 and never
 * populated before being sent — upstream derives it from @workload_mask
 * (smu_cmn_get_backend_workload_mask); the call appears to have been lost
 * in this mangled chunk. Confirm against the full file before relying on
 * this function.
 *
 * Fixes: fused "staticint" token.
 */
static int vangogh_set_power_profile_mode(struct smu_context *smu,
					  u32 workload_mask,
					  long *custom_params,
					  u32 custom_params_max_idx)
{
	u32 backend_workload_mask = 0;
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
					      backend_workload_mask,
					      NULL);
	if (ret) {
		dev_err_once(smu->adev->dev, "Fail to set workload mask 0x%08x\n",
			     workload_mask);
		return ret;
	}

	return ret;
}
/*
 * vangogh_set_soft_freq_limited_range - program hard-min and soft-max
 * frequency limits for one clock domain.
 *
 * @smu:       SMU context
 * @clk_type:  clock domain to limit
 * @min:       hard minimum frequency (MHz)
 * @max:       soft maximum frequency (MHz)
 * @automatic: unused in this implementation — NOTE(review): confirm
 *             against the smu_v11 interface contract
 *
 * VCLK limits are shifted left by 16 bits before being sent, while DCLK
 * uses the low bits of the same SetHardMinVcn/SetSoftMaxVcn messages —
 * this matches the visible per-case encoding below.
 *
 * Returns 0 on success (or when DPM is disabled for the domain),
 * -EINVAL for unsupported clock types, or the SMC transport error.
 *
 * Fixes: fused "staticint"; restored the "return ret;" and closing brace
 * lost at the end of the mangled block.
 */
static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
					       enum smu_clk_type clk_type,
					       uint32_t min,
					       uint32_t max,
					       bool automatic)
{
	int ret = 0;

	if (!vangogh_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetHardMinGfxClk,
						      min, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSoftMaxGfxClk,
						      max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_FCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetHardMinFclkByFreq,
						      min, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSoftMaxFclkByFreq,
						      max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_SOCCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetHardMinSocclkByFreq,
						      min, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSoftMaxSocclkByFreq,
						      max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_VCLK:
		/* VCLK limits live in the upper 16 bits of the message arg */
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetHardMinVcn,
						      min << 16, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSoftMaxVcn,
						      max << 16, NULL);
		if (ret)
			return ret;
		break;
	case SMU_DCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetHardMinVcn,
						      min, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSoftMaxVcn,
						      max, NULL);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
switch (clk_type) { case SMU_SOCCLK:
ret = vangogh_get_dpm_clk_limited(smu, clk_type,
soft_min_level, &min_freq); if (ret) return ret;
ret = vangogh_get_dpm_clk_limited(smu, clk_type,
soft_max_level, &max_freq); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSoftMaxSocclkByFreq,
max_freq, NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinSocclkByFreq,
min_freq, NULL); if (ret) return ret; break; case SMU_FCLK:
ret = vangogh_get_dpm_clk_limited(smu,
clk_type, soft_min_level, &min_freq); if (ret) return ret;
ret = vangogh_get_dpm_clk_limited(smu,
clk_type, soft_max_level, &max_freq); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSoftMaxFclkByFreq,
max_freq, NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinFclkByFreq,
min_freq, NULL); if (ret) return ret; break; case SMU_VCLK:
ret = vangogh_get_dpm_clk_limited(smu,
clk_type, soft_min_level, &min_freq); if (ret) return ret;
ret = vangogh_get_dpm_clk_limited(smu,
clk_type, soft_max_level, &max_freq); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinVcn,
min_freq << 16, NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSoftMaxVcn,
max_freq << 16, NULL); if (ret) return ret;
break; case SMU_DCLK:
ret = vangogh_get_dpm_clk_limited(smu,
clk_type, soft_min_level, &min_freq); if (ret) return ret;
ret = vangogh_get_dpm_clk_limited(smu,
clk_type, soft_max_level, &max_freq); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinVcn,
min_freq, NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSoftMaxVcn,
max_freq, NULL); if (ret) return ret;
break; default: break;
}
return ret;
}
staticint vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
{ int ret = 0, i = 0;
uint32_t min_freq, max_freq, force_freq; enum smu_clk_type clk_type;
for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i];
ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); if (ret) return ret;
force_freq = highest ? max_freq : min_freq;
ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false); if (ret) return ret;
}
return ret;
}
staticint vangogh_unforce_dpm_levels(struct smu_context *smu)
{ int ret = 0, i = 0;
uint32_t min_freq, max_freq; enum smu_clk_type clk_type;
ret = vangogh_set_peak_clock_by_device(smu); if (ret) return ret; break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: return 0;
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
smu->gfx_actual_hard_min_freq, NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
smu->gfx_actual_soft_max_freq, NULL); if (ret) return ret;
if (smu->adev->pm.fw_version >= 0x43f1b00) { for (i = 0; i < smu->cpu_core_num; i++) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
((i << 20)
| smu->cpu_actual_soft_min_freq),
NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
((i << 20)
| smu->cpu_actual_soft_max_freq),
NULL); if (ret) return ret;
}
}
return ret;
}
/*
 * vangogh_read_sensor - service an AMDGPU power/thermal sensor query by
 * mapping it onto the corresponding SMU metrics field.
 *
 * @smu:    SMU context
 * @sensor: which sensor is being read
 * @data:   output buffer (uint32_t for most sensors; an array of
 *          uint16_t per-core clocks for AMDGPU_PP_SENSOR_CPU_CLK)
 * @size:   out: number of bytes written to @data
 *
 * Clock sensors (GFX_MCLK/GFX_SCLK) are reported by the firmware in
 * 100-MHz units and scaled here by 100 into the expected unit.
 *
 * Returns 0 on success, -EINVAL on NULL arguments, -EOPNOTSUPP for
 * unhandled sensors, or the metrics-read error.
 *
 * Fixes: fused "staticint"; restored the "return ret;" and closing
 * brace lost at the end of the mangled block.
 */
static int vangogh_read_sensor(struct smu_context *smu,
			       enum amd_pp_sensors sensor,
			       void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_AVERAGE_GFXACTIVITY,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_LOAD:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_AVERAGE_VCNACTIVITY,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_AVERAGE_SOCKETPOWER,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_CURR_SOCKETPOWER,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_TEMPERATURE_EDGE,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_TEMPERATURE_HOTSPOT,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_CURR_UCLK,
							  (uint32_t *)data);
		/* firmware reports in 100-MHz units */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_CURR_GFXCLK,
							  (uint32_t *)data);
		/* firmware reports in 100-MHz units */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_VOLTAGE_VDDGFX,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDNB:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_VOLTAGE_VDDSOC,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_CPU_CLK:
		/* one uint16_t per CPU core is copied into *data */
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_AVERAGE_CPUCLK,
							  (uint32_t *)data);
		*size = smu->cpu_core_num * sizeof(uint16_t);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
/*
 * vangogh_common_get_gpu_metrics - pick the gpu_metrics table format
 * matching the running firmware and fill *table with it.
 *
 * Dispatch order:
 *   - SMU program 6 firmware: v2_4 from fw 0x3F0800 up, else v2_3;
 *   - otherwise, fw >= 0x043F3E00: v2_3 (legacy variant when the
 *     interface version is below 0x3);
 *   - older firmware: original table (legacy variant when the interface
 *     version is below 0x3).
 *
 * Returns the size written by the selected handler, or a negative error.
 */
static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu, void **table)
{
	uint32_t program = (smu->smc_fw_version >> 24) & 0xff;
	uint32_t fw_version = smu->smc_fw_version & 0xffffff;
	int ret = 0;

	if (program == 6) {
		ret = (fw_version >= 0x3F0800) ?
			vangogh_get_gpu_metrics_v2_4(smu, table) :
			vangogh_get_gpu_metrics_v2_3(smu, table);
		return ret;
	}

	if (smu->smc_fw_version >= 0x043F3E00) {
		ret = (smu->smc_fw_if_version < 0x3) ?
			vangogh_get_legacy_gpu_metrics_v2_3(smu, table) :
			vangogh_get_gpu_metrics_v2_3(smu, table);
	} else {
		ret = (smu->smc_fw_if_version < 0x3) ?
			vangogh_get_legacy_gpu_metrics(smu, table) :
			vangogh_get_gpu_metrics(smu, table);
	}

	return ret;
}
staticint vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size)
{ int ret = 0; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
dev_warn(smu->adev->dev, "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n"); return -EINVAL;
}
switch (type) { case PP_OD_EDIT_CCLK_VDDC_TABLE: if (size != 3) {
dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n"); return -EINVAL;
} if (input[0] >= smu->cpu_core_num) {
dev_err(smu->adev->dev, "core index is overflow, should be less than %d\n",
smu->cpu_core_num);
}
smu->cpu_core_id_select = input[0]; if (input[1] == 0) { if (input[2] < smu->cpu_default_soft_min_freq) {
dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
input[2], smu->cpu_default_soft_min_freq); return -EINVAL;
}
smu->cpu_actual_soft_min_freq = input[2];
} elseif (input[1] == 1) { if (input[2] > smu->cpu_default_soft_max_freq) {
dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
input[2], smu->cpu_default_soft_max_freq); return -EINVAL;
}
smu->cpu_actual_soft_max_freq = input[2];
} else { return -EINVAL;
} break; case PP_OD_EDIT_SCLK_VDDC_TABLE: if (size != 2) {
dev_err(smu->adev->dev, "Input parameter number not correct\n"); return -EINVAL;
}
if (input[0] == 0) { if (input[1] < smu->gfx_default_hard_min_freq) {
dev_warn(smu->adev->dev, "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
input[1], smu->gfx_default_hard_min_freq); return -EINVAL;
}
smu->gfx_actual_hard_min_freq = input[1];
} elseif (input[0] == 1) { if (input[1] > smu->gfx_default_soft_max_freq) {
dev_warn(smu->adev->dev, "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
input[1], smu->gfx_default_soft_max_freq); return -EINVAL;
}
smu->gfx_actual_soft_max_freq = input[1];
} else { return -EINVAL;
} break; case PP_OD_RESTORE_DEFAULT_TABLE: if (size != 0) {
dev_err(smu->adev->dev, "Input parameter number not correct\n"); return -EINVAL;
} else {
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
} break; case PP_OD_COMMIT_DPM_TABLE: if (size != 0) {
dev_err(smu->adev->dev, "Input parameter number not correct\n"); return -EINVAL;
} else { if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev, "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq); return -EINVAL;
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
smu->gfx_actual_hard_min_freq, NULL); if (ret) {
dev_err(smu->adev->dev, "Set hard min sclk failed!"); return ret;
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
smu->gfx_actual_soft_max_freq, NULL); if (ret) {
dev_err(smu->adev->dev, "Set soft max sclk failed!"); return ret;
}
if (smu->adev->pm.fw_version < 0x43f1b00) {
dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n"); break;
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
((smu->cpu_core_id_select << 20)
| smu->cpu_actual_soft_min_freq),
NULL); if (ret) {
dev_err(smu->adev->dev, "Set hard min cclk failed!"); return ret;
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
((smu->cpu_core_id_select << 20)
| smu->cpu_actual_soft_max_freq),
NULL); if (ret) {
dev_err(smu->adev->dev, "Set soft max cclk failed!"); return ret;
}
} break; default: return -ENOSYS;
}
/*
 * NOTE(review): the following disclaimer is extraneous website boilerplate
 * (originally in German) that leaked into the source during extraction and
 * is not part of the driver; wrapped in a comment so the file can compile.
 * Translation: "The information on this website has been carefully compiled
 * to the best of our knowledge. However, neither completeness, correctness,
 * nor quality of the information provided is guaranteed. Note: the color
 * syntax highlighting and the measurement are still experimental."
 */