/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. *
*/
/* * DO NOT use these for err/warn/info/debug messages. * Use dev_err, dev_warn, dev_info and dev_dbg instead. * They are more MGPU friendly.
*/ #undef pr_err #undef pr_warn #undef pr_info #undef pr_debug
/*
 * This interface is just for getting the uclk ultimate freq and shouldn't
 * introduce other likewise functions resulting in overmuch callback.
 *
 * NOTE(review): the body below appears to be a splice of several distinct
 * functions from the original file — the per-clock lookup switch is never
 * closed, and code referencing 'level', 'sclk_mask', 'mclk_mask',
 * 'soc_mask', 'clock_limit', 'min', 'max' and 'ret' (none declared in this
 * function) follows.  The inline comments mark the apparent boundaries.
 * Code is left byte-identical pending a re-sync with the upstream source.
 */
staticint renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t dpm_level, uint32_t *freq)
{
/* Firmware-provided DPM clock table; may be NULL before it is fetched. */
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
if (!clk_table || clk_type >= SMU_CLK_COUNT) return -EINVAL;
/* Bounds-check dpm_level per clock domain, then read the table entry. */
switch (clk_type) { case SMU_SOCCLK: if (dpm_level >= NUM_SOCCLK_DPM_LEVELS) return -EINVAL;
*freq = clk_table->SocClocks[dpm_level].Freq; break; case SMU_UCLK: case SMU_MCLK: if (dpm_level >= NUM_FCLK_DPM_LEVELS) return -EINVAL;
*freq = clk_table->FClocks[dpm_level].Freq; break; case SMU_DCEFCLK: if (dpm_level >= NUM_DCFCLK_DPM_LEVELS) return -EINVAL;
*freq = clk_table->DcfClocks[dpm_level].Freq; break; case SMU_FCLK: if (dpm_level >= NUM_FCLK_DPM_LEVELS) return -EINVAL;
*freq = clk_table->FClocks[dpm_level].Freq; break; case SMU_VCLK: if (dpm_level >= NUM_VCN_DPM_LEVELS) return -EINVAL;
*freq = clk_table->VClocks[dpm_level].Freq; break; case SMU_DCLK: if (dpm_level >= NUM_VCN_DPM_LEVELS) return -EINVAL;
*freq = clk_table->DClocks[dpm_level].Freq; break;
/*
 * NOTE(review): apparent splice boundary — the following lines look like
 * the body of a profiling-clk-mask helper ('level' and the '*_mask'
 * pointers are not declared in this function).  TODO confirm against
 * upstream.
 */
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { if (sclk_mask)
*sclk_mask = 0;
} elseif (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { if (mclk_mask) /* mclk levels are in reverse order */
*mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
} elseif (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { if (sclk_mask) /* The sclk as gfxclk and has three level about max/min/current */
*sclk_mask = 3 - 1;
if (mclk_mask) /* mclk levels are in reverse order */
*mclk_mask = 0;
if (soc_mask)
*soc_mask = NUM_SOCCLK_DPM_LEVELS - 1;
}
/*
 * NOTE(review): second apparent splice boundary — from here on the code
 * matches an "ultimate freq" query routine ('clock_limit', 'min', 'max'
 * and 'ret' are not declared in this function, and the 'failed' label is
 * only reachable from the spliced code).  TODO confirm against upstream.
 */
if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { switch (clk_type) { case SMU_MCLK: case SMU_UCLK:
clock_limit = smu->smu_table.boot_values.uclk; break; case SMU_GFXCLK: case SMU_SCLK:
clock_limit = smu->smu_table.boot_values.gfxclk; break; case SMU_SOCCLK:
clock_limit = smu->smu_table.boot_values.socclk; break; default:
clock_limit = 0; break;
}
/* clock in Mhz unit */ if (min)
*min = clock_limit / 100; if (max)
*max = clock_limit / 100;
return 0;
}
/* Max: gfxclk from the SMC; mclk/fclk/socclk via the profiling masks. */
if (max) {
ret = renoir_get_profiling_clk_mask(smu,
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
NULL,
&mclk_mask,
&soc_mask); if (ret) goto failed;
switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK:
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max); if (ret) {
dev_err(smu->adev->dev, "Attempt to get max GX frequency from SMC Failed !\n"); goto failed;
} break; case SMU_UCLK: case SMU_FCLK: case SMU_MCLK:
ret = renoir_get_dpm_clk_limited(smu, clk_type, mclk_mask, max); if (ret) goto failed; break; case SMU_SOCCLK:
ret = renoir_get_dpm_clk_limited(smu, clk_type, soc_mask, max); if (ret) goto failed; break; default:
ret = -EINVAL; goto failed;
}
}
/* Min: gfxclk from the SMC; mclk uses the last (reversed-order) level. */
if (min) { switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK:
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min); if (ret) {
dev_err(smu->adev->dev, "Attempt to get min GX frequency from SMC Failed !\n"); goto failed;
} break; case SMU_UCLK: case SMU_FCLK: case SMU_MCLK:
ret = renoir_get_dpm_clk_limited(smu, clk_type, NUM_MEMCLK_DPM_LEVELS - 1, min); if (ret) goto failed; break; case SMU_SOCCLK:
ret = renoir_get_dpm_clk_limited(smu, clk_type, 0, min); if (ret) goto failed; break; default:
ret = -EINVAL; goto failed;
}
}
failed: return ret;
}
/*
 * renoir_od_edit_dpm_table - handle pp_od_clk_voltage overdrive commands.
 * @smu:   SMU context
 * @type:  overdrive table command to process
 * @input: command arguments ((index, value) pair for SCLK edits)
 * @size:  number of entries in @input
 *
 * Only usable while power_dpm_force_performance_level is in manual mode.
 * Edits are staged in smu->gfx_actual_* and only pushed to the SMC on
 * PP_OD_COMMIT_DPM_TABLE.
 *
 * Returns 0 on success, -EINVAL on bad input, -ENOSYS for unknown commands,
 * or the error from the SMC message send.
 *
 * Fixes: "staticint" and "elseif" token fusions (syntax errors) in the
 * previous revision.
 */
static int renoir_od_edit_dpm_table(struct smu_context *smu,
				    enum PP_OD_DPM_TABLE_COMMAND type,
				    long input[], uint32_t size)
{
	int ret = 0;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	/* OD edits are only legal in manual performance-level mode. */
	if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
		dev_warn(smu->adev->dev,
			 "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n");
		return -EINVAL;
	}

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		/* Expect exactly an (index, frequency) pair. */
		if (size != 2) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			/* index 0: new hard minimum sclk, clamped from below */
			if (input[1] < smu->gfx_default_hard_min_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
					 input[1], smu->gfx_default_hard_min_freq);
				return -EINVAL;
			}
			smu->gfx_actual_hard_min_freq = input[1];
		} else if (input[0] == 1) {
			/* index 1: new soft maximum sclk, clamped from above */
			if (input[1] > smu->gfx_default_soft_max_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
					 input[1], smu->gfx_default_soft_max_freq);
				return -EINVAL;
			}
			smu->gfx_actual_soft_max_freq = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}
		/* Revert the staged values to the hardware defaults. */
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			/* Reject an inverted min/max pair before touching the SMC. */
			if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
				dev_err(smu->adev->dev,
					"The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
					smu->gfx_actual_hard_min_freq,
					smu->gfx_actual_soft_max_freq);
				return -EINVAL;
			}

			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetHardMinGfxClk,
							      smu->gfx_actual_hard_min_freq,
							      NULL);
			if (ret) {
				dev_err(smu->adev->dev, "Set hard min sclk failed!");
				return ret;
			}

			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetSoftMaxGfxClk,
							      smu->gfx_actual_soft_max_freq,
							      NULL);
			if (ret) {
				dev_err(smu->adev->dev, "Set soft max sclk failed!");
				return ret;
			}
		}
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}
/*
 * renoir_set_fine_grain_gfx_freq_parameters - query the SMC for the min and
 * max gfxclk frequencies used for fine-grain tuning.
 *
 * NOTE(review): this body appears to be a splice of three routines — after
 * the two SMC queries, a 'switch (clk_type)' follows that references
 * 'clk_type', 'count', 'idx', 'value', 'buf', 'size', 'cur_value' and
 * 'cur_value_match_level' (none declared here, looks like a print-clk-levels
 * body), and then a power-state classification block referencing
 * 'smu_dpm_ctx' and 'pm_type'.  Code is left byte-identical pending a
 * re-sync with the upstream source.
 */
staticint renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
uint32_t min = 0, max = 0;
/*
 * NOTE(review): 'ret' carries negative error codes from the send helpers,
 * so uint32_t looks wrong — presumably this should be a plain int.
 * TODO confirm against upstream.
 */
uint32_t ret = 0;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetMinGfxclkFrequency,
0, &min); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetMaxGfxclkFrequency,
0, &max); if (ret) return ret;
/* NOTE(review): apparent splice boundary (see header comment). */
switch (clk_type) { case SMU_SOCCLK: case SMU_MCLK: case SMU_DCEFCLK: case SMU_FCLK: case SMU_VCLK: case SMU_DCLK: for (i = 0; i < count; i++) {
idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) continue;
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : ""); if (cur_value == value)
cur_value_match_level = true;
}
if (!cur_value_match_level)
size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value);
/*
 * NOTE(review): second apparent splice boundary — maps the current power
 * state's UI label to a POWER_STATE_TYPE_* value.
 */
if (!smu_dpm_ctx->dpm_context ||
!smu_dpm_ctx->dpm_current_power_state) return -EINVAL;
switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) { case SMU_STATE_UI_LABEL_BATTERY:
pm_type = POWER_STATE_TYPE_BATTERY; break; case SMU_STATE_UI_LABEL_BALLANCED:
pm_type = POWER_STATE_TYPE_BALANCED; break; case SMU_STATE_UI_LABEL_PERFORMANCE:
pm_type = POWER_STATE_TYPE_PERFORMANCE; break; default: if (smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT)
pm_type = POWER_STATE_TYPE_INTERNAL_BOOT; else
pm_type = POWER_STATE_TYPE_DEFAULT; break;
}
return pm_type;
}
/*
 * renoir_dpm_set_vcn_enable - power the VCN block up or down via the SMC.
 * @smu:    SMU context
 * @enable: true to power VCN up, false to power it down
 * @inst:   VCN instance (unused here — Renoir has a single instance;
 *          presumably kept for interface parity with multi-instance ASICs,
 *          TODO confirm)
 *
 * Both directions are gated on SMU_FEATURE_VCN_PG_BIT being enabled; when
 * it is not, this is a no-op returning 0.
 *
 * Fixes the "staticint" token fusion (syntax error) in the previous
 * revision.
 */
static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable, int inst)
{
	int ret = 0;

	if (enable) {
		/* vcn dpm on is a prerequisite for vcn power gate messages */
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
			if (ret)
				return ret;
		}
	} else {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
			if (ret)
				return ret;
		}
	}

	return ret;
}
/*
 * renoir_dpm_set_jpeg_enable - power the JPEG block up or down via the SMC.
 * @smu:    SMU context
 * @enable: true to power JPEG up, false to power it down
 *
 * Both directions are gated on SMU_FEATURE_JPEG_PG_BIT being enabled; when
 * it is not, this is a no-op returning 0.
 *
 * Fixes the "staticint" token fusion (syntax error) in the previous
 * revision.
 */
static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (enable) {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
			if (ret)
				return ret;
		}
	} else {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
			if (ret)
				return ret;
		}
	}

	return ret;
}
/*
 * renoir_force_dpm_limit_value - pin every managed clock domain to its
 * highest or lowest DPM frequency.
 * @highest: true = force each clock to its max freq, false = to its min.
 *
 * NOTE(review): neither 'clks' nor 'clk_feature_map' is declared in this
 * function.  The second loop (which restores the full [min, max] range
 * instead of pinning a single frequency) looks like it was spliced in from
 * an "unforce dpm levels" routine.  Code is left byte-identical pending a
 * re-sync with the upstream source.
 */
staticint renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
{ int ret = 0, i = 0;
uint32_t min_freq, max_freq, force_freq; enum smu_clk_type clk_type;
/* Pin each clock to a single frequency (min == max == force_freq). */
for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i];
ret = renoir_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); if (ret) return ret;
force_freq = highest ? max_freq : min_freq;
ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false); if (ret) return ret;
}
/* NOTE(review): apparent splice boundary (see header comment). */
for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) { if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature)) continue;
clk_type = clk_feature_map[i].clk_type;
ret = renoir_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); if (ret) return ret;
ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); if (ret) return ret;
}
return ret;
}
/*
 * This interface gets the dpm clock table for dc.
 *
 * Copies every per-domain level table (Freq/Vol pairs) from the firmware
 * DpmClocks_t into the caller's struct dpm_clocks.
 *
 * NOTE(review): after the copy loops, a 'switch (clk_type)' follows that
 * references 'clk_type', 'soft_min_level', 'soft_max_level' and 'ret' —
 * none declared in this function.  It looks like the body of a
 * force-clk-levels routine spliced in, and it leaves this function without
 * the expected plain "return 0" after the copies.  Code is left
 * byte-identical pending a re-sync with the upstream source.
 */
staticint renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
{
DpmClocks_t *table = smu->smu_table.clocks_table; int i;
if (!clock_table || !table) return -EINVAL;
for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) {
clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq;
clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol;
}
for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq;
clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol;
}
for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
clock_table->FClocks[i].Freq = table->FClocks[i].Freq;
clock_table->FClocks[i].Vol = table->FClocks[i].Vol;
}
for (i = 0; i< NUM_MEMCLK_DPM_LEVELS; i++) {
clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq;
clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
}
for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
clock_table->VClocks[i].Freq = table->VClocks[i].Freq;
clock_table->VClocks[i].Vol = table->VClocks[i].Vol;
}
for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
clock_table->DClocks[i].Freq = table->DClocks[i].Freq;
clock_table->DClocks[i].Vol = table->DClocks[i].Vol;
}
/* NOTE(review): apparent splice boundary (see header comment). */
switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: if (soft_min_level > 2 || soft_max_level > 2) {
dev_info(smu->adev->dev, "Currently sclk only support 3 levels on APU\n"); return -EINVAL;
}
ret = renoir_get_dpm_ultimate_freq(smu, SMU_GFXCLK, &min_freq, &max_freq); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
soft_max_level == 0 ? min_freq :
soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq,
NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
soft_min_level == 2 ? max_freq :
soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq,
NULL); if (ret) return ret; break; case SMU_SOCCLK:
ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_min_level, &min_freq); if (ret) return ret;
ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_max_level, &max_freq); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL); if (ret) return ret; break; case SMU_MCLK: case SMU_FCLK:
ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_min_level, &min_freq); if (ret) return ret;
ret = renoir_get_dpm_clk_limited(smu, clk_type, soft_max_level, &max_freq); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL); if (ret) return ret; break; default: break;
}
return ret;
}
/*
 * renoir_set_power_profile_mode - notify the SMC of the active workload.
 * @smu:                   SMU context
 * @workload_mask:         generic workload mask requested by the core
 * @custom_params:         custom profile parameters (unused here)
 * @custom_params_max_idx: number of entries in @custom_params (unused here)
 *
 * Returns 0 on success or the error from the SMC message send.
 *
 * Fixes: "staticint" token fusion and the broken printk format string
 * "0x08%x" (the zero-pad width belongs after '%': "0x%08x").
 */
static int renoir_set_power_profile_mode(struct smu_context *smu,
					 u32 workload_mask, long *custom_params,
					 u32 custom_params_max_idx)
{
	int ret;
	u32 backend_workload_mask = 0;

	/*
	 * NOTE(review): backend_workload_mask is never derived from
	 * workload_mask here, so the SMC is always told "0".  Presumably the
	 * generic mask should be converted to the ASIC-specific one first
	 * (e.g. via smu_cmn_get_backend_workload_mask) — TODO confirm against
	 * upstream and restore.
	 */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
					      backend_workload_mask,
					      NULL);
	if (ret) {
		dev_err_once(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
			     workload_mask);
		return ret;
	}

	return ret;
}
/*
 * renoir_set_peak_clock_by_device - force sclk and uclk to their ultimate
 * (peak) frequencies by pinning min == max for each.
 *
 * NOTE(review): after the uclk call, a run of bare
 * 'case AMD_DPM_FORCED_LEVEL_*' labels follows with no enclosing switch —
 * this looks like the body of a set-performance-level routine spliced into
 * this function.  Code is left byte-identical pending a re-sync with the
 * upstream source.
 */
staticint renoir_set_peak_clock_by_device(struct smu_context *smu)
{ int ret = 0;
uint32_t sclk_freq = 0, uclk_freq = 0;
ret = renoir_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_freq); if (ret) return ret;
ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false); if (ret) return ret;
ret = renoir_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &uclk_freq); if (ret) return ret;
ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false); if (ret) return ret;
/* NOTE(review): apparent splice boundary (see header comment). */
ret = renoir_force_dpm_limit_value(smu, true); break; case AMD_DPM_FORCED_LEVEL_LOW:
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
ret = renoir_force_dpm_limit_value(smu, false); break; case AMD_DPM_FORCED_LEVEL_AUTO:
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
ret = renoir_unforce_dpm_levels(smu); break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
/* Pin the UMD pstate: hard-min then soft-max for each clock domain. */
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinGfxClk,
RENOIR_UMD_PSTATE_GFXCLK,
NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinFclkByFreq,
RENOIR_UMD_PSTATE_FCLK,
NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinSocclkByFreq,
RENOIR_UMD_PSTATE_SOCCLK,
NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinVcn,
RENOIR_UMD_PSTATE_VCNCLK,
NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSoftMaxGfxClk,
RENOIR_UMD_PSTATE_GFXCLK,
NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSoftMaxFclkByFreq,
RENOIR_UMD_PSTATE_FCLK,
NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSoftMaxSocclkByFreq,
RENOIR_UMD_PSTATE_SOCCLK,
NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetSoftMaxVcn,
RENOIR_UMD_PSTATE_VCNCLK,
NULL); if (ret) return ret; break; case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
ret = renoir_set_peak_clock_by_device(smu); break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: break;
} return ret;
}
/*
 * Save watermark settings into the pplib smu structure, and also pass the
 * data to the smu controller.
 *
 * NOTE(review): this body appears to be a splice of three routines — the
 * watermark copy loop is interrupted by a power-profile listing loop
 * (referencing undeclared 'workload_type', 'buf', 'size',
 * 'amdgpu_pp_profile_name') and ends with an 'adev->in_suspend' check that
 * looks like an is-dpm-running predicate ('returnfalse'/'returntrue' are
 * also fused tokens).  The function never visibly closes.  Code is left
 * byte-identical pending a re-sync with the upstream source.
 */
staticint renoir_set_watermarks_table( struct smu_context *smu, struct pp_smu_wm_range_sets *clock_ranges)
{
Watermarks_t *table = smu->smu_table.watermarks_table; int ret = 0; int i;
if (clock_ranges) { if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
clock_ranges->num_writer_wm_sets > NUM_WM_RANGES) return -EINVAL;
/* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/ for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
table->WatermarkRow[WM_DCFCLK][i].MinClock =
clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
table->WatermarkRow[WM_DCFCLK][i].MaxClock =
clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
table->WatermarkRow[WM_DCFCLK][i].MinMclk =
clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
/* NOTE(review): apparent splice boundary (see header comment). */
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { /* * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT * Not all profile modes are supported on arcturus.
*/
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
i); if (workload_type < 0) continue;
size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
}
/* * Until now, the pmfw hasn't exported the interface of SMU * feature mask to APU SKU so just force on all the feature * at early initial stage.
 * NOTE(review): second apparent splice boundary (see header comment).
 */ if (adev->in_suspend) returnfalse; else returntrue;
/*
 * NOTE(review): the German disclaimer text below is unrelated web-page
 * residue ("the information on this web page was compiled to the best of
 * our knowledge; however neither completeness, correctness nor quality of
 * the provided information is guaranteed; remark: the syntax highlighting
 * and the measurement are still experimental") that was accidentally pasted
 * into the source file.  Commented out here pending removal.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfaeltig zusammengestellt. Es wird jedoch weder
 * Vollstaendigkeit, noch Richtigkeit, noch Qualitaet der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */