/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE.
*/
/* * DO NOT use these for err/warn/info/debug messages. * Use dev_err, dev_warn, dev_info and dev_dbg instead. * They are more MGPU friendly.
*/ #undef pr_err #undef pr_warn #undef pr_info #undef pr_debug
/*
 * NOTE(review): fragment of a firmware-version check routine (the enclosing
 * function's signature and tail lie outside this chunk), so the code below
 * is kept byte-identical and only annotated. It logs the PMFW-reported
 * program/version and warns on a driver/firmware interface-version mismatch
 * without failing the load.
 */
/* only for dGPU w/ SMU13*/ if (adev->pm.fw)
dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
smu_program, smu_version, smu_major, smu_minor, smu_debug);
/* * 1. if_version mismatch is not critical as our fw is designed * to be backward compatible. * 2. New fw usually brings some optimizations. But that's visible * only on the paired driver. * Considering above, we just leave user a verbal message instead * of halt driver loading.
*/ if (if_version != smu->smc_driver_if_version) {
dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
smu_program, smu_version, smu_major, smu_minor, smu_debug);
dev_info(adev->dev, "SMU driver if version not matched\n");
}
/*
 * NOTE(review): tail of a pptable-setting dispatcher whose head is outside
 * this chunk. Dispatches on the pptable minor version to the matching v2.x
 * parser; unknown minors are rejected with -EINVAL.
 */
switch (version_minor) { case 0:
ret = smu_v13_0_set_pptable_v2_0(smu, table, size); break; case 1:
ret = smu_v13_0_set_pptable_v2_1(smu, table, size, pptable_id); break; default:
ret = -EINVAL; break;
}
return ret;
}
int smu_v13_0_setup_pptable(struct smu_context *smu)
{ struct amdgpu_device *adev = smu->adev;
uint32_t size = 0, pptable_id = 0; void *table; int ret = 0;
/* override pptable_id from driver parameter */ if (amdgpu_smu_pptable_id >= 0) {
pptable_id = amdgpu_smu_pptable_id;
dev_info(adev->dev, "override pptable id %d\n", pptable_id);
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
}
/* force using vbios pptable in sriov mode */ if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
ret = smu_v13_0_get_pptable_from_vbios(smu, &table, &size); else
ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
if (ret) return ret;
if (!smu->smu_table.power_play_table)
smu->smu_table.power_play_table = table; if (!smu->smu_table.power_play_table_size)
smu->smu_table.power_play_table_size = size;
return 0;
}
/*
 * smu_v13_0_init_smc_tables - allocate driver-side copies of SMU tables.
 *
 * NOTE(review): truncated in this chunk — the err0_out..err5_out unwind
 * labels and the function tail are not visible here, so the code is kept
 * byte-identical and only annotated. Allocates the driver pptable, the
 * max-sustainable-clocks cache, the three overdrive tables (only when the
 * ASIC exposes an OVERDRIVE table), and the combo pptable; each failure
 * unwinds all prior allocations via the goto chain.
 */
int smu_v13_0_init_smc_tables(struct smu_context *smu)
{ struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; int ret = 0;
smu_table->driver_pptable =
kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL); if (!smu_table->driver_pptable) {
ret = -ENOMEM; goto err0_out;
}
smu_table->max_sustainable_clocks =
kzalloc(sizeof(struct smu_13_0_max_sustainable_clocks), GFP_KERNEL); if (!smu_table->max_sustainable_clocks) {
ret = -ENOMEM; goto err1_out;
}
/* Aldebaran does not support OVERDRIVE */ if (tables[SMU_TABLE_OVERDRIVE].size) {
smu_table->overdrive_table =
kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); if (!smu_table->overdrive_table) {
ret = -ENOMEM; goto err2_out;
}
smu_table->boot_overdrive_table =
kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); if (!smu_table->boot_overdrive_table) {
ret = -ENOMEM; goto err3_out;
}
smu_table->user_overdrive_table =
kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); if (!smu_table->user_overdrive_table) {
ret = -ENOMEM; goto err4_out;
}
}
smu_table->combo_pptable =
kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL); if (!smu_table->combo_pptable) {
ret = -ENOMEM; goto err5_out;
}
/*
 * NOTE(review): tail of a memory-pool notification routine whose head
 * (where address_high/address_low are split from memory_pool->mc_address)
 * lies outside this chunk. Programs the DRAM log buffer address (high then
 * low 32 bits) and its size into the PMFW, bailing out on the first failure.
 */
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
address_high, NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
address_low, NULL); if (ret) return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
(uint32_t)memory_pool->size, NULL); if (ret) return ret;
return ret;
}
/*
 * smu_v13_0_set_driver_table_location - tell the PMFW where the driver
 * table lives in VRAM.
 *
 * Sends the 64-bit MC address as two messages (high then low 32 bits).
 * A zero mc_address means the table is not mapped yet; that is treated
 * as success and nothing is programmed.
 *
 * Returns 0 on success or the first failing message's error code.
 */
int smu_v13_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret;

	if (!driver_table->mc_address)
		return 0;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetDriverDramAddrHigh,
					      upper_32_bits(driver_table->mc_address),
					      NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetDriverDramAddrLow,
					       lower_32_bits(driver_table->mc_address),
					       NULL);
}
/*
 * smu_v13_0_set_tool_table_location - program the PM status log (tools)
 * buffer address into the PMFW.
 *
 * Mirrors smu_v13_0_set_driver_table_location(): the 64-bit MC address is
 * delivered as a high/low message pair, and an unmapped buffer (zero
 * mc_address) is silently skipped.
 *
 * Returns 0 on success or the first failing message's error code.
 */
int smu_v13_0_set_tool_table_location(struct smu_context *smu)
{
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
	int ret;

	if (!tool_table->mc_address)
		return 0;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetToolsDramAddrHigh,
					      upper_32_bits(tool_table->mc_address),
					      NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetToolsDramAddrLow,
					       lower_32_bits(tool_table->mc_address),
					       NULL);
}
/*
 * smu_v13_0_set_allowed_mask - push the driver's allowed-feature bitmap
 * to the PMFW.
 *
 * NOTE(review): truncated in this chunk — only the validation head is
 * visible (the copy into feature_mask[] and the SMC messages follow
 * outside this view). An empty allowed bitmap or fewer than 64 features
 * is rejected with -EINVAL.
 */
int smu_v13_0_set_allowed_mask(struct smu_context *smu)
{ struct smu_feature *feature = &smu->smu_feature; int ret = 0;
uint32_t feature_mask[2];
if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
feature->feature_num < 64) return -EINVAL;
/*
 * smu_v13_0_gfx_off_control - allow or disallow GFXOFF via the PMFW.
 *
 * Only acts on the SMU13 IP revisions listed below, and only when the
 * PP_GFXOFF_MASK feature flag is set; every other revision is a no-op
 * returning 0.
 *
 * Returns 0 on success or the message-send error code.
 */
int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		ret = smu_cmn_send_smc_msg(smu,
					   enable ? SMU_MSG_AllowGfxOff :
						    SMU_MSG_DisallowGfxOff,
					   NULL);
		break;
	default:
		break;
	}

	return ret;
}
/*
 * smu_v13_0_system_features_control - enable or disable all SMU features
 * with a single firmware message.
 *
 * Returns the message-send result.
 */
int smu_v13_0_system_features_control(struct smu_context *smu, bool en)
{
	if (en)
		return smu_cmn_send_smc_msg(smu, SMU_MSG_EnableAllSmuFeatures, NULL);

	return smu_cmn_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures, NULL);
}
/*
 * smu_v13_0_notify_display_change - tell the PMFW when no DC display
 * stack is present.
 *
 * Sends SMU_MSG_DALNotPresent only on configurations without DC support;
 * otherwise it is a no-op returning 0.
 */
int smu_v13_0_notify_display_change(struct smu_context *smu)
{
	if (amdgpu_device_has_dc_support(smu->adev))
		return 0;

	return smu_cmn_send_smc_msg(smu, SMU_MSG_DALNotPresent, NULL);
}
/*
 * smu_v13_0_get_max_sustainable_clock - query the max sustainable
 * frequency for one clock domain.
 *
 * Asks the PMFW for the DC-mode ceiling first; when that reads back as
 * zero, falls back to the AC-mode ceiling.
 *
 * Fixes vs. the previous revision: the mangled "staticint" token is
 * restored to "static int", and clk_id — previously used uninitialized
 * (undefined behavior) — is now derived from the generic clock type via
 * the ASIC-specific index mapping before being sent to the firmware.
 *
 * @clock: out parameter receiving the frequency reported by the PMFW.
 * @clock_select: generic clock domain to query.
 *
 * Returns 0 on success, -EINVAL for an unmappable clock type, or the
 * message-send error code.
 */
static int
smu_v13_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	/* Translate the generic clock type into the ASIC clock index. */
	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	return 0;
}
/*
 * smu_v13_0_init_max_sustainable_clocks - populate the cached
 * max-sustainable-clock table from the PMFW.
 *
 * Each clock domain is queried only when its DPM feature is enabled:
 * UCLK and SOCCLK individually, and the four display-related clocks
 * (DCEF/DISP/PHY/PIX) together behind the DCEFCLK feature. Finally the
 * UCLK ceiling is clamped so it never exceeds the SOCCLK ceiling.
 *
 * Returns 0 on success or the first query's error code.
 */
int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_13_0_max_sustainable_clocks *clks =
		smu->smu_table.max_sustainable_clocks;
	int ret;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu, &clks->uclock,
							  SMU_UCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu, &clks->soc_clock,
							  SMU_SOCCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
				__func__);
			return ret;
		}
	}

	/* All display-related ceilings are gated on DCEFCLK DPM. */
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu, &clks->dcef_clock,
							  SMU_DCEFCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
				__func__);
			return ret;
		}

		ret = smu_v13_0_get_max_sustainable_clock(smu, &clks->display_clock,
							  SMU_DISPCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
				__func__);
			return ret;
		}

		ret = smu_v13_0_get_max_sustainable_clock(smu, &clks->phy_clock,
							  SMU_PHYCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
				__func__);
			return ret;
		}

		ret = smu_v13_0_get_max_sustainable_clock(smu, &clks->pixel_clock,
							  SMU_PIXCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
				__func__);
			return ret;
		}
	}

	/* UCLK cannot be sustained above SOCCLK. */
	if (clks->soc_clock < clks->uclock)
		clks->uclock = clks->soc_clock;

	return 0;
}
/*
 * smu_v13_0_get_current_power_limit - read the active PPT limit from the
 * PMFW.
 *
 * Fixes vs. the previous revision: power_src was sent to the firmware
 * uninitialized (undefined behavior). It is now resolved from the current
 * power source (AC vs. DC) through the ASIC-specific power-source index
 * mapping before the query.
 *
 * @power_limit: out parameter receiving the limit reported by the PMFW.
 *
 * Returns 0 on success, -EINVAL if the PPT feature is disabled or the
 * power source cannot be mapped, or the message-send error code.
 */
int smu_v13_0_get_current_power_limit(struct smu_context *smu,
				      uint32_t *power_limit)
{
	int power_src;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		return -EINVAL;

	/* Map the current AC/DC power source to the ASIC index. */
	power_src = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetPptLimit,
					      power_src << 16,
					      power_limit);
	if (ret)
		dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);

	return ret;
}
/*
 * smu_v13_0_set_power_limit - program a new default PPT limit.
 *
 * NOTE(review): truncated in this chunk — the success path after the
 * SetPptLimit message (presumably caching the new limit and returning 0)
 * lies outside this view, so the code is kept byte-identical. Only the
 * default limit type is supported, and the PPT feature must be enabled.
 */
int smu_v13_0_set_power_limit(struct smu_context *smu, enum smu_ppt_limit_type limit_type,
uint32_t limit)
{ int ret = 0;
if (limit_type != SMU_DEFAULT_PPT_LIMIT) return -EINVAL;
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); return -EOPNOTSUPP;
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL); if (ret) {
dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__); return ret;
}
/*
 * smu_v13_0_auto_fan_control - start or stop the firmware fan-control
 * feature.
 *
 * A no-op (returning 0) when the ASIC does not expose the fan-control
 * feature at all; otherwise toggles SMU_FEATURE_FAN_CONTROL_BIT and logs
 * a failure.
 *
 * Fixes vs. the previous revision: the mangled "staticint" token is
 * restored to "static int", and the missing "return ret;" plus closing
 * brace (lost in this chunk's truncation) are reinstated so the function
 * actually returns its result.
 */
static int
smu_v13_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
			__func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}
/*
 * smu_v13_0_set_fan_control_mode - select none/manual/auto fan control.
 *
 * AMD_FAN_CTRL_NONE drives the fan to full PWM (255); MANUAL and AUTO
 * stop/start the firmware fan-control feature respectively. Unknown
 * modes fall through untouched. Any failure is logged and mapped to
 * -EINVAL.
 */
int
smu_v13_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	if (mode == AMD_FAN_CTRL_NONE)
		ret = smu_v13_0_set_fan_speed_pwm(smu, 255);
	else if (mode == AMD_FAN_CTRL_MANUAL)
		ret = smu_v13_0_auto_fan_control(smu, 0);
	else if (mode == AMD_FAN_CTRL_AUTO)
		ret = smu_v13_0_auto_fan_control(smu, 1);

	if (ret) {
		dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}
/*
 * smu_v13_0_set_fan_speed_rpm - set a fixed fan speed in RPM.
 *
 * NOTE(review): truncated in this chunk — the tach-period computation
 * from crystal_clock_freq and the register/message programming follow
 * outside this view, so the visible head is kept byte-identical.
 * Rejects zero or overly large speeds (speed > UINT_MAX/8 would overflow
 * the later tach-period math) and first disables automatic fan control.
 */
int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed)
{ struct amdgpu_device *adev = smu->adev;
uint32_t crystal_clock_freq = 2500;
uint32_t tach_period; int ret;
if (!speed || speed > UINT_MAX/8) return -EINVAL;
ret = smu_v13_0_auto_fan_control(smu, 0); if (ret) return ret;
staticint smu_v13_0_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry)
{ struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id; /* * ctxid is used to distinguish different * events for SMCToHost interrupt.
*/
uint32_t ctxid = entry->src_data[0];
uint32_t data;
uint32_t high;
if (client_id == SOC15_IH_CLIENTID_THM) { switch (src_id) { case THM_11_0__SRCID__THM_DIG_THERM_L2H:
schedule_delayed_work(&smu->swctf_delayed_work,
msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY)); break; case THM_11_0__SRCID__THM_DIG_THERM_H2L:
dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n"); break; default:
dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
src_id); break;
}
} elseif (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n"); /* * HW CTF just occurred. Shutdown to prevent further damage.
*/
dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
orderly_poweroff(true);
} elseif (client_id == SOC15_IH_CLIENTID_MP1) { if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) { /* ACK SMUToHost interrupt */
data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
switch (ctxid) { case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
dev_dbg(adev->dev, "Switched to AC mode!\n");
schedule_work(&smu->interrupt_work);
adev->pm.ac_power = true; break; case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
dev_dbg(adev->dev, "Switched to DC mode!\n");
schedule_work(&smu->interrupt_work);
adev->pm.ac_power = false; break; case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING: /* * Increment the throttle interrupt counter
*/
atomic64_inc(&smu->throttle_int_counter);
if (!atomic_read(&adev->throttling_logging_enabled)) return 0;
if (__ratelimit(&adev->throttling_logging_rs))
schedule_work(&smu->throttling_logging_work);
break; case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL:
high = smu->thermal_range.software_shutdown_temp +
smu->thermal_range.software_shutdown_temp_offset;
high = min_t(typeof(high),
SMU_THERMAL_MAXIMUM_ALERT_TEMP,
high);
dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n",
high,
smu->thermal_range.software_shutdown_temp_offset);
data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
DIG_THERM_INTH,
(high & 0xff));
data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data); break; case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY:
high = min_t(typeof(high),
SMU_THERMAL_MAXIMUM_ALERT_TEMP,
smu->thermal_range.software_shutdown_temp);
dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high);
data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
DIG_THERM_INTH,
(high & 0xff));
data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data); break; default:
dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
ctxid, client_id); break;
}
}
}
/*
 * NOTE(review): tail of a DPM ultimate-frequency query routine whose head
 * (setting up "param" and the min/max pointers) lies outside this chunk,
 * so the code is kept byte-identical. Queries the max frequency with the
 * AC- or DC-mode message depending on the current power source, then the
 * min frequency, bailing to the shared "failed" label on any error.
 */
if (max) { if (smu->adev->pm.ac_power)
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetMaxDpmFreq,
param,
max); else
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetDcModeMaxDpmFreq,
param,
max); if (ret) goto failed;
}
if (min) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min); if (ret) goto failed;
}
failed: return ret;
}
/*
 * NOTE(review): this span appears to splice fragments of three different
 * functions — the head of smu_v13_0_set_soft_freq_limited_range(), the
 * tail of a DPM level-count getter (the 0xff-index / "+1 for 13.0.2"
 * piece), and the tail of a fine-grained-status getter. The code is kept
 * byte-identical; reassembly would require the missing middles.
 */
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min,
uint32_t max, bool automatic)
{ int ret = 0, clk_id = 0;
uint32_t param;
if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) return 0;
ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value); /* SMU v13.0.2 FW returns 0 based max level, increment by one for it */ if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) && (!ret && value))
++(*value);
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
param,
&value); if (ret) return ret;
/* * BIT31: 1 - Fine grained DPM, 0 - Dicrete DPM * now, we un-support it
*/
*is_fine_grained_dpm = value & 0x80000000;
return 0;
}
/*
 * smu_v13_0_set_single_dpm_table - fill one DPM table from firmware data.
 *
 * NOTE(review): truncated in this chunk — the body of the per-level loop
 * (storing "clk" into the table entries) and the function tail lie outside
 * this view, so the code is kept byte-identical. Reads the level count,
 * the fine-grained flag (except on SMU 13.0.2, which does not report it),
 * and then each level's frequency by index.
 */
int smu_v13_0_set_single_dpm_table(struct smu_context *smu, enum smu_clk_type clk_type, struct smu_13_0_dpm_table *single_dpm_table)
{ int ret = 0;
uint32_t clk; int i;
ret = smu_v13_0_get_dpm_level_count(smu,
clk_type,
&single_dpm_table->count); if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__); return ret;
}
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 2)) {
ret = smu_v13_0_get_fine_grained_status(smu,
clk_type,
&single_dpm_table->is_fine_grained); if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__); return ret;
}
}
for (i = 0; i < single_dpm_table->count; i++) {
ret = smu_v13_0_get_dpm_freq_by_index(smu,
clk_type,
i,
&clk); if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__); return ret;
}
/*
 * smu_v13_0_get_bamaco_support - report BACO/MACO capability flags.
 *
 * Returns 0 (no support) for SR-IOV guests, platforms without BACO
 * support, or when the BACO feature exists but is not enabled. Otherwise
 * returns a bitmask of MACO_SUPPORT (when the platform advertises MACO)
 * and BACO_SUPPORT; an ASIC already sitting in BACO short-circuits as
 * BACO-capable.
 */
int smu_v13_0_get_bamaco_support(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	int support = 0;

	if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
		return 0;

	if (smu_baco->maco_support)
		support |= MACO_SUPPORT;

	/* Already in BACO counts as BACO-capable. */
	if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
		return support | BACO_SUPPORT;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
	    !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return 0;

	return support | BACO_SUPPORT;
}
int smu_v13_0_baco_enter(struct smu_context *smu)
{ struct amdgpu_device *adev = smu->adev; int ret;
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { return smu_v13_0_baco_set_armd3_sequence(smu,
(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
BACO_SEQ_BAMACO : BACO_SEQ_BACO);
} else {
ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); if (!ret)
usleep_range(10000, 11000);
return ret;
}
}
/*
 * smu_v13_0_baco_exit - bring the ASIC out of BACO.
 *
 * NOTE(review): this span appears to splice two functions — the head of
 * baco_exit() (its tail is missing) runs straight into the tail of a
 * gfx-IMU enable routine (the EnableGfxImu message send). The code is
 * kept byte-identical; the missing middles would be needed to separate
 * them cleanly.
 */
int smu_v13_0_baco_exit(struct smu_context *smu)
{ struct amdgpu_device *adev = smu->adev; int ret;
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { /* Wait for PMFW handling for the Dstate change */
usleep_range(10000, 11000);
ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
} else {
ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
}
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
SMU_MSG_EnableGfxImu); return smu_cmn_send_msg_without_waiting(smu, index,
ENABLE_IMU_ARG_GFXOFF_ENABLE);
}
/*
 * smu_v13_0_od_edit_dpm_table - sysfs overdrive edits of the sclk range.
 *
 * NOTE(review): truncated in this chunk — the commit path ends after the
 * SetHardMinGfxClk send (the matching soft-max send, the switch default
 * and the function tail are outside this view), so the code is kept
 * byte-identical. Also note the mangled "} elseif (" token below — it
 * should read "} else if (" and needs fixing when the full function is
 * in view. Edits are only allowed in manual DPM mode; EDIT takes
 * (index, value) pairs clamped to the default min/max, RESTORE resets to
 * defaults, COMMIT validates min <= max and pushes the hard minimum.
 */
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size)
{ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); int ret = 0;
/* Only allowed in manual mode */ if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) return -EINVAL;
switch (type) { case PP_OD_EDIT_SCLK_VDDC_TABLE: if (size != 2) {
dev_err(smu->adev->dev, "Input parameter number not correct\n"); return -EINVAL;
}
if (input[0] == 0) { if (input[1] < smu->gfx_default_hard_min_freq) {
dev_warn(smu->adev->dev, "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
input[1], smu->gfx_default_hard_min_freq); return -EINVAL;
}
smu->gfx_actual_hard_min_freq = input[1];
} elseif (input[0] == 1) { if (input[1] > smu->gfx_default_soft_max_freq) {
dev_warn(smu->adev->dev, "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
input[1], smu->gfx_default_soft_max_freq); return -EINVAL;
}
smu->gfx_actual_soft_max_freq = input[1];
} else { return -EINVAL;
} break; case PP_OD_RESTORE_DEFAULT_TABLE: if (size != 0) {
dev_err(smu->adev->dev, "Input parameter number not correct\n"); return -EINVAL;
}
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; break; case PP_OD_COMMIT_DPM_TABLE: if (size != 0) {
dev_err(smu->adev->dev, "Input parameter number not correct\n"); return -EINVAL;
} if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
dev_err(smu->adev->dev, "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu->gfx_actual_hard_min_freq,
smu->gfx_actual_soft_max_freq); return -EINVAL;
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
smu->gfx_actual_hard_min_freq,
NULL); if (ret) {
dev_err(smu->adev->dev, "Set hard min sclk failed!"); return ret;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.