/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
*/
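
/*
 * Headers needed by the code below (mutexes, atomics, msleep(), the
 * power-supply helpers and the amdgpu/SMU interfaces). The exact include
 * list is an assumption reconstructed from the symbols used in this file.
 */
#include <linux/power_supply.h>
#include "amdgpu.h"
#include "amdgpu_dpm.h"
#include "amdgpu_smu.h"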
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk(adev->powerplay.pp_handle,
				 low);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk(adev->powerplay.pp_handle,
				 low);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
uint32_t block_type, bool gate, int inst)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
	bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
	    (!is_vcn || adev->vcn.num_vcn_inst == 1)) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
			block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
	case AMD_IP_BLOCK_TYPE_ISP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				adev->powerplay.pp_handle, block_type, gate, 0));
		break;
	case AMD_IP_BLOCK_TYPE_VCN:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				adev->powerplay.pp_handle, block_type, gate, inst));
		break;
	default:
		break;
	}
if (!ret)
atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
mutex_unlock(&adev->pm.mutex);
return ret;
}
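
/*
 * Usage sketch (hypothetical caller, not part of this file): gate one
 * VCN instance through the helper above. For every block type other
 * than VCN the inst argument is ignored and 0 is forwarded to the SMU.
 *
 *	int r = amdgpu_dpm_set_powergating_by_smu(adev,
 *						  AMD_IP_BLOCK_TYPE_VCN,
 *						  true, 0);
 *	if (r)
 *		dev_err(adev->dev, "gating VCN0 failed: %d\n", r);
 */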
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_gfx_power_up_by_imu(smu);
		mutex_unlock(&adev->pm.mutex);
	}
msleep(10);
return ret;
}
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;
mutex_lock(&adev->pm.mutex);
/* enter BACO state */
ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;
mutex_lock(&adev->pm.mutex);
/* exit BACO state */
ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
mutex_unlock(&adev->pm.mutex);
return ret;
}
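
/*
 * Usage sketch (hypothetical caller): a full BACO cycle pairs the two
 * helpers above. amdgpu_dpm_baco_reset() further down performs the same
 * enter/exit sequence while taking the lock only once.
 *
 *	if (amdgpu_dpm_is_baco_supported(adev)) {
 *		r = amdgpu_dpm_baco_enter(adev);
 *		if (!r)
 *			r = amdgpu_dpm_baco_exit(adev);
 *	}
 */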
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (mp1_state == PP_MP1_STATE_FLR) {
		/* VF lost access to SMU */
		if (amdgpu_sriov_vf(adev))
			adev->pm.dpm_enabled = false;
	} else if (pp_funcs && pp_funcs->set_mp1_state) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_mp1_state(
adev->powerplay.pp_handle,
mp1_state);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

if (pp_funcs && pp_funcs->notify_rlc_state) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->notify_rlc_state(
adev->powerplay.pp_handle,
en);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return 0;

	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices. Needs more investigation.
	 */
	if (adev->in_s3)
		return 0;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_asic_baco_capability(pp_handle);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->asic_reset_mode_2(pp_handle);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_link_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_link_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_link_reset = smu_link_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_link_reset;
}
int amdgpu_dpm_link_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
ret = smu_link_reset(smu);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

if (pp_funcs && pp_funcs->switch_power_profile) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->switch_power_profile(
adev->powerplay.pp_handle, type, en);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev, bool pause)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

if (pp_funcs && pp_funcs->pause_power_profile) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->pause_power_profile(
adev->powerplay.pp_handle, pause);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
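
/*
 * Usage sketch (hypothetical caller): compute users typically enable the
 * COMPUTE profile while work is queued and disable it when idle;
 * pause/resume lets a reset path drop the profile temporarily without
 * disturbing that enable/disable bookkeeping.
 *
 *	amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, true);
 *	// ... submit and wait for work ...
 *	amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, false);
 */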
int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

if (pp_funcs && pp_funcs->set_xgmi_pstate) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
pstate);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

if (pp_funcs && pp_funcs->set_df_cstate) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_df_cstate(pp_handle, cstate);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
				      enum pp_pm_policy p_type, char *buf)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
ret = smu_get_pm_policy_info(smu, p_type, buf);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
			     int policy_level)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
ret = smu_set_pm_policy(smu, policy_type, policy_level);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
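
/*
 * Usage sketch (hypothetical caller): read back the human-readable
 * description of one policy type, then select a level. The available
 * types and levels are ASIC specific; PP_PM_POLICY_SOC_PSTATE is used
 * here purely as an example.
 *
 *	ssize_t n = amdgpu_dpm_get_pm_policy_info(adev,
 *						  PP_PM_POLICY_SOC_PSTATE, buf);
 *	if (n > 0)
 *		r = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_SOC_PSTATE, 0);
 */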
int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_clockgating_by_smu(pp_handle,
msg_id);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->smu_i2c_bus_access(pp_handle,
acquire);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
if (is_support_sw_smu(adev))
smu_set_ac_dc(adev->powerplay.pp_handle);
mutex_unlock(&adev->pm.mutex);
}
}
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev,
			   enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

if (pp_funcs && pp_funcs->read_sensor) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
sensor,
data,
size);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
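
/*
 * Usage sketch (hypothetical caller): *size is an in/out parameter and
 * must be set to the size of the destination buffer before the call.
 *
 *	uint32_t temp, sz = sizeof(temp);
 *	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
 *				   (void *)&temp, &sz);
 *	// on success, temp holds the edge temperature in millidegrees C
 */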
int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
mutex_unlock(&adev->pm.mutex);
}
	return ret;
}

int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware ||
	    (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
}
if (smu_version)
*smu_version = adev->pm.fw_version;
out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
enable);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = smu_send_hbm_bad_pages_num(smu, size);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = smu_send_hbm_bad_channel_flag(smu, size);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = smu_send_rma_reason(smu);
mutex_unlock(&adev->pm.mutex);
	if (adev->cper.enabled)
		if (amdgpu_cper_generate_bp_threshold_record(adev))
			dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");

return ret;
}
/**
 * amdgpu_dpm_reset_sdma_is_supported - Check if SDMA reset is supported
 * @adev: amdgpu_device pointer
 *
 * This function checks if the SMU supports resetting the SDMA engine.
 * It returns false if the hardware does not support software SMU or
 * if the feature is not supported.
 */
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool ret;

	if (!is_support_sw_smu(adev))
		return false;

mutex_lock(&adev->pm.mutex);
ret = smu_reset_sdma_is_supported(smu);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = smu_reset_sdma(smu, inst_mask);
mutex_unlock(&adev->pm.mutex);
return ret;
}
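
/*
 * Usage sketch (hypothetical caller): the support query above is meant
 * to gate the actual reset request, with inst_mask selecting the SDMA
 * instances to reset.
 *
 *	if (amdgpu_dpm_reset_sdma_is_supported(adev))
 *		r = amdgpu_dpm_reset_sdma(adev, BIT(inst));
 */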
int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = smu_reset_vcn(smu, inst_mask);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
SMU_SCLK,
min,
max);
mutex_unlock(&adev->pm.mutex);
	return ret;
}

int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	    (level & profile_mode_mask))
		amdgpu_dpm_enter_umd_state(adev);
	else if ((current_level & profile_mode_mask) &&
		 !(level & profile_mode_mask))
		amdgpu_dpm_exit_umd_state(adev);

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		/* If new level failed, retain the umd state as before */
		if (!(current_level & profile_mode_mask) &&
		    (level & profile_mode_mask))
			amdgpu_dpm_exit_umd_state(adev);
		else if ((current_level & profile_mode_mask) &&
			 !(level & profile_mode_mask))
			amdgpu_dpm_enter_umd_state(adev);

		return -EINVAL;
	}
adev->pm.dpm.forced_level = level;
mutex_unlock(&adev->pm.mutex);
return 0;
}
int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
states);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
task_id,
user_state);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
table);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
uint32_t type, long *input,
uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
type,
input,
size);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
uint32_t type, long *input,
uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
type,
input,
size);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, enum pp_clock_type type, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
type,
buf);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev, enum pp_clock_type type, char *buf, int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
type,
buf,
offset);
mutex_unlock(&adev->pm.mutex);
return ret;
}
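
/*
 * Usage sketch (hypothetical caller): unlike the print variant above,
 * the emit variant appends at *offset and advances it, so several clock
 * types can be written into one sysfs page buffer.
 *
 *	int off = 0;
 *	r = amdgpu_dpm_emit_clock_levels(adev, PP_SCLK, buf, &off);
 *	if (!r)
 *		r = amdgpu_dpm_emit_clock_levels(adev, PP_MCLK, buf, &off);
 */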
int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
ppfeature_masks);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
buf);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, enum pp_clock_type type,
uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
type,
mask);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
buf);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
input,
size);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
table);
mutex_unlock(&adev->pm.mutex);
return ret;
}
ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
size);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
fan_mode);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
speed);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
speed);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
speed);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
speed);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
mode);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
limit,
pp_limit_level,
power_type);
mutex_unlock(&adev->pm.mutex);
return ret;
}
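
/*
 * Usage sketch (hypothetical caller): query the currently enforced
 * sustained power limit; the unit of *limit is backend specific
 * (typically watts).
 *
 *	uint32_t limit;
 *	r = amdgpu_dpm_get_power_limit(adev, &limit,
 *				       PP_PWR_LIMIT_CURRENT,
 *				       PP_PWR_TYPE_SUSTAINED);
 */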
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
limit);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		cclk_dpm_supported = is_support_cclk_dpm(adev);
		mutex_unlock(&adev->pm.mutex);
	}

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * dpm on some legacy asics don't carry od_enabled member
		 * as its pp_handle is casted directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
disable_memory_clock_switch);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
max_clocks);
mutex_unlock(&adev->pm.mutex);
return ret;
}
enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
clock_values_in_khz,
num_states);
mutex_unlock(&adev->pm.mutex);
return ret;
}
int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
clock_table);
mutex_unlock(&adev->pm.mutex);
return ret;
}
/**
 * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
 * partition
 * @adev: Pointer to the device.
 * @xcp_id: Identifier of the XCP for which metrics are to be retrieved.
 * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
 * function returns the size of the metrics structure.
 *
 * This function retrieves metrics for a specific XCP, including details such
 * as VCN/JPEG activity, clock frequencies, and other performance metrics. If
 * the table parameter is NULL, the function returns the size of the metrics
 * structure without populating it.
 *
 * Return: Size of the metrics structure on success, or a negative error code
 * on failure.
*/
ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
				   void *table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_xcp_metrics)
		return 0;

mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id,
table);
mutex_unlock(&adev->pm.mutex);
	return ret;
}
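
/*
 * Usage sketch (hypothetical caller): per the kernel-doc above, a NULL
 * table first returns the size to allocate, and a second call fills the
 * buffer.
 *
 *	ssize_t sz = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, NULL);
 *	if (sz > 0) {
 *		void *table = kzalloc(sz, GFP_KERNEL);
 *		if (table)
 *			sz = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, table);
 *	}
 */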