/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#define SWSMU_CODE_LAYER_L2
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v13_0_6_pmfw.h"
#include "smu13_driver_if_v13_0_6.h"
#include "smu_v13_0_6_ppsmc.h"
#include "soc15_common.h"
#include "atom.h"
#include "power_state.h"
#include "smu_v13_0.h"
#include "smu_v13_0_6_ppt.h"
#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "thm/thm_11_0_2_offset.h"
#include "thm/thm_11_0_2_sh_mask.h"
#include "amdgpu_xgmi.h"
#include <linux/pci.h>
#include "amdgpu_ras.h"
#include "amdgpu_mca.h"
#include "amdgpu_aca.h"
#include "smu_cmn.h"
#include "mp/mp_13_0_6_offset.h"
#include "mp/mp_13_0_6_sh_mask.h"
#include "umc_v12_0.h"
#undef MP1_Public
#undef smnMP1_FIRMWARE_FLAGS
/* TODO: Check final register offsets */
#define MP1_Public 0x03b00000
#define smnMP1_FIRMWARE_FLAGS 0x3010028
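/* MP1_Public is the SMN aperture base for MP1; combined with the
 * firmware-flags offset it forms the register polled in
 * smu_v13_0_6_check_fw_status() below to confirm PMFW has booted.
 */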
/*
* DO NOT use these for err/warn/info/debug messages.
* Use dev_err, dev_warn, dev_info and dev_dbg instead.
* They are more MGPU friendly.
*/
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug
MODULE_FIRMWARE("amdgpu/smu_13_0_6.bin" );
MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin" );
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
#define SMU_13_0_6_FEA_MAP(smu_feature, smu_13_0_6_feature) \
[smu_feature] = { 1, (smu_13_0_6_feature) }
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE \
(FEATURE_MASK(FEATURE_DATA_CALCULATION) | \
FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_UCLK) | \
FEATURE_MASK(FEATURE_DPM_SOCCLK) | FEATURE_MASK(FEATURE_DPM_FCLK) | \
FEATURE_MASK(FEATURE_DPM_LCLK) | FEATURE_MASK(FEATURE_DPM_XGMI) | \
FEATURE_MASK(FEATURE_DPM_VCN))
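/* Aggregate mask of the clock-domain DPM features; presumably a quick
 * way to test whether any DPM feature at all is active.
 */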
/* possible frequency drift (1Mhz) */
#define EPSILON 1
#define smnPCIE_ESM_CTRL 0x93D0
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
#define MAX_LINK_WIDTH 6
#define smnPCIE_LC_SPEED_CNTL 0x1a340290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5
#define LINK_SPEED_MAX 4
#define SMU_13_0_6_DSCLK_THRESHOLD 140
#define MCA_BANK_IPID(_ip, _hwid, _type) \
[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
struct mca_bank_ipid {
enum amdgpu_mca_ip ip;
uint16_t hwid;
uint16_t mcatype;
};
struct mca_ras_info {
enum amdgpu_ras_block blkid;
enum amdgpu_mca_ip ip;
int *err_code_array;
int err_code_count;
int (*get_err_count)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count);
bool (*bank_is_valid)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry);
};
#define P2S_TABLE_ID_A 0x50325341
#define P2S_TABLE_ID_X 0x50325358
#define P2S_TABLE_ID_3 0x50325303
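/* Table IDs are ASCII tags: 0x50325341 is "P2SA", 0x50325358 is "P2SX",
 * and 0x50325303 is "P2S" followed by byte 0x03.
 */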
// clang-format off
static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0),
MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1),
MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0),
MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0),
MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0),
MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0),
MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0),
MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0),
MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0),
MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0),
MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0),
MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1),
MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1),
MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1),
MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0),
MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0),
MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI),
MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI),
MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI),
MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI),
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 0),
};
// clang-format on
static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(SOCCLK, PPCLK_SOCCLK),
CLK_MAP(FCLK, PPCLK_FCLK),
CLK_MAP(UCLK, PPCLK_UCLK),
CLK_MAP(MCLK, PPCLK_UCLK),
CLK_MAP(DCLK, PPCLK_DCLK),
CLK_MAP(VCLK, PPCLK_VCLK),
CLK_MAP(LCLK, PPCLK_LCLK),
};
static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_COUNT] = {
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, FEATURE_DPM_UCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, FEATURE_DPM_SOCCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, FEATURE_DPM_LCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_VCLK_BIT, FEATURE_DPM_VCN),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_DCLK_BIT, FEATURE_DPM_VCN),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT, FEATURE_DPM_XGMI),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, FEATURE_DPM_VCN),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_SMU_CG),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_GFXOFF_BIT, FEATURE_GFXOFF),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_VCN_BIT, FEATURE_DS_VCN),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MP1CLK_BIT, FEATURE_DS_MP1CLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK),
SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK),
};
#define TABLE_PMSTATUSLOG 0
#define TABLE_SMU_METRICS 1
#define TABLE_I2C_COMMANDS 2
#define TABLE_COUNT 3
static const struct cmn2asic_mapping smu_v13_0_6_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(PMSTATUSLOG),
TAB_MAP(SMU_METRICS),
TAB_MAP(I2C_COMMANDS),
};
static const uint8_t smu_v13_0_6_throttler_map[] = {
[THROTTLER_PPT_BIT] = (SMU_THROTTLER_PPT0_BIT),
[THROTTLER_THERMAL_SOCKET_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT),
[THROTTLER_THERMAL_HBM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
[THROTTLER_THERMAL_VR_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
[THROTTLER_PROCHOT_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT),
};
#define GET_GPU_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V0) ?\
(metrics_v0->field) : (metrics_v2->field))
#define GET_METRIC_FIELD(field, version) ((version == METRICS_VERSION_V1) ?\
(metrics_v1->field) : GET_GPU_METRIC_FIELD(field, version))
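/* These helpers pick the metrics layout at runtime; they expect
 * metrics_v0/metrics_v1/metrics_v2 pointers (all aliasing the same
 * buffer) to be in scope at the call site.
 */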
#define METRICS_TABLE_SIZE (max3(sizeof(MetricsTableV0_t),\
sizeof(MetricsTableV1_t),\
sizeof(MetricsTableV2_t)))
struct smu_v13_0_6_dpm_map {
enum smu_clk_type clk_type;
uint32_t feature_num;
struct smu_13_0_dpm_table *dpm_table;
uint32_t *freq_table;
};
static inline int smu_v13_0_6_get_metrics_version(struct smu_context *smu)
{
if ((smu->adev->flags & AMD_IS_APU) &&
smu->smc_fw_version <= 0x4556900)
return METRICS_VERSION_V1;
else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
IP_VERSION(13, 0, 12))
return METRICS_VERSION_V2;
return METRICS_VERSION_V0;
}
static inline void smu_v13_0_6_cap_set(struct smu_context *smu,
enum smu_v13_0_6_caps cap)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
dpm_context->caps |= BIT_ULL(cap);
}
static inline void smu_v13_0_6_cap_clear(struct smu_context *smu,
enum smu_v13_0_6_caps cap)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
dpm_context->caps &= ~BIT_ULL(cap);
}
bool smu_v13_0_6_cap_supported(struct smu_context *smu,
enum smu_v13_0_6_caps cap)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
return !!(dpm_context->caps & BIT_ULL(cap));
}
static void smu_v13_0_14_init_caps(struct smu_context *smu)
{
enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
SMU_CAP(SET_UCLK_MAX),
SMU_CAP(DPM_POLICY),
SMU_CAP(PCIE_METRICS),
SMU_CAP(CTF_LIMIT),
SMU_CAP(MCA_DEBUG_MODE),
SMU_CAP(RMA_MSG),
SMU_CAP(ACA_SYND) };
uint32_t fw_ver = smu->smc_fw_version;
for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
smu_v13_0_6_cap_set(smu, default_cap_list[i]);
if (fw_ver >= 0x05550E00)
smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
if (fw_ver >= 0x05550B00)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
if (fw_ver >= 0x5551200)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
if (fw_ver >= 0x5551600) {
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
}
static void smu_v13_0_12_init_caps(struct smu_context *smu)
{
enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
SMU_CAP(PCIE_METRICS),
SMU_CAP(CTF_LIMIT),
SMU_CAP(MCA_DEBUG_MODE),
SMU_CAP(RMA_MSG),
SMU_CAP(ACA_SYND),
SMU_CAP(OTHER_END_METRICS),
SMU_CAP(PER_INST_METRICS) };
uint32_t fw_ver = smu->smc_fw_version;
for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
smu_v13_0_6_cap_set(smu, default_cap_list[i]);
if (fw_ver < 0x00561900)
smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM));
if (fw_ver >= 0x00561700)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
if (fw_ver >= 0x00561E00)
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
if (fw_ver >= 0x00562500)
smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
if (fw_ver >= 0x04560100) {
smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
}
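/* The PMFW version is packed as program/major/minor/debug bytes; the
 * top byte ("pgm" below) selects the firmware program branch when
 * comparing feature-gate versions.
 */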
static void smu_v13_0_6_init_caps(struct smu_context *smu)
{
enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
SMU_CAP(SET_UCLK_MAX),
SMU_CAP(DPM_POLICY),
SMU_CAP(PCIE_METRICS),
SMU_CAP(CTF_LIMIT),
SMU_CAP(MCA_DEBUG_MODE),
SMU_CAP(RMA_MSG),
SMU_CAP(ACA_SYND) };
struct amdgpu_device *adev = smu->adev;
uint32_t fw_ver = smu->smc_fw_version;
uint32_t pgm = (fw_ver >> 24) & 0xFF;
for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
smu_v13_0_6_cap_set(smu, default_cap_list[i]);
if (fw_ver < 0x552F00)
smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM));
if (fw_ver < 0x554500)
smu_v13_0_6_cap_clear(smu, SMU_CAP(CTF_LIMIT));
if (adev->flags & AMD_IS_APU) {
smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS));
smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY));
smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
if (fw_ver >= 0x04556A00)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
} else {
if (fw_ver >= 0x557600)
smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
if (fw_ver < 0x00556000)
smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY));
if (amdgpu_sriov_vf(adev) && (fw_ver < 0x556600))
smu_v13_0_6_cap_clear(smu, SMU_CAP(SET_UCLK_MAX));
if (fw_ver < 0x556300)
smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS));
if (fw_ver < 0x554800)
smu_v13_0_6_cap_clear(smu, SMU_CAP(MCA_DEBUG_MODE));
if (fw_ver >= 0x556F00)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
if (fw_ver < 0x00555a00)
smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
if (fw_ver < 0x00555600)
smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
if ((pgm == 7 && fw_ver >= 0x7550E00) ||
(pgm == 0 && fw_ver >= 0x00557E00))
smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
if ((pgm == 0 && fw_ver >= 0x00557F01) ||
(pgm == 7 && fw_ver >= 0x7551000)) {
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
}
if ((pgm == 0 && fw_ver >= 0x00558000) ||
(pgm == 7 && fw_ver >= 0x7551000))
smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
((pgm == 0) && (fw_ver >= 0x00557900)) ||
((pgm == 4) && (fw_ver >= 0x4557000)))
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
}
static void smu_v13_0_x_init_caps(struct smu_context *smu)
{
switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 12):
return smu_v13_0_12_init_caps(smu);
case IP_VERSION(13, 0, 14):
return smu_v13_0_14_init_caps(smu);
default:
return smu_v13_0_6_init_caps(smu);
}
}
static int smu_v13_0_6_check_fw_version(struct smu_context *smu)
{
int r;
r = smu_v13_0_check_fw_version(smu);
/* Initialize caps flags once fw version is fetched */
if (!r)
smu_v13_0_x_init_caps(smu);
return r;
}
static int smu_v13_0_6_init_microcode(struct smu_context *smu)
{
const struct smc_firmware_header_v2_1 *v2_1;
const struct common_firmware_header *hdr;
struct amdgpu_firmware_info *ucode = NULL;
struct smc_soft_pptable_entry *entries;
struct amdgpu_device *adev = smu->adev;
uint32_t p2s_table_id = P2S_TABLE_ID_A;
int ret = 0, i, p2stable_count;
int var = (adev->pdev->device & 0xF);
char ucode_prefix[15];
/* No need to load P2S tables in IOV mode or for smu v13.0.12 */
if (amdgpu_sriov_vf(adev) ||
(amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)))
return 0;
if (!(adev->flags & AMD_IS_APU)) {
p2s_table_id = P2S_TABLE_ID_X;
if (var == 0x5)
p2s_table_id = P2S_TABLE_ID_3;
}
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix,
sizeof(ucode_prefix));
ret = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
"amdgpu/%s.bin", ucode_prefix);
if (ret)
goto out;
hdr = (const struct common_firmware_header *)adev->pm.fw->data;
amdgpu_ucode_print_smc_hdr(hdr);
/* SMU v13.0.6 binary file doesn't carry pptables, instead the entries
* are used to carry p2s tables.
*/
v2_1 = (const struct smc_firmware_header_v2_1 *)adev->pm.fw->data;
entries = (struct smc_soft_pptable_entry *)((uint8_t *)v2_1 +
le32_to_cpu(v2_1->pptable_entry_offset));
p2stable_count = le32_to_cpu(v2_1->pptable_count);
for (i = 0; i < p2stable_count; i++) {
if (le32_to_cpu(entries[i].id) == p2s_table_id) {
smu->pptable_firmware.data =
((uint8_t *)v2_1 +
le32_to_cpu(entries[i].ppt_offset_bytes));
smu->pptable_firmware.size =
le32_to_cpu(entries[i].ppt_size_bytes);
break;
}
}
if (smu->pptable_firmware.data && smu->pptable_firmware.size) {
ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
ucode->ucode_id = AMDGPU_UCODE_ID_P2S_TABLE;
ucode->fw = &smu->pptable_firmware;
adev->firmware.fw_size += ALIGN(ucode->fw->size, PAGE_SIZE);
}
return 0;
out:
amdgpu_ucode_release(&adev->pm.fw);
return ret;
}
static int smu_v13_0_6_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
struct amdgpu_device *adev = smu->adev;
int gpu_metrics_size = METRICS_TABLE_SIZE;
if (!(adev->flags & AMD_IS_APU))
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
max(gpu_metrics_size,
smu_v13_0_12_get_max_metrics_size()),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
smu_table->metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8);
smu_table->gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
kfree(smu_table->metrics_table);
return -ENOMEM;
}
smu_table->driver_pptable =
kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
if (!smu_table->driver_pptable) {
kfree(smu_table->metrics_table);
kfree(smu_table->gpu_metrics_table);
return -ENOMEM;
}
return 0;
}
static int smu_v13_0_6_select_policy_soc_pstate(struct smu_context *smu,
int policy)
{
struct amdgpu_device *adev = smu->adev;
int ret, param;
switch (policy) {
case SOC_PSTATE_DEFAULT:
param = 0;
break;
case SOC_PSTATE_0:
param = 1;
break;
case SOC_PSTATE_1:
param = 2;
break;
case SOC_PSTATE_2:
param = 3;
break;
default:
return -EINVAL;
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetThrottlingPolicy,
param, NULL);
if (ret)
dev_err(adev->dev, "select soc pstate policy %d failed\n",
policy);
return ret;
}
static int smu_v13_0_6_select_plpd_policy(struct smu_context *smu, int level)
{
struct amdgpu_device *adev = smu->adev;
int ret, param;
switch (level) {
case XGMI_PLPD_DEFAULT:
param = PPSMC_PLPD_MODE_DEFAULT;
break;
case XGMI_PLPD_OPTIMIZED:
param = PPSMC_PLPD_MODE_OPTIMIZED;
break;
case XGMI_PLPD_DISALLOW:
param = 0;
break;
default:
return -EINVAL;
}
}
if (level == XGMI_PLPD_DISALLOW)
ret = smu_cmn_send_smc_msg_with_param(
smu, SMU_MSG_GmiPwrDnControl, param, NULL);
else
/* change xgmi per-link power down policy */
ret = smu_cmn_send_smc_msg_with_param(
smu, SMU_MSG_SelectPLPDMode, param, NULL);
if (ret)
dev_err(adev->dev,
"select xgmi per-link power down policy %d failed\n" ,
level);
return ret;
}
static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_dpm_policy *policy;
smu_dpm->dpm_context =
kzalloc(sizeof(struct smu_13_0_dpm_context), GFP_KERNEL);
if (!smu_dpm->dpm_context)
return -ENOMEM;
smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
smu_dpm->dpm_policies =
kzalloc(sizeof(struct smu_dpm_policy_ctxt), GFP_KERNEL);
if (!smu_dpm->dpm_policies) {
kfree(smu_dpm->dpm_context);
return -ENOMEM;
}
if (!(smu->adev->flags & AMD_IS_APU)) {
policy = &(smu_dpm->dpm_policies->policies[0]);
policy->policy_type = PP_PM_POLICY_SOC_PSTATE;
policy->level_mask = BIT(SOC_PSTATE_DEFAULT) |
BIT(SOC_PSTATE_0) | BIT(SOC_PSTATE_1) |
BIT(SOC_PSTATE_2);
policy->current_level = SOC_PSTATE_DEFAULT;
policy->set_policy = smu_v13_0_6_select_policy_soc_pstate;
smu_cmn_generic_soc_policy_desc(policy);
smu_dpm->dpm_policies->policy_mask |=
BIT(PP_PM_POLICY_SOC_PSTATE);
}
policy = &(smu_dpm->dpm_policies->policies[1]);
policy->policy_type = PP_PM_POLICY_XGMI_PLPD;
policy->level_mask = BIT(XGMI_PLPD_DISALLOW) | BIT(XGMI_PLPD_DEFAULT) |
BIT(XGMI_PLPD_OPTIMIZED);
policy->current_level = XGMI_PLPD_DEFAULT;
policy->set_policy = smu_v13_0_6_select_plpd_policy;
smu_cmn_generic_plpd_policy_desc(policy);
smu_dpm->dpm_policies->policy_mask |= BIT(PP_PM_POLICY_XGMI_PLPD);
return 0;
}
static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
{
int ret = 0;
ret = smu_v13_0_6_tables_init(smu);
if (ret)
return ret;
ret = smu_v13_0_6_allocate_dpm_context(smu);
return ret;
}
static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask,
uint32_t num)
{
if (num > 2)
return -EINVAL;
/* pptable will handle the features to enable */
memset(feature_mask, 0xFF, sizeof(uint32_t) * num);
return 0;
}
int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
bool bypass_cache)
{
struct smu_table_context *smu_table = &smu->smu_table;
uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
struct smu_table *table = &smu_table->driver_table;
int ret;
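/* Serve from the cached table unless the caller bypasses the cache or
 * the cached copy is older than 1 ms.
 */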
if (bypass_cache || !smu_table->metrics_time ||
time_after(jiffies,
smu_table->metrics_time + msecs_to_jiffies(1))) {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsTable, NULL);
if (ret) {
dev_info(smu->adev->dev,
"Failed to export SMU metrics table!\n" );
return ret;
}
amdgpu_asic_invalidate_hdp(smu->adev, NULL);
memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
smu_table->metrics_time = jiffies;
}
if (metrics_table)
memcpy(metrics_table, smu_table->metrics_table, table_size);
return 0;
}
static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
void *metrics, size_t max_size)
{
struct smu_table_context *smu_tbl_ctxt = &smu->smu_table;
uint32_t table_version = smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].version;
uint32_t table_size = smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].size;
struct amdgpu_pm_metrics *pm_metrics = metrics;
uint32_t pmfw_version;
int ret;
if (!pm_metrics || !max_size)
return -EINVAL;
if (max_size < (table_size + sizeof(pm_metrics->common_header)))
return -EOVERFLOW;
/* Don't use cached metrics data */
ret = smu_v13_0_6_get_metrics_table(smu, pm_metrics->data, true);
if (ret)
return ret;
smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
memset(&pm_metrics->common_header, 0,
sizeof(pm_metrics->common_header));
pm_metrics->common_header.mp1_ip_discovery_version =
amdgpu_ip_version(smu->adev, MP1_HWIP, 0);
pm_metrics->common_header.pmfw_version = pmfw_version;
pm_metrics->common_header.pmmetrics_version = table_version;
pm_metrics->common_header.structure_size =
sizeof(pm_metrics->common_header) + table_size;
return pm_metrics->common_header.structure_size;
}
static void smu_v13_0_6_fill_static_metrics_table(struct smu_context *smu,
StaticMetricsTable_t *static_metrics)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
if (!static_metrics->InputTelemetryVoltageInmV) {
dev_warn(smu->adev->dev, "Invalid board voltage %d\n" ,
static_metrics->InputTelemetryVoltageInmV);
}
dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV;
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) &&
static_metrics->pldmVersion[0] != 0xFFFFFFFF)
smu->adev->firmware.pldm_version =
static_metrics->pldmVersion[0];
}
int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
struct smu_table *table = &smu_table->driver_table;
int ret;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetStaticMetricsTable, NULL);
if (ret) {
dev_info(smu->adev->dev,
"Failed to export static metrics table!\n" );
return ret;
}
amdgpu_asic_invalidate_hdp(smu->adev, NULL);
memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
return 0;
}
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table;
MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
int version = smu_v13_0_6_get_metrics_version(smu);
int ret, i, retry = 100;
uint32_t table_version;
uint16_t max_speed;
uint8_t max_width;
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_setup_driver_pptable(smu);
/* Store one-time values in driver PPTable */
if (!pptable->Init) {
while (--retry) {
ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
if (ret)
return ret;
/* Ensure that metrics have been updated */
if (GET_METRIC_FIELD(AccumulationCounter, version))
break ;
usleep_range(1000, 1100);
}
if (!retry)
return -ETIME;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion,
&table_version);
if (ret)
return ret;
smu_table->tables[SMU_TABLE_SMU_METRICS].version =
table_version;
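/* Metrics fields are Q10 fixed point; SMUQ10_ROUND converts them to
 * plain integer units with rounding.
 */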
pptable->MaxSocketPowerLimit =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, version));
pptable->MaxGfxclkFrequency =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version));
pptable->MinGfxclkFrequency =
SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version));
max_width = (uint8_t)GET_METRIC_FIELD(XgmiWidth, version);
max_speed = (uint16_t)GET_METRIC_FIELD(XgmiBitrate, version);
amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width);
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, version)[i]);
pptable->UclkFrequencyTable[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, version)[i]);
pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND(
GET_METRIC_FIELD(SocclkFrequencyTable, version)[i]);
pptable->VclkFrequencyTable[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, version)[i]);
pptable->DclkFrequencyTable[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, version)[i]);
pptable->LclkFrequencyTable[i] =
SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, version)[i]);
}
/* use AID0 serial number by default */
pptable->PublicSerialNumber_AID =
GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0];
pptable->Init = true;
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
ret = smu_v13_0_6_get_static_metrics_table(smu);
if (ret)
return ret;
smu_v13_0_6_fill_static_metrics_table(smu, static_metrics);
}
}
return 0;
}
static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
struct smu_13_0_dpm_table *dpm_table;
uint32_t min_clk, max_clk, param;
int ret = 0, clk_id = 0;
/* Use dpm tables, if data is already fetched */
if (pptable->Init) {
switch (clk_type) {
case SMU_MCLK:
case SMU_UCLK:
dpm_table = &dpm_context->dpm_tables.uclk_table;
break;
case SMU_GFXCLK:
case SMU_SCLK:
dpm_table = &dpm_context->dpm_tables.gfx_table;
break;
case SMU_SOCCLK:
dpm_table = &dpm_context->dpm_tables.soc_table;
break;
case SMU_FCLK:
dpm_table = &dpm_context->dpm_tables.fclk_table;
break;
case SMU_VCLK:
dpm_table = &dpm_context->dpm_tables.vclk_table;
break;
case SMU_DCLK:
dpm_table = &dpm_context->dpm_tables.dclk_table;
break;
default:
return -EINVAL;
}
min_clk = dpm_table->min;
max_clk = dpm_table->max;
if (min)
*min = min_clk;
if (max)
*max = max_clk;
if (min_clk && max_clk)
return 0;
}
if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) {
clk_id = smu_cmn_to_asic_specific_index(
smu, CMN2ASIC_MAPPING_CLK, clk_type);
if (clk_id < 0) {
ret = -EINVAL;
goto failed;
}
param = (clk_id & 0xffff) << 16;
}
if (max) {
if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
ret = smu_cmn_send_smc_msg(
smu, SMU_MSG_GetMaxGfxclkFrequency, max);
else
ret = smu_cmn_send_smc_msg_with_param(
smu, SMU_MSG_GetMaxDpmFreq, param, max);
if (ret)
goto failed;
}
if (min) {
if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
ret = smu_cmn_send_smc_msg(
smu, SMU_MSG_GetMinGfxclkFrequency, min);
else
ret = smu_cmn_send_smc_msg_with_param(
smu, SMU_MSG_GetMinDpmFreq, param, min);
}
failed:
return ret;
}
static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *levels)
{
int ret;
ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, levels);
if (!ret)
++(*levels);
return ret;
}
static void smu_v13_0_6_pm_policy_init(struct smu_context *smu)
{
struct smu_dpm_policy *policy;
policy = smu_get_pm_policy(smu, PP_PM_POLICY_SOC_PSTATE);
if (policy)
policy->current_level = SOC_PSTATE_DEFAULT;
}
static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_13_0_dpm_table *dpm_table = NULL;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
uint32_t gfxclkmin, gfxclkmax, levels;
int ret = 0, i, j;
struct smu_v13_0_6_dpm_map dpm_map[] = {
{ SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT,
&dpm_context->dpm_tables.soc_table,
pptable->SocclkFrequencyTable },
{ SMU_UCLK, SMU_FEATURE_DPM_UCLK_BIT,
&dpm_context->dpm_tables.uclk_table,
pptable->UclkFrequencyTable },
{ SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT,
&dpm_context->dpm_tables.fclk_table,
pptable->FclkFrequencyTable },
{ SMU_VCLK, SMU_FEATURE_DPM_VCLK_BIT,
&dpm_context->dpm_tables.vclk_table,
pptable->VclkFrequencyTable },
{ SMU_DCLK, SMU_FEATURE_DPM_DCLK_BIT,
&dpm_context->dpm_tables.dclk_table,
pptable->DclkFrequencyTable },
};
smu_v13_0_6_setup_driver_pptable(smu);
/* DPM policy not supported in older firmwares */
if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM_POLICY))) {
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
smu_dpm->dpm_policies->policy_mask &=
~BIT(PP_PM_POLICY_SOC_PSTATE);
}
smu_v13_0_6_pm_policy_init(smu);
/* gfxclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.gfx_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
/* In the case of gfxclk, only fine-grained dpm is honored.
* Get min/max values from FW.
*/
ret = smu_v13_0_6_get_dpm_ultimate_freq(smu, SMU_GFXCLK,
&gfxclkmin, &gfxclkmax);
if (ret)
return ret;
dpm_table->count = 2;
dpm_table->dpm_levels[0].value = gfxclkmin;
dpm_table->dpm_levels[0].enabled = true;
dpm_table->dpm_levels[1].value = gfxclkmax;
dpm_table->dpm_levels[1].enabled = true;
dpm_table->min = dpm_table->dpm_levels[0].value;
dpm_table->max = dpm_table->dpm_levels[1].value;
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = pptable->MinGfxclkFrequency;
dpm_table->dpm_levels[0].enabled = true;
dpm_table->min = dpm_table->dpm_levels[0].value;
dpm_table->max = dpm_table->dpm_levels[0].value;
}
for (j = 0; j < ARRAY_SIZE(dpm_map); j++) {
dpm_table = dpm_map[j].dpm_table;
levels = 1;
if (smu_cmn_feature_is_enabled(smu, dpm_map[j].feature_num)) {
ret = smu_v13_0_6_get_dpm_level_count(
smu, dpm_map[j].clk_type, &levels);
if (ret)
return ret;
}
dpm_table->count = levels;
for (i = 0; i < dpm_table->count; ++i) {
dpm_table->dpm_levels[i].value =
dpm_map[j].freq_table[i];
dpm_table->dpm_levels[i].enabled = true;
}
dpm_table->min = dpm_table->dpm_levels[0].value;
dpm_table->max = dpm_table->dpm_levels[levels - 1].value;
}
return 0;
}
static int smu_v13_0_6_setup_pptable(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
/* TODO: PPTable is not available.
* 1) Find an alternate way to get 'PPTable values' here.
* 2) Check if there is SW CTF
*/
table_context->thermal_controller_type = 0;
return 0;
}
static int smu_v13_0_6_check_fw_status(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
mp1_fw_flags =
RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
return 0;
return -EIO;
}
static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_13_0_dpm_table *gfx_table =
&dpm_context->dpm_tables.gfx_table;
struct smu_13_0_dpm_table *mem_table =
&dpm_context->dpm_tables.uclk_table;
struct smu_13_0_dpm_table *soc_table =
&dpm_context->dpm_tables.soc_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
pstate_table->gfxclk_pstate.min = gfx_table->min;
pstate_table->gfxclk_pstate.peak = gfx_table->max;
pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
pstate_table->uclk_pstate.curr.min = mem_table->min;
pstate_table->uclk_pstate.curr.max = mem_table->max;
pstate_table->socclk_pstate.min = soc_table->min;
pstate_table->socclk_pstate.peak = soc_table->max;
pstate_table->socclk_pstate.curr.min = soc_table->min;
pstate_table->socclk_pstate.curr.max = soc_table->max;
if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL &&
mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL &&
soc_table->count > SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL) {
pstate_table->gfxclk_pstate.standard =
gfx_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL].value;
pstate_table->uclk_pstate.standard =
mem_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL].value;
pstate_table->socclk_pstate.standard =
soc_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL].value;
} else {
pstate_table->gfxclk_pstate.standard =
pstate_table->gfxclk_pstate.min;
pstate_table->uclk_pstate.standard =
pstate_table->uclk_pstate.min;
pstate_table->socclk_pstate.standard =
pstate_table->socclk_pstate.min;
}
return 0;
}
static int smu_v13_0_6_get_clk_table(struct smu_context *smu,
struct pp_clock_levels_with_latency *clocks,
struct smu_13_0_dpm_table *dpm_table)
{
int i, count;
count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS :
dpm_table->count;
clocks->num_levels = count;
for (i = 0; i < count; i++) {
clocks->data[i].clocks_in_khz =
dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
return 0;
}
static int smu_v13_0_6_freqs_in_same_level(int32_t frequency1,
int32_t frequency2)
{
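/* Frequencies within EPSILON (1 MHz) of each other are treated as the
 * same DPM level, absorbing small clock drift.
 */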
return (abs(frequency1 - frequency2) <= EPSILON);
}
static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_13_0_power_context *power_context = smu_power->power_context;
uint32_t throttler_status = 0;
throttler_status = atomic_read(&power_context->throttle_status);
dev_dbg(smu->adev->dev, "SMU Throttler status: %u" , throttler_status);
return throttler_status;
}
static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
MetricsTableV0_t *metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table;
MetricsTableV1_t *metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table;
MetricsTableV2_t *metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table;
int version = smu_v13_0_6_get_metrics_version(smu);
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int xcc_id;
ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
if (ret)
return ret;
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
return smu_v13_0_12_get_smu_metrics_data(smu, member, value);
/* For clocks with multiple instances, only report the first one */
switch (member) {
case METRICS_CURR_GFXCLK:
case METRICS_AVERAGE_GFXCLK:
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
xcc_id = GET_INST(GC, 0);
*value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, version)[xcc_id]);
} else {
*value = 0;
}
break;
case METRICS_CURR_SOCCLK:
case METRICS_AVERAGE_SOCCLK:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, version)[0]);
break;
case METRICS_CURR_UCLK:
case METRICS_AVERAGE_UCLK:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version));
break;
case METRICS_CURR_VCLK:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, version)[0]);
break;
case METRICS_CURR_DCLK:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, version)[0]);
break;
case METRICS_CURR_FCLK:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, version));
break;
case METRICS_AVERAGE_GFXACTIVITY:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, version));
break;
case METRICS_AVERAGE_MEMACTIVITY:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, version));
break;
case METRICS_CURR_SOCKETPOWER:
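/* Convert watts to 8.8 fixed point, the format the power-sensor
 * read path expects.
 */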
*value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, version)) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
/* This is the max of all VRs and not just SOC VR.
 * No need to define another data type for the same.
 */
case METRICS_TEMPERATURE_VRSOC:
*value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, version)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
*value = UINT_MAX;
break;
}
return ret;
}
static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
{
MetricsMember_t member_type;
if (!value)
return -EINVAL;
switch (clk_type) {
case SMU_GFXCLK:
member_type = METRICS_CURR_GFXCLK;
break;
case SMU_UCLK:
member_type = METRICS_CURR_UCLK;
break;
case SMU_SOCCLK:
member_type = METRICS_CURR_SOCCLK;
break;
case SMU_VCLK:
member_type = METRICS_CURR_VCLK;
break;
case SMU_DCLK:
member_type = METRICS_CURR_DCLK;
break;
case SMU_FCLK:
member_type = METRICS_CURR_FCLK;
break;
default:
return -EINVAL;
}
return smu_v13_0_6_get_smu_metrics_data(smu, member_type, value);
}
static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
struct smu_13_0_dpm_table *single_dpm_table,
uint32_t curr_clk, const char *clk_name)
{
struct pp_clock_levels_with_latency clocks;
int i, ret, level = -1;
uint32_t clk1, clk2;
ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
if (ret) {
dev_err(smu->adev->dev, "Attempt to get %s clk levels failed!" ,
clk_name);
return ret;
}
if (!clocks.num_levels)
return -EINVAL;
if (curr_clk < SMU_13_0_6_DSCLK_THRESHOLD) {
size = sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk);
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz\n", i,
clocks.data[i].clocks_in_khz /
1000);
} else {
if ((clocks.num_levels == 1) ||
(curr_clk < (clocks.data[0].clocks_in_khz / 1000)))
level = 0;
for (i = 0; i < clocks.num_levels; i++) {
clk1 = clocks.data[i].clocks_in_khz / 1000;
if (i < (clocks.num_levels - 1))
clk2 = clocks.data[i + 1].clocks_in_khz / 1000;
if (curr_clk == clk1) {
level = i;
} else if (curr_clk >= clk1 && curr_clk < clk2) {
level = (curr_clk - clk1) <= (clk2 - curr_clk) ?
i :
i + 1;
}
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n" , i,
clk1, (level == i) ? "*" : "" );
}
}
return size;
}
static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
enum smu_clk_type type, char *buf)
{
int now, size = 0;
int ret = 0;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = NULL;
uint32_t min_clk, max_clk;
smu_cmn_get_sysfs_buf(&buf, &size);
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n" );
return size;
}
dpm_context = smu_dpm->dpm_context;
switch (type) {
case SMU_OD_SCLK:
size += sysfs_emit_at(buf, size, "%s:\n" , "OD_SCLK" );
size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n" ,
pstate_table->gfxclk_pstate.curr.min,
pstate_table->gfxclk_pstate.curr.max);
break ;
case SMU_SCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK,
&now);
if (ret) {
dev_err(smu->adev->dev,
"Attempt to get current gfx clk Failed!" );
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
min_clk = single_dpm_table->min;
max_clk = single_dpm_table->max;
if (now < SMU_13_0_6_DSCLK_THRESHOLD) {
size += sysfs_emit_at(buf, size, "S: %uMhz *\n",
now);
size += sysfs_emit_at(buf, size, "0: %uMhz\n",
min_clk);
size += sysfs_emit_at(buf, size, "1: %uMhz\n",
max_clk);
} else if (!smu_v13_0_6_freqs_in_same_level(now, min_clk) &&
!smu_v13_0_6_freqs_in_same_level(now, max_clk)) {
size += sysfs_emit_at(buf, size, "0: %uMhz\n",
min_clk);
size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
now);
size += sysfs_emit_at(buf, size, "2: %uMhz\n",
max_clk);
} else {
size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
min_clk,
smu_v13_0_6_freqs_in_same_level(now, min_clk) ? "*" : "");
size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
max_clk,
smu_v13_0_6_freqs_in_same_level(now, max_clk) ? "*" : "");
}
break;
case SMU_OD_MCLK:
if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SET_UCLK_MAX)))
return 0;
size += sysfs_emit_at(buf, size, "%s:\n" , "OD_MCLK" );
size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n" ,
pstate_table->uclk_pstate.curr.min,
pstate_table->uclk_pstate.curr.max);
break ;
case SMU_MCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK,
&now);
if (ret) {
dev_err(smu->adev->dev,
"Attempt to get current mclk Failed!" );
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
now, "mclk" );
case SMU_SOCCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK,
&now);
if (ret) {
dev_err(smu->adev->dev,
"Attempt to get current socclk Failed!" );
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.soc_table);
return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
now, "socclk" );
case SMU_FCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK,
&now);
if (ret) {
dev_err(smu->adev->dev,
"Attempt to get current fclk Failed!" );
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
now, "fclk" );
case SMU_VCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK,
&now);
if (ret) {
dev_err(smu->adev->dev,
"Attempt to get current vclk Failed!" );
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
now, "vclk" );
case SMU_DCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK,
&now);
if (ret) {
dev_err(smu->adev->dev,
"Attempt to get current dclk Failed!" );
return ret;
}
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
now, "dclk" );
default :
break ;
}
return size;
}
static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max,
uint32_t feature_mask, uint32_t level)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
uint32_t freq;
int ret = 0;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
(feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK))) {
freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
ret = smu_cmn_send_smc_msg_with_param(
smu,
(max ? SMU_MSG_SetSoftMaxGfxClk :
SMU_MSG_SetSoftMinGfxclk),
freq & 0xffff, NULL);
if (ret) {
dev_err(smu->adev->dev,
"Failed to set soft %s gfxclk !\n" ,
max ? "max" : "min" );
return ret;
}
}
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
(feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK))) {
freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level]
.value;
ret = smu_cmn_send_smc_msg_with_param(
smu,
(max ? SMU_MSG_SetSoftMaxByFreq :
SMU_MSG_SetSoftMinByFreq),
(PPCLK_UCLK << 16) | (freq & 0xffff), NULL);
if (ret) {
dev_err(smu->adev->dev,
"Failed to set soft %s memclk !\n" ,
max ? "max" : "min" );
return ret;
}
}
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
(feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK))) {
freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
ret = smu_cmn_send_smc_msg_with_param(
smu,
(max ? SMU_MSG_SetSoftMaxByFreq :
SMU_MSG_SetSoftMinByFreq),
(PPCLK_SOCCLK << 16) | (freq & 0xffff), NULL);
if (ret) {
dev_err(smu->adev->dev,
"Failed to set soft %s socclk !\n" ,
max ? "max" : "min" );
return ret;
}
}
return ret;
}
static int smu_v13_0_6_force_clk_levels(struct smu_context *smu,
enum smu_clk_type type, uint32_t mask)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_13_0_dpm_table *single_dpm_table = NULL;
uint32_t soft_min_level, soft_max_level;
int ret = 0;
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
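/* The lowest and highest set bits of the user mask select the soft
 * min/max DPM level indices.
 */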
switch (type) {
case SMU_SCLK:
single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
if (soft_max_level >= single_dpm_table->count) {
dev_err(smu->adev->dev,
"Clock level specified %d is over max allowed %d\n",
soft_max_level, single_dpm_table->count - 1);
ret = -EINVAL;
break;
}
ret = smu_v13_0_6_upload_dpm_level(
smu, false, FEATURE_MASK(FEATURE_DPM_GFXCLK),
soft_min_level);
if (ret) {
dev_err(smu->adev->dev,
"Failed to upload boot level to lowest!\n");
break;
}
ret = smu_v13_0_6_upload_dpm_level(
smu, true, FEATURE_MASK(FEATURE_DPM_GFXCLK),
soft_max_level);
if (ret)
dev_err(smu->adev->dev,
"Failed to upload dpm max level to highest!\n");
break;
case SMU_MCLK:
case SMU_SOCCLK:
case SMU_FCLK:
/*
 * Should not arrive here since smu_13_0_6 does not
 * support mclk/socclk/fclk softmin/softmax settings
 */
ret = -EINVAL;
break;
default:
break;
}
return ret;
}
static int smu_v13_0_6_get_current_activity_percent(struct smu_context *smu,
enum amd_pp_sensors sensor,
uint32_t *value)
{
int ret = 0;
if (!value)
return -EINVAL;
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
ret = smu_v13_0_6_get_smu_metrics_data(
smu, METRICS_AVERAGE_GFXACTIVITY, value);
break;
case AMDGPU_PP_SENSOR_MEM_LOAD:
ret = smu_v13_0_6_get_smu_metrics_data(
smu, METRICS_AVERAGE_MEMACTIVITY, value);
break;
default:
dev_err(smu->adev->dev,
"Invalid sensor for retrieving clock activity\n");
return -EINVAL;
}
return ret;
}
static int smu_v13_0_6_thermal_get_temperature(struct smu_context *smu,
enum amd_pp_sensors sensor,
uint32_t *value)
{
int ret = 0;
if (!value)
return -EINVAL;
switch (sensor) {
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
ret = smu_v13_0_6_get_smu_metrics_data(
smu, METRICS_TEMPERATURE_HOTSPOT, value);
break;
case AMDGPU_PP_SENSOR_MEM_TEMP:
ret = smu_v13_0_6_get_smu_metrics_data(
smu, METRICS_TEMPERATURE_MEM, value);
break;
default:
dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
return -EINVAL;
}
return ret;
}
static int smu_v13_0_6_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor, void *data,
uint32_t *size)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
int ret = 0;
if (amdgpu_ras_intr_triggered())
return 0;
if (!data || !size)
return -EINVAL;
switch (sensor) {
case AMDGPU_PP_SENSOR_MEM_LOAD:
case AMDGPU_PP_SENSOR_GPU_LOAD:
ret = smu_v13_0_6_get_current_activity_percent(smu, sensor,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
ret = smu_v13_0_6_get_smu_metrics_data(smu,
METRICS_CURR_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
case AMDGPU_PP_SENSOR_MEM_TEMP:
ret = smu_v13_0_6_thermal_get_temperature(smu, sensor,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(
smu, SMU_UCLK, (uint32_t *)data);
/* the output clock frequency in 10K unit */
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_SCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(
smu, SMU_GFXCLK, (uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDGFX:
ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDBOARD:
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) {
*(uint32_t *)data = dpm_context->board_volt;
*size = 4;
break;
} else {
ret = -EOPNOTSUPP;
break;
}
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
break;
}
return ret;
}
static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
uint32_t power_limit = 0;
int ret;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);
if (ret) {
dev_err(smu->adev->dev, "Couldn't get PPT limit" );
return -EINVAL;
}
if (current_power_limit)
*current_power_limit = power_limit;
if (default_power_limit)
*default_power_limit = power_limit;
if (max_power_limit)
*max_power_limit = pptable->MaxSocketPowerLimit;
if (min_power_limit)
*min_power_limit = 0;
return 0;
}
static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
enum smu_ppt_limit_type limit_type,
uint32_t limit)
{
return smu_v13_0_set_power_limit(smu, limit_type, limit);
}
static int smu_v13_0_6_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
struct smu_context *smu = adev->powerplay.pp_handle;
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_13_0_power_context *power_context = smu_power->power_context;
uint32_t client_id = entry->client_id;
uint32_t ctxid = entry->src_data[0];
uint32_t src_id = entry->src_id;
uint32_t data;
if (client_id == SOC15_IH_CLIENTID_MP1) {
if (src_id == IH_INTERRUPT_ID_TO_DRIVER) {
/* ACK SMUToHost interrupt */
data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
/*
* ctxid is used to distinguish different events for SMCToHost
* interrupt.
*/
switch (ctxid) {
case IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
/*
* Increment the throttle interrupt counter
*/
atomic64_inc(&smu->throttle_int_counter);
if (!atomic_read(&adev->throttling_logging_enabled))
return 0;
/* This uses the new method which fixes the
* incorrect throttling status reporting
* through metrics table. For older FWs,
* it will be ignored.
*/
if (__ratelimit(&adev->throttling_logging_rs)) {
atomic_set(
&power_context->throttle_status,
entry->src_data[1]);
schedule_work(&smu->throttling_logging_work);
}
break;
default:
dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
ctxid, client_id);
break;
}
}
}
return 0;
}
static int smu_v13_0_6_set_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
uint32_t val = 0;
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
/* For MP1 SW irqs */
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* For MP1 SW irqs */
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
break;
default:
break;
}
return 0;
}
static const struct amdgpu_irq_src_funcs smu_v13_0_6_irq_funcs = {
.set = smu_v13_0_6_set_irq_state,
.process = smu_v13_0_6_irq_process,
};
static int smu_v13_0_6_register_irq_handler(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct amdgpu_irq_src *irq_src = &smu->irq_source;
int ret = 0;
if (amdgpu_sriov_vf(adev))
return 0;
irq_src->num_types = 1;
irq_src->funcs = &smu_v13_0_6_irq_funcs;
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
IH_INTERRUPT_ID_TO_DRIVER,
irq_src);
return ret;
}
static int smu_v13_0_6_notify_unload(struct smu_context *smu)
{
if (amdgpu_in_reset(smu->adev))
return 0;
dev_dbg(smu->adev->dev, "Notify PMFW about driver unload" );
/* Ignore return, just intimate FW that driver is not going to be there */
smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
return 0;
}
static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
{
/* NOTE: this ClearMcaOnRead message is only supported for smu version 85.72.0 or higher */
if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(MCA_DEBUG_MODE)))
return 0;
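/* Debug mode keeps MCA banks intact: param 0 disables clear-on-read,
 * while the UE/CE flags re-enable it when debug mode is turned off.
 */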
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
enable ? 0 : ClearMcaOnRead_UE_FLAG_MASK | ClearMcaOnRead_CE_POLL_MASK,
NULL);
}
static int smu_v13_0_6_system_features_control(struct smu_context *smu,
bool enable)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
if (amdgpu_sriov_vf(adev))
return 0;
if (enable) {
if (!(adev->flags & AMD_IS_APU))
ret = smu_v13_0_system_features_control(smu, enable);
} else {
/* Notify FW that the device is no longer driver managed */
smu_v13_0_6_notify_unload(smu);
}
return ret;
}
static int smu_v13_0_6_set_gfx_soft_freq_limited_range(struct smu_context *smu,
uint32_t min,
uint32_t max)
{
int ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
max & 0xffff, NULL);
if (ret)
return ret;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinGfxclk,
min & 0xffff, NULL);
return ret;
}
static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
struct smu_13_0_dpm_table *gfx_table =
&dpm_context->dpm_tables.gfx_table;
struct smu_13_0_dpm_table *uclk_table =
&dpm_context->dpm_tables.uclk_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
int ret;
/* Disable determinism if switching to another mode */
if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
(level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
}
switch (level) {
case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
return 0;
case AMD_DPM_FORCED_LEVEL_AUTO:
if ((gfx_table->min != pstate_table->gfxclk_pstate.curr.min) ||
(gfx_table->max != pstate_table->gfxclk_pstate.curr.max)) {
ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
smu, gfx_table->min, gfx_table->max);
if (ret)
return ret;
pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
}
if (uclk_table->max != pstate_table->uclk_pstate.curr.max) {
/* Min UCLK is not expected to be changed */
ret = smu_v13_0_set_soft_freq_limited_range(
smu, SMU_UCLK, 0, uclk_table->max, false);
if (ret)
return ret;
pstate_table->uclk_pstate.curr.max = uclk_table->max;
}
smu_v13_0_reset_custom_level(smu);
return 0;
case AMD_DPM_FORCED_LEVEL_MANUAL:
return 0;
default:
break;
}
return -EOPNOTSUPP;
}
static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t min, uint32_t max,
bool automatic)
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
struct amdgpu_device *adev = smu->adev;
uint32_t min_clk;
uint32_t max_clk;
int ret = 0;
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK &&
clk_type != SMU_UCLK)
return -EINVAL;
if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
(smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
return -EINVAL;
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
if (min >= max) {
dev_err(smu->adev->dev,
"Minimum clk should be less than the maximum allowed clock\n" );
return -EINVAL;
}
if (clk_type == SMU_GFXCLK) {
if ((min == pstate_table->gfxclk_pstate.curr.min) &&
(max == pstate_table->gfxclk_pstate.curr.max))
return 0;
ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
smu, min, max);
if (!ret) {
pstate_table->gfxclk_pstate.curr.min = min;
pstate_table->gfxclk_pstate.curr.max = max;
}
}
if (clk_type == SMU_UCLK) {
if (max == pstate_table->uclk_pstate.curr.max)
return 0;
/* For VF, only allowed in FW versions 85.102 or greater */
if (!smu_v13_0_6_cap_supported(smu,
SMU_CAP(SET_UCLK_MAX)))
return -EOPNOTSUPP;
/* Only max clock limiting is allowed for UCLK */
ret = smu_v13_0_set_soft_freq_limited_range(
smu, SMU_UCLK, 0, max, false);
if (!ret)
pstate_table->uclk_pstate.curr.max = max;
}
return ret;
}
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
(max > dpm_context->dpm_tables.gfx_table.max)) {
dev_warn(
adev->dev,
"Invalid max frequency %d MHz specified for determinism\n" ,
max);
return -EINVAL;
}
/* Restore default min/max clocks and enable determinism */
min_clk = dpm_context->dpm_tables.gfx_table.min;
max_clk = dpm_context->dpm_tables.gfx_table.max;
ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min_clk,
max_clk);
if (!ret) {
usleep_range(500, 1000);
ret = smu_cmn_send_smc_msg_with_param(
smu, SMU_MSG_EnableDeterminism, max, NULL);
if (ret) {
dev_err(adev->dev,
"Failed to enable determinism at GFX clock %d MHz\n" ,
max);
} else {
pstate_table->gfxclk_pstate.curr.min = min_clk;
pstate_table->gfxclk_pstate.curr.max = max;
}
}
}
return ret;
}
static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long input[], uint32_t size)
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
uint32_t min_clk;
uint32_t max_clk;
int ret = 0;
/* Only allowed in manual or determinism mode */
if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
(smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
return -EINVAL;
switch (type) {
case PP_OD_EDIT_SCLK_VDDC_TABLE:
if (size != 2) {
dev_err(smu->adev->dev,
"Input parameter number not correct\n" );
return -EINVAL;
}
if (input[0] == 0) {
if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
dev_warn(
smu->adev->dev,
"Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n" ,
input[1],
dpm_context->dpm_tables.gfx_table.min);
return -EINVAL;
}