Source-code library — statistics index page: products/Sources/formal languages/C/Linux/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ (open-source operating system, version 6.17.9) — file dated 2025-10-24, size 188 kB (image not shown)

Source file: smu7_hwmgr.c — Language: C

 
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/div64.h>
#if IS_ENABLED(CONFIG_X86_64)
#include <asm/intel-family.h>
#endif
#include <drm/amdgpu_drm.h>
#include "ppatomctrl.h"
#include "atombios.h"
#include "pptable_v1_0.h"
#include "pppcielanes.h"
#include "amd_pcie_helpers.h"
#include "hardwaremanager.h"
#include "process_pptables_v1_0.h"
#include "cgs_common.h"

#include "smu7_common.h"

#include "hwmgr.h"
#include "smu7_hwmgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"
#include "smu7_baco.h"
#include "smu7_smumgr.h"
#include "polaris10_smumgr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define MC_CG_SEQ_DRAMCONF_S0       0x05
#define MC_CG_SEQ_DRAMCONF_S1       0x06
#define MC_CG_SEQ_YCLK_SUSPEND      0x04
#define MC_CG_SEQ_YCLK_RESUME       0x0a

#define SMC_CG_IND_START            0xc0030000
#define SMC_CG_IND_END              0xc0040000

#define MEM_FREQ_LOW_LATENCY        25000
#define MEM_FREQ_HIGH_LATENCY       80000

#define MEM_LATENCY_HIGH            45
#define MEM_LATENCY_LOW             35
#define MEM_LATENCY_ERR             0xFFFF

#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5

#define PCIE_BUS_CLK                10000
#define TCLK                        (PCIE_BUS_CLK / 10)

/*
 * Built-in power-profile presets, indexed by profile mode.
 * NOTE(review): field order presumably mirrors struct profile_mode_setting
 * (sclk: enable/up-hyst/down-hyst/activity, then the same four for mclk) —
 * confirm against the struct definition in the hwmgr header.
 */
static struct profile_mode_setting smu7_profiling[7] = {
      {0, 0, 0, 0, 0, 0, 0, 0},
      {1, 0, 100, 30, 1, 0, 100, 10},
      {1, 10, 0, 30, 0, 0, 0, 0},
      {0, 0, 0, 0, 1, 10, 16, 31},
      {1, 0, 11, 50, 1, 0, 100, 10},
      {1, 0, 5, 30, 0, 0, 0, 0},
      {0, 0, 0, 0, 0, 0, 0, 0},
};

#define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)

#define ixPWR_SVI2_PLANE1_LOAD                     0xC0200280
#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK                    0x00000020L
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK                 0x00000040L
#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT                  0x00000005
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT               0x00000006

#define STRAP_EVV_REVISION_MSB  2211
#define STRAP_EVV_REVISION_LSB  2208

/**
 * Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field: selects which
 * sensor source (on-die analog/digital, an external input, or a
 * combination — per the value names) triggers a thermal DPM event.
 */
enum DPM_EVENT_SRC {
 DPM_EVENT_SRC_ANALOG = 0,
 DPM_EVENT_SRC_EXTERNAL = 1,
 DPM_EVENT_SRC_DIGITAL = 2,
 DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
 DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};

#define ixDIDT_SQ_EDC_CTRL                         0x0013
#define ixDIDT_SQ_EDC_THRESHOLD                    0x0014
#define ixDIDT_SQ_EDC_STALL_PATTERN_1_2            0x0015
#define ixDIDT_SQ_EDC_STALL_PATTERN_3_4            0x0016
#define ixDIDT_SQ_EDC_STALL_PATTERN_5_6            0x0017
#define ixDIDT_SQ_EDC_STALL_PATTERN_7              0x0018

#define ixDIDT_TD_EDC_CTRL                         0x0053
#define ixDIDT_TD_EDC_THRESHOLD                    0x0054
#define ixDIDT_TD_EDC_STALL_PATTERN_1_2            0x0055
#define ixDIDT_TD_EDC_STALL_PATTERN_3_4            0x0056
#define ixDIDT_TD_EDC_STALL_PATTERN_5_6            0x0057
#define ixDIDT_TD_EDC_STALL_PATTERN_7              0x0058

#define ixDIDT_TCP_EDC_CTRL                        0x0073
#define ixDIDT_TCP_EDC_THRESHOLD                   0x0074
#define ixDIDT_TCP_EDC_STALL_PATTERN_1_2           0x0075
#define ixDIDT_TCP_EDC_STALL_PATTERN_3_4           0x0076
#define ixDIDT_TCP_EDC_STALL_PATTERN_5_6           0x0077
#define ixDIDT_TCP_EDC_STALL_PATTERN_7             0x0078

#define ixDIDT_DB_EDC_CTRL                         0x0033
#define ixDIDT_DB_EDC_THRESHOLD                    0x0034
#define ixDIDT_DB_EDC_STALL_PATTERN_1_2            0x0035
#define ixDIDT_DB_EDC_STALL_PATTERN_3_4            0x0036
#define ixDIDT_DB_EDC_STALL_PATTERN_5_6            0x0037
#define ixDIDT_DB_EDC_STALL_PATTERN_7              0x0038

/*
 * Indirect-register offsets (ixDIDT_*_EDC_*) for the SQ/TD/TCP/DB DIDT
 * EDC blocks, walked as a flat list; 0xFFFFFFFF terminates the list.
 */
uint32_t DIDTEDCConfig_P12[] = {
    ixDIDT_SQ_EDC_STALL_PATTERN_1_2,
    ixDIDT_SQ_EDC_STALL_PATTERN_3_4,
    ixDIDT_SQ_EDC_STALL_PATTERN_5_6,
    ixDIDT_SQ_EDC_STALL_PATTERN_7,
    ixDIDT_SQ_EDC_THRESHOLD,
    ixDIDT_SQ_EDC_CTRL,
    ixDIDT_TD_EDC_STALL_PATTERN_1_2,
    ixDIDT_TD_EDC_STALL_PATTERN_3_4,
    ixDIDT_TD_EDC_STALL_PATTERN_5_6,
    ixDIDT_TD_EDC_STALL_PATTERN_7,
    ixDIDT_TD_EDC_THRESHOLD,
    ixDIDT_TD_EDC_CTRL,
    ixDIDT_TCP_EDC_STALL_PATTERN_1_2,
    ixDIDT_TCP_EDC_STALL_PATTERN_3_4,
    ixDIDT_TCP_EDC_STALL_PATTERN_5_6,
    ixDIDT_TCP_EDC_STALL_PATTERN_7,
    ixDIDT_TCP_EDC_THRESHOLD,
    ixDIDT_TCP_EDC_CTRL,
    ixDIDT_DB_EDC_STALL_PATTERN_1_2,
    ixDIDT_DB_EDC_STALL_PATTERN_3_4,
    ixDIDT_DB_EDC_STALL_PATTERN_5_6,
    ixDIDT_DB_EDC_STALL_PATTERN_7,
    ixDIDT_DB_EDC_THRESHOLD,
    ixDIDT_DB_EDC_CTRL,
    0xFFFFFFFF /* End of list */
};

static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
  enum pp_clock_type type, uint32_t mask);
static int smu7_notify_has_display(struct pp_hwmgr *hwmgr);

/*
 * Downcast a generic pp_hw_power_state to the SMU7-specific power state.
 * Returns NULL (via PP_ASSERT_WITH_CODE) if the magic value does not match.
 */
static struct smu7_power_state *cast_phw_smu7_power_state(
      struct pp_hw_power_state *hw_ps)
{
 PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
    "Invalid Powerstate Type!",
     return NULL);

 return (struct smu7_power_state *)hw_ps;
}

/*
 * Const variant of cast_phw_smu7_power_state(): downcast a const generic
 * power state after validating its magic value; NULL on mismatch.
 */
static const struct smu7_power_state *cast_const_phw_smu7_power_state(
     const struct pp_hw_power_state *hw_ps)
{
 PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
    "Invalid Powerstate Type!",
     return NULL);

 return (const struct smu7_power_state *)hw_ps;
}

/**
 * smu7_get_mc_microcode_version - Find the MC microcode version and store it in the HwMgr struct
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   always 0
 */

static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
 /* Select MC debug index 0x9F, then read back the firmware version. */
 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

 hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

 return 0;
}

/*
 * Read the PCIe link's current data-rate field from the LC speed
 * control register.
 */
static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
	uint32_t speed_cntl;

	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
	speed_cntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
					   ixPCIE_LC_SPEED_CNTL);

	return (uint16_t)PHM_GET_FIELD(speed_cntl, PCIE_LC_SPEED_CNTL,
				       LC_CURRENT_DATA_RATE);
}

static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
 uint32_t link_width;

 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
   PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);

 PP_ASSERT_WITH_CODE((7 >= link_width),
   "Invalid PCIe lane width!"return 0);

 return decode_pcie_lane_width(link_width);
}

/**
 * smu7_enable_smc_voltage_controller - Enable voltage control
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   always PP_Result_OK
 */

static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
 /*
  * On Polaris10 through VegaM, clear the SVI2 plane-1 PSI bits before
  * handing voltage control to the SMC.
  */
 if (hwmgr->chip_id >= CHIP_POLARIS10 &&
     hwmgr->chip_id <= CHIP_VEGAM) {
  PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
    CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
  PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
    CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
 }

 /* Only ask the SMC to take over if the feature mask allows it. */
 if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
  smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);

 return 0;
}

/**
 * smu7_voltage_control - Checks if we want to support voltage control
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 */

static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
{
	const struct smu7_hwmgr *backend =
			(const struct smu7_hwmgr *)(hwmgr->backend);

	/* Any mode other than SMU7_VOLTAGE_CONTROL_NONE counts as enabled. */
	return backend->voltage_control != SMU7_VOLTAGE_CONTROL_NONE;
}

/**
 * smu7_enable_voltage_control - Enable voltage control
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   always 0
 */

static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
 /* enable voltage control: set GENERAL_PWRMGT.VOLT_PWRMGT_EN */
 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

 return 0;
}

/*
 * phm_get_svi2_voltage_table_v0 - Build an SVI2 voltage table from a v0
 * clock/voltage dependency table.
 * @voltage_table: output voltage table to fill.
 * @voltage_dependency_table: source dependency table, one entry per level.
 *
 * Return: 0 on success, -EINVAL if either table pointer is NULL.
 */
static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
		struct phm_clock_voltage_dependency_table *voltage_dependency_table
		)
{
	uint32_t i;

	/* Fixed: missing comma between assert message and action. */
	PP_ASSERT_WITH_CODE((NULL != voltage_table),
			"Voltage Dependency Table empty.",
			return -EINVAL;);

	/* Robustness: the dependency table is dereferenced below; reject NULL
	 * instead of crashing. */
	PP_ASSERT_WITH_CODE((NULL != voltage_dependency_table),
			"Voltage Dependency Table empty.",
			return -EINVAL;);

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;
	voltage_table->count = voltage_dependency_table->count;

	/* Copy each dependency entry's voltage; SMIO is unused for SVI2. */
	for (i = 0; i < voltage_dependency_table->count; i++) {
		voltage_table->entries[i].value =
				voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}


/**
 * smu7_construct_voltage_tables - Create Voltage Tables.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   always 0
 */

static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 struct phm_ppt_v1_information *table_info =
   (struct phm_ppt_v1_information *)hwmgr->pptable;
 int result = 0;
 uint32_t tmp;

 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
  result = atomctrl_get_voltage_table_v3(hwmgr,
    VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
    &(data->mvdd_voltage_table));
  PP_ASSERT_WITH_CODE((0 == result),
    "Failed to retrieve MVDD table.",
    return result);
 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
  if (hwmgr->pp_table_version == PP_TABLE_V1)
   result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
     table_info->vdd_dep_on_mclk);
  else if (hwmgr->pp_table_version == PP_TABLE_V0)
   result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
     hwmgr->dyn_state.mvdd_dependency_on_mclk);

  PP_ASSERT_WITH_CODE((0 == result),
    "Failed to retrieve SVI2 MVDD table from dependency table.",
    return result;);
 }

 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
  result = atomctrl_get_voltage_table_v3(hwmgr,
    VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
    &(data->vddci_voltage_table));
  PP_ASSERT_WITH_CODE((0 == result),
    "Failed to retrieve VDDCI table.",
    return result);
 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
  if (hwmgr->pp_table_version == PP_TABLE_V1)
   result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
     table_info->vdd_dep_on_mclk);
  else if (hwmgr->pp_table_version == PP_TABLE_V0)
   result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
     hwmgr->dyn_state.vddci_dependency_on_mclk);
  PP_ASSERT_WITH_CODE((0 == result),
    "Failed to retrieve SVI2 VDDCI table from dependency table.",
    return result);
 }

 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
  /* VDDGFX has only SVI2 voltage control */
  result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
     table_info->vddgfx_lookup_table);
  PP_ASSERT_WITH_CODE((0 == result),
   "Failed to retrieve SVI2 VDDGFX table from lookup table."return result;);
 }


 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
  result = atomctrl_get_voltage_table_v3(hwmgr,
     VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
     &data->vddc_voltage_table);
  PP_ASSERT_WITH_CODE((0 == result),
   "Failed to retrieve VDDC table."return result;);
 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {

  if (hwmgr->pp_table_version == PP_TABLE_V0)
   result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
     hwmgr->dyn_state.vddc_dependency_on_mclk);
  else if (hwmgr->pp_table_version == PP_TABLE_V1)
   result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
    table_info->vddc_lookup_table);

  PP_ASSERT_WITH_CODE((0 == result),
   "Failed to retrieve SVI2 VDDC table from dependency table."return result;);
 }

 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
 PP_ASSERT_WITH_CODE(
   (data->vddc_voltage_table.count <= tmp),
  "Too many voltage values for VDDC. Trimming to fit state table.",
   phm_trim_voltage_table_to_fit_state_table(tmp,
      &(data->vddc_voltage_table)));

 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
 PP_ASSERT_WITH_CODE(
   (data->vddgfx_voltage_table.count <= tmp),
  "Too many voltage values for VDDC. Trimming to fit state table.",
   phm_trim_voltage_table_to_fit_state_table(tmp,
      &(data->vddgfx_voltage_table)));

 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
 PP_ASSERT_WITH_CODE(
   (data->vddci_voltage_table.count <= tmp),
  "Too many voltage values for VDDCI. Trimming to fit state table.",
   phm_trim_voltage_table_to_fit_state_table(tmp,
     &(data->vddci_voltage_table)));

 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
 PP_ASSERT_WITH_CODE(
   (data->mvdd_voltage_table.count <= tmp),
  "Too many voltage values for MVDD. Trimming to fit state table.",
   phm_trim_voltage_table_to_fit_state_table(tmp,
      &(data->mvdd_voltage_table)));

 return 0;
}

/**
 * smu7_program_static_screen_threshold_parameters - Programs static screed detection parameters
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   always 0
 */

static int smu7_program_static_screen_threshold_parameters(
       struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

 /* Set static screen threshold unit (value comes from the backend state) */
 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
   data->static_screen_threshold_unit);
 /* Set static screen threshold (value comes from the backend state) */
 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
   data->static_screen_threshold);

 return 0;
}

/**
 * smu7_enable_display_gap - Setup display gap for glitch free memory clock switching.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   always  0
 */

static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
{
 uint32_t display_gap =
   cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
     ixCG_DISPLAY_GAP_CNTL);

 /* Ignore the display gap in general... */
 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
   DISP_GAP, DISPLAY_GAP_IGNORE);

 /* ...but gate memory-clock changes to VBLANK (glitch-free switching,
  * per the function's purpose). */
 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
   DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);

 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   ixCG_DISPLAY_GAP_CNTL, display_gap);

 return 0;
}

/**
 * smu7_program_voting_clients - Programs activity state transition voting clients
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   always  0
 */

static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 int i;

 /* Clear reset for voting clients before enabling DPM */
 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

 /* Program all eight CG_FREQ_TRAN_VOTING_* registers (4-byte stride)
  * with the backend's voting-rights masks. */
 for (i = 0; i < 8; i++)
  cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
     ixCG_FREQ_TRAN_VOTING_0 + i * 4,
     data->voting_rights_clients[i]);
 return 0;
}

static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
 int i;

 /* Reset voting clients before disabling DPM */
 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);

 /* Zero all eight CG_FREQ_TRAN_VOTING_* registers (4-byte stride). */
 for (i = 0; i < 8; i++)
  cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
    ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);

 return 0;
}

/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
 */

static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
  uint32_t arb_src, uint32_t arb_dest)
{
 uint32_t mc_arb_dram_timing;
 uint32_t mc_arb_dram_timing2;
 uint32_t burst_time;
 uint32_t mc_cg_config;

 /* Read the source set's DRAM timings and burst time. Only F0 and F1
  * are supported; any other set is rejected. */
 switch (arb_src) {
 case MC_CG_ARB_FREQ_F0:
  mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
  mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
  burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
  break;
 case MC_CG_ARB_FREQ_F1:
  mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
  mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
  burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
  break;
 default:
  return -EINVAL;
 }

 /* Write the captured values into the destination set's registers. */
 switch (arb_dest) {
 case MC_CG_ARB_FREQ_F0:
  cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
  cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
  PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
  break;
 case MC_CG_ARB_FREQ_F1:
  cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
  cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
  PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
  break;
 default:
  return -EINVAL;
 }

 /* NOTE(review): the low four bits of MC_CG_CONFIG are presumably the
  * per-set clock-gating enables — confirm against the MC register spec.
  * The CG_ARB_REQ write then requests the switch to the destination set. */
 mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
 mc_cg_config |= 0x0000000F;
 cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

 return 0;
}

static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
	/* Ask the SMC firmware to restore its power-management defaults. */
	int ret;

	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);

	return ret;
}

/**
 * smu7_initial_switch_from_arbf0_to_f1 - Initial switch from ARB F0->F1
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   always 0
 * This function is to be called from the SetPowerState table.
 */

static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
 /* Copy arbitration set F0 into F1 and make F1 the active set. */
 return smu7_copy_and_switch_arb_sets(hwmgr,
   MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

/*
 * Force the memory-controller arbitration back to set F0. The currently
 * active set is read from bits [15:8] of SMC_SCRATCH9; if it is already
 * F0 there is nothing to do.
 */
static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
{
	uint32_t current_set;

	current_set = cgs_read_ind_register(hwmgr->device,
					    CGS_IND_REG__SMC, ixSMC_SCRATCH9);
	current_set = (current_set & 0x0000ff00) >> 8;

	if (current_set == MC_CG_ARB_FREQ_F0)
		return 0;

	return smu7_copy_and_switch_arb_sets(hwmgr, current_set,
					     MC_CG_ARB_FREQ_F0);
}

static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr)
{
 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
 uint16_t pcie_gen = 0;

 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 &&
     adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4)
  pcie_gen = 3;
 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 &&
  adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
  pcie_gen = 2;
 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 &&
  adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2)
  pcie_gen = 1;
 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 &&
  adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1)
  pcie_gen = 0;

 return pcie_gen;
}

static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr)
{
 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
 uint16_t pcie_width = 0;

 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
  pcie_width = 16;
 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
  pcie_width = 12;
 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
  pcie_width = 8;
 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
  pcie_width = 4;
 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
  pcie_width = 2;
 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
  pcie_width = 1;

 return pcie_width;
}

/*
 * smu7_setup_default_pcie_table - Build the PCIe speed/lane DPM table,
 * either from the pptable's PCIe table (v1) or from hardcoded defaults.
 * Returns 0 on success, -EINVAL if neither performance nor power-saving
 * PCIe levels are in use.
 */
static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

 struct phm_ppt_v1_information *table_info =
   (struct phm_ppt_v1_information *)(hwmgr->pptable);
 struct phm_ppt_v1_pcie_table *pcie_table = NULL;

 uint32_t i, max_entry;
 uint32_t tmp;

 PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
   data->use_pcie_power_saving_levels), "No pcie performance levels!",
   return -EINVAL);

 if (table_info != NULL)
  pcie_table = table_info->pcie_table;

 /* If only one of the two level sets is in use, mirror it into the other. */
 if (data->use_pcie_performance_levels &&
   !data->use_pcie_power_saving_levels) {
  data->pcie_gen_power_saving = data->pcie_gen_performance;
  data->pcie_lane_power_saving = data->pcie_lane_performance;
 } else if (!data->use_pcie_performance_levels &&
   data->use_pcie_power_saving_levels) {
  data->pcie_gen_performance = data->pcie_gen_power_saving;
  data->pcie_lane_performance = data->pcie_lane_power_saving;
 }
 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
 phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
     tmp,
     MAX_REGULAR_DPM_NUMBER);

 if (pcie_table != NULL) {
  /*
   * max_entry is used to make sure we reserve one PCIE level
   * for boot level (fix for A+A PSPP issue).
   * If PCIE table from PPTable have ULV entry + 8 entries,
   * then ignore the last entry.
   */

  max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
  for (i = 1; i < max_entry; i++) {
   phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
     get_pcie_gen_support(data->pcie_gen_cap,
       pcie_table->entries[i].gen_speed),
     get_pcie_lane_support(data->pcie_lane_cap,
       pcie_table->entries[i].lane_width));
  }
  data->dpm_table.pcie_speed_table.count = max_entry - 1;
  smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
 } else {
  /* Hardcode Pcie Table: levels 0-1 at min gen, 2-5 at max gen,
   * all capped to the maximum supported lane count. */
  phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
    get_pcie_gen_support(data->pcie_gen_cap,
      PP_Min_PCIEGen),
    get_pcie_lane_support(data->pcie_lane_cap,
      PP_Max_PCIELane));
  phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
    get_pcie_gen_support(data->pcie_gen_cap,
      PP_Min_PCIEGen),
    get_pcie_lane_support(data->pcie_lane_cap,
      PP_Max_PCIELane));
  phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
    get_pcie_gen_support(data->pcie_gen_cap,
      PP_Max_PCIEGen),
    get_pcie_lane_support(data->pcie_lane_cap,
      PP_Max_PCIELane));
  phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
    get_pcie_gen_support(data->pcie_gen_cap,
      PP_Max_PCIEGen),
    get_pcie_lane_support(data->pcie_lane_cap,
      PP_Max_PCIELane));
  phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
    get_pcie_gen_support(data->pcie_gen_cap,
      PP_Max_PCIEGen),
    get_pcie_lane_support(data->pcie_lane_cap,
      PP_Max_PCIELane));
  phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
    get_pcie_gen_support(data->pcie_gen_cap,
      PP_Max_PCIEGen),
    get_pcie_lane_support(data->pcie_lane_cap,
      PP_Max_PCIELane));

  data->dpm_table.pcie_speed_table.count = 6;
 }
 /* Populate last level for boot PCIE level, but do not increment count. */
 if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
  for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
   phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
    get_pcie_gen_support(data->pcie_gen_cap,
      PP_Max_PCIEGen),
    data->vbios_boot_state.pcie_lane_bootup_value);
 } else {
  phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
   data->dpm_table.pcie_speed_table.count,
   get_pcie_gen_support(data->pcie_gen_cap,
     PP_Min_PCIEGen),
   get_pcie_lane_support(data->pcie_lane_cap,
     PP_Max_PCIELane));

  /* With PCIe DPM disabled, pin the extra level to the overridden
   * (platform/ASIC-supported) speed and width. */
  if (data->pcie_dpm_key_disabled)
   phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
    data->dpm_table.pcie_speed_table.count,
    smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr));
 }
 return 0;
}

static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

 memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));

 phm_reset_single_dpm_table(
   &data->dpm_table.sclk_table,
    smum_get_mac_definition(hwmgr,
     SMU_MAX_LEVELS_GRAPHICS),
     MAX_REGULAR_DPM_NUMBER);
 phm_reset_single_dpm_table(
   &data->dpm_table.mclk_table,
   smum_get_mac_definition(hwmgr,
    SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);

 phm_reset_single_dpm_table(
   &data->dpm_table.vddc_table,
    smum_get_mac_definition(hwmgr,
     SMU_MAX_LEVELS_VDDC),
     MAX_REGULAR_DPM_NUMBER);
 phm_reset_single_dpm_table(
   &data->dpm_table.vddci_table,
   smum_get_mac_definition(hwmgr,
    SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);

 phm_reset_single_dpm_table(
   &data->dpm_table.mvdd_table,
    smum_get_mac_definition(hwmgr,
     SMU_MAX_LEVELS_MVDD),
     MAX_REGULAR_DPM_NUMBER);
 return 0;
}
/*
 * This function is to initialize all DPM state tables
 * for SMU7 based on the dependency table.
 * Dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD request, etc.
 */


static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
  hwmgr->dyn_state.vddc_dependency_on_sclk;
 struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
  hwmgr->dyn_state.vddc_dependency_on_mclk;
 struct phm_cac_leakage_table *std_voltage_table =
  hwmgr->dyn_state.cac_leakage_table;
 uint32_t i;

 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
  "SCLK dependency table is missing. This table is mandatory"return -EINVAL);
 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
  "SCLK dependency table has to have is missing. This table is mandatory"return -EINVAL);

 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
  "MCLK dependency table is missing. This table is mandatory"return -EINVAL);
 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
  "VMCLK dependency table has to have is missing. This table is mandatory"return -EINVAL);


 /* Initialize Sclk DPM table based on allow Sclk values*/
 data->dpm_table.sclk_table.count = 0;

 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
  if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
    allowed_vdd_sclk_table->entries[i].clk) {
   data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
    allowed_vdd_sclk_table->entries[i].clk;
   data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
   data->dpm_table.sclk_table.count++;
  }
 }

 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
  "MCLK dependency table is missing. This table is mandatory"return -EINVAL);
 /* Initialize Mclk DPM table based on allow Mclk values */
 data->dpm_table.mclk_table.count = 0;
 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
  if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
   allowed_vdd_mclk_table->entries[i].clk) {
   data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
    allowed_vdd_mclk_table->entries[i].clk;
   data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
   data->dpm_table.mclk_table.count++;
  }
 }

 /* Initialize Vddc DPM table based on allow Vddc values.  And populate corresponding std values. */
 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
  data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
  data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
  /* param1 is for corresponding std voltage */
  data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
 }

 data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
 allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

 if (NULL != allowed_vdd_mclk_table) {
  /* Initialize Vddci DPM table based on allow Mclk values */
  for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
   data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
   data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
  }
  data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
 }

 allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;

 if (NULL != allowed_vdd_mclk_table) {
  /*
 * Initialize MVDD DPM table based on allow Mclk
 * values
 */

  for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
   data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
   data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
  }
  data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
 }

 return 0;
}

/*
 * smu7_setup_dpm_tables_v1 - Initialize the SCLK/MCLK DPM tables from the
 * v1 pptable dependency tables, collapsing consecutive duplicate clock
 * values into one level. Also seeds the overdrive clock limits from the
 * highest dependency entry when they are unset.
 * Returns 0 on success, -EINVAL if a dependency table is missing/empty.
 */
static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 struct phm_ppt_v1_information *table_info =
   (struct phm_ppt_v1_information *)(hwmgr->pptable);
 uint32_t i;

 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;

 if (table_info == NULL)
  return -EINVAL;

 dep_sclk_table = table_info->vdd_dep_on_sclk;
 dep_mclk_table = table_info->vdd_dep_on_mclk;

 PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
   "SCLK dependency table is missing.",
   return -EINVAL);
 PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
   "SCLK dependency table count is 0.",
   return -EINVAL);

 PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
   "MCLK dependency table is missing.",
   return -EINVAL);
 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
   "MCLK dependency table count is 0",
   return -EINVAL);

 /* Initialize Sclk DPM table based on allow Sclk values */
 data->dpm_table.sclk_table.count = 0;
 for (i = 0; i < dep_sclk_table->count; i++) {
  if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
      dep_sclk_table->entries[i].clk) {

   data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
     dep_sclk_table->entries[i].clk;

   /* only the lowest (boot) level starts enabled */
   data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
     i == 0;
   data->dpm_table.sclk_table.count++;
  }
 }
 /* here i == dep_sclk_table->count, so entries[i-1] is the highest level */
 if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
  hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
 /* Initialize Mclk DPM table based on allow Mclk values */
 data->dpm_table.mclk_table.count = 0;
 for (i = 0; i < dep_mclk_table->count; i++) {
  if (i == 0 || data->dpm_table.mclk_table.dpm_levels
    [data->dpm_table.mclk_table.count - 1].value !=
      dep_mclk_table->entries[i].clk) {
   data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
       dep_mclk_table->entries[i].clk;
   data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
       i == 0;
   data->dpm_table.mclk_table.count++;
  }
 }

 /* likewise, entries[i-1] is the highest MCLK dependency entry */
 if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
  hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
 return 0;
}

static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
 struct phm_ppt_v1_information *table_info =
   (struct phm_ppt_v1_information *)(hwmgr->pptable);
 uint32_t i;

 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
 struct phm_odn_performance_level *entries;

 if (table_info == NULL)
  return -EINVAL;

 dep_sclk_table = table_info->vdd_dep_on_sclk;
 dep_mclk_table = table_info->vdd_dep_on_mclk;

 odn_table->odn_core_clock_dpm_levels.num_of_pl =
      data->golden_dpm_table.sclk_table.count;
 entries = odn_table->odn_core_clock_dpm_levels.entries;
 for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) {
  entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
  entries[i].enabled = true;
  entries[i].vddc = dep_sclk_table->entries[i].vddc;
 }

 smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
  (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));

 odn_table->odn_memory_clock_dpm_levels.num_of_pl =
      data->golden_dpm_table.mclk_table.count;
 entries = odn_table->odn_memory_clock_dpm_levels.entries;
 for (i = 0; i < data->golden_dpm_table.mclk_table.count; i++) {
  entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
  entries[i].enabled = true;
  entries[i].vddc = dep_mclk_table->entries[i].vddc;
 }

 smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
  (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));

 return 0;
}

static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
 struct phm_ppt_v1_information *table_info =
   (struct phm_ppt_v1_information *)(hwmgr->pptable);
 uint32_t min_vddc = 0;
 uint32_t max_vddc = 0;

 if (!table_info)
  return;

 dep_sclk_table = table_info->vdd_dep_on_sclk;

 atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);

 if (min_vddc == 0 || min_vddc > 2000
  || min_vddc > dep_sclk_table->entries[0].vddc)
  min_vddc = dep_sclk_table->entries[0].vddc;

 if (max_vddc == 0 || max_vddc > 2000
  || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
  max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;

 data->odn_dpm_table.min_vddc = min_vddc;
 data->odn_dpm_table.max_vddc = max_vddc;
}

static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
 struct phm_ppt_v1_information *table_info =
   (struct phm_ppt_v1_information *)(hwmgr->pptable);
 uint32_t i;

 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
 struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;

 if (table_info == NULL)
  return;

 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
  if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
     data->dpm_table.sclk_table.dpm_levels[i].value) {
   data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
   break;
  }
 }

 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
  if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
     data->dpm_table.mclk_table.dpm_levels[i].value) {
   data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
   break;
  }
 }

 dep_table = table_info->vdd_dep_on_mclk;
 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);

 for (i = 0; i < dep_table->count; i++) {
  if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
   data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
   return;
  }
 }

 dep_table = table_info->vdd_dep_on_sclk;
 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
 for (i = 0; i < dep_table->count; i++) {
  if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
   data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
   return;
  }
 }
 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
  data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
  data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
 }
}

/*
 * Reset and (re)build the default DPM tables, keep a golden copy for later
 * comparison/restore, and initialize the overdrive (ODN) state when
 * overdrive is enabled.  Always returns 0.
 */
static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

 smu7_reset_dpm_tables(hwmgr);

 switch (hwmgr->pp_table_version) {
 case PP_TABLE_V1:
  smu7_setup_dpm_tables_v1(hwmgr);
  break;
 case PP_TABLE_V0:
  smu7_setup_dpm_tables_v0(hwmgr);
  break;
 default:
  break;
 }

 smu7_setup_default_pcie_table(hwmgr);

 /* Snapshot the pristine tables before any overdrive edits. */
 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
   sizeof(struct smu7_dpm_table));

 if (hwmgr->od_enabled) {
  if (data->odn_dpm_table.max_vddc) {
   /* ODN already initialized: just flag user edits. */
   smu7_check_dpm_table_updated(hwmgr);
  } else {
   /* First time through: derive limits and defaults. */
   smu7_setup_voltage_range_from_vbios(hwmgr);
   smu7_odn_initial_default_setting(hwmgr);
  }
 }

 return 0;
}

/*
 * Ask the SMC to raise a GPIO interrupt on voltage-regulator-hot events,
 * when the RegulatorHot platform cap is set.  Returns the SMC message
 * result, or 0 when the cap is absent.
 */
static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
{
 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
        PHM_PlatformCaps_RegulatorHot))
  return 0;

 return smum_send_msg_to_smc(hwmgr,
    PPSMC_MSG_EnableVRHotGPIOInterrupt,
    NULL);
}

/*
 * Clear the SCLK_PWRMGT_OFF bit so hardware SCLK power management is no
 * longer held off.  Always returns 0.
 */
static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
   SCLK_PWRMGT_OFF, 0);
 return 0;
}

/*
 * Turn on Ultra-Low Voltage mode via the SMC when the platform supports
 * it.  Returns the SMC message result, or 0 when ULV is unsupported.
 */
static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

 if (!data->ulv_supported)
  return 0;

 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);
}

/*
 * Turn off Ultra-Low Voltage mode via the SMC when the platform supports
 * it.  Returns the SMC message result, or 0 when ULV is unsupported.
 */
static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

 if (!data->ulv_supported)
  return 0;

 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);
}

/*
 * Program the master deep-sleep switch to match the SclkDeepSleep platform
 * cap: ON when the cap is set, OFF otherwise.  Returns 0 on success,
 * -EINVAL when the SMC rejects the message.
 */
static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
 bool ds_supported = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
         PHM_PlatformCaps_SclkDeepSleep);

 if (ds_supported) {
  if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
   PP_ASSERT_WITH_CODE(false,
     "Attempt to enable Master Deep Sleep switch failed!",
     return -EINVAL);
 } else {
  if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_OFF, NULL))
   PP_ASSERT_WITH_CODE(false,
     "Attempt to disable Master Deep Sleep switch failed!",
     return -EINVAL);
 }

 return 0;
}

/*
 * Turn the master deep-sleep switch OFF when the SclkDeepSleep cap is set.
 * Returns 0 on success (or when the cap is absent), -EINVAL when the SMC
 * rejects the message.
 */
static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
        PHM_PlatformCaps_SclkDeepSleep))
  return 0;

 if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_OFF, NULL))
  PP_ASSERT_WITH_CODE(false,
    "Attempt to disable Master Deep Sleep switch failed!",
    return -EINVAL);

 return 0;
}

static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 uint32_t soft_register_value = 0;
 uint32_t handshake_disables_offset = data->soft_regs_start
    + smum_get_offsetof(hwmgr,
     SMU_SoftRegisters, HandshakeDisables);

 soft_register_value = cgs_read_ind_register(hwmgr->device,
    CGS_IND_REG__SMC, handshake_disables_offset);
 soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   handshake_disables_offset, soft_register_value);
 return 0;
}

static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 uint32_t soft_register_value = 0;
 uint32_t handshake_disables_offset = data->soft_regs_start
    + smum_get_offsetof(hwmgr,
     SMU_SoftRegisters, HandshakeDisables);

 soft_register_value = cgs_read_ind_register(hwmgr->device,
    CGS_IND_REG__SMC, handshake_disables_offset);
 soft_register_value |= smum_get_mac_definition(hwmgr,
     SMU_UVD_MCLK_HANDSHAKE_DISABLE);
 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   handshake_disables_offset, soft_register_value);
 return 0;
}

/*
 * smu7_enable_sclk_mclk_dpm - enable SCLK and MCLK DPM in the SMC.
 *
 * Order matters here: handshakes are disabled before sending the
 * corresponding DPM-enable message, and the LCAC/CAC programming follows
 * a successful MCLK enable.  Returns 0 on success, -EINVAL when the SMC
 * rejects an enable message.
 */
static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

 /* enable SCLK dpm */
 if (!data->sclk_dpm_key_disabled) {
  /* Polaris10..VegaM: break the VCE/SCLK handshake first */
  if (hwmgr->chip_id >= CHIP_POLARIS10 &&
      hwmgr->chip_id <= CHIP_VEGAM)
   smu7_disable_sclk_vce_handshake(hwmgr);

  PP_ASSERT_WITH_CODE(
  (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
  "Failed to enable SCLK DPM during DPM Start Function!",
  return -EINVAL);
 }

 /* enable MCLK dpm */
 if (0 == data->mclk_dpm_key_disabled) {
  /* break the UVD/MCLK handshake unless the feature mask asks for it */
  if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
   smu7_disable_handshake_uvd(hwmgr);

  PP_ASSERT_WITH_CODE(
    (0 == smum_send_msg_to_smc(hwmgr,
      PPSMC_MSG_MCLKDPM_Enable,
      NULL)),
    "Failed to enable MCLK DPM during DPM Start Function!",
    return -EINVAL);

  /* enable CAC on the memory sequencer for these ASICs */
  if ((hwmgr->chip_family == AMDGPU_FAMILY_CI) ||
      (hwmgr->chip_id == CHIP_POLARIS10) ||
      (hwmgr->chip_id == CHIP_POLARIS11) ||
      (hwmgr->chip_id == CHIP_POLARIS12) ||
      (hwmgr->chip_id == CHIP_TONGA) ||
      (hwmgr->chip_id == CHIP_TOPAZ))
   PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);


  /* Program the local CAC (LCAC) controllers.  The raw addresses on
   * the CI path and the magic values come from AMD hardware
   * programming sequences; presumably enable + threshold fields —
   * not documented here, do not change without HW reference.
   */
  if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
   cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
   cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
   cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
   udelay(10);
   cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
   cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
   cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
  } else {
   cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
   cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
   cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
   udelay(10);
   if (hwmgr->chip_id == CHIP_VEGAM) {
    cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
    cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
   } else {
    cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
    cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
   }
   cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
  }
 }

 return 0;
}

/*
 * smu7_start_dpm - bring up dynamic power management.
 *
 * Sequence: global power management + sclk deep sleep on, PCIe DPM
 * preparation, SCLK/MCLK DPM enable, then PCIe DPM enable/disable per
 * the key, and finally the AC/DC transition interrupt when supported.
 * Returns 0 on success, -EINVAL on SMC message failure.
 */
static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

 /* enable general power management */

 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
   GLOBAL_PWRMGT_EN, 1);

 /* enable sclk deep sleep */

 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
   DYNAMIC_PM_EN, 1);

 /* prepare for PCIE DPM: program the voltage-change timeout soft
  * register and release the PCIe link-controller reset
  */

 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
   data->soft_regs_start +
   smum_get_offsetof(hwmgr, SMU_SoftRegisters,
      VoltageChangeTimeout), 0x1000);
 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
   SWRST_COMMAND_1, RESETLC, 0x0);

 /* CI family: clear bit 0 of register 0x1488 (raw MMIO offset;
  * NOTE(review): purpose not documented here — confirm against the
  * CI register reference before changing)
  */
 if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
  cgs_write_register(hwmgr->device, 0x1488,
   (cgs_read_register(hwmgr->device, 0x1488) & ~0x1));

 if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
  pr_err("Failed to enable Sclk DPM and Mclk DPM!");
  return -EINVAL;
 }

 /* enable PCIE dpm */
 if (0 == data->pcie_dpm_key_disabled) {
  PP_ASSERT_WITH_CODE(
    (0 == smum_send_msg_to_smc(hwmgr,
      PPSMC_MSG_PCIeDPM_Enable,
      NULL)),
    "Failed to enable pcie DPM during DPM Start Function!",
    return -EINVAL);
 } else {
  PP_ASSERT_WITH_CODE(
    (0 == smum_send_msg_to_smc(hwmgr,
      PPSMC_MSG_PCIeDPM_Disable,
      NULL)),
    "Failed to disable pcie DPM during DPM Start Function!",
    return -EINVAL);
 }

 /* a failure here is logged but not fatal (no error action) */
 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
    PHM_PlatformCaps_Falcon_QuickTransition)) {
  PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
    PPSMC_MSG_EnableACDCGPIOInterrupt,
    NULL)),
    "Failed to enable AC DC GPIO Interrupt!",
    );
 }

 return 0;
}

static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

 /* disable SCLK dpm */
 if (!data->sclk_dpm_key_disabled) {
  PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
    "Trying to disable SCLK DPM when DPM is disabled",
    return 0);
  smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
 }

 /* disable MCLK dpm */
 if (!data->mclk_dpm_key_disabled) {
  PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
    "Trying to disable MCLK DPM when DPM is disabled",
    return 0);
  smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
 }

 return 0;
}

/*
 * smu7_stop_dpm - tear down dynamic power management.
 *
 * Reverse of smu7_start_dpm(): global power management and sclk deep
 * sleep off, PCIe DPM off, SCLK/MCLK DPM off, then voltage control off.
 * Returns 0 (early-out when DPM is already stopped), -EINVAL when the
 * SMC rejects the PCIe disable.
 */
static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

 /* disable general power management */
 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
   GLOBAL_PWRMGT_EN, 0);
 /* disable sclk deep sleep */
 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
   DYNAMIC_PM_EN, 0);

 /* disable PCIE dpm */
 if (!data->pcie_dpm_key_disabled) {
  PP_ASSERT_WITH_CODE(
    (smum_send_msg_to_smc(hwmgr,
      PPSMC_MSG_PCIeDPM_Disable,
      NULL) == 0),
    "Failed to disable pcie DPM during DPM Stop Function!",
    return -EINVAL);
 }

 smu7_disable_sclk_mclk_dpm(hwmgr);

 /* voltage DPM can only be turned off while DPM is still running */
 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
   "Trying to disable voltage DPM when DPM is disabled",
   return 0);

 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);

 return 0;
}

/*
 * smu7_set_dpm_event_sources - route throttling event sources to thermal
 * protection.
 *
 * @sources: bitmask of PHM_AutoThrottleSource_* bits.
 *
 * Maps the active source combination to a DPM event source and enables or
 * disables hardware thermal protection accordingly.  Unknown combinations
 * are logged and treated like 0 (protection off).
 */
static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
{
 bool protection;
 enum DPM_EVENT_SRC src;

 switch (sources) {
 default:
  pr_err("Unknown throttling event sources.");
  fallthrough;
 case 0:
  protection = false;
  /* src is unused */
  break;
 case (1 << PHM_AutoThrottleSource_Thermal):
  protection = true;
  src = DPM_EVENT_SRC_DIGITAL;
  break;
 case (1 << PHM_AutoThrottleSource_External):
  protection = true;
  src = DPM_EVENT_SRC_EXTERNAL;
  break;
 case (1 << PHM_AutoThrottleSource_External) |
   (1 << PHM_AutoThrottleSource_Thermal):
  protection = true;
  src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
  break;
 }
 /* Order matters - don't enable thermal protection for the wrong source. */
 if (protection) {
  PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
    DPM_EVENT_SRC, src);
  /* protection stays disabled unless a thermal controller exists */
  PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
    THERMAL_PROTECTION_DIS,
    !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
      PHM_PlatformCaps_ThermalController));
 } else
  PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
    THERMAL_PROTECTION_DIS, 1);
}

/*
 * Add @source to the active auto-throttle mask and reprogram the DPM
 * event sources when it was not already active.  Always returns 0.
 */
static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
  PHM_AutoThrottleSource source)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 uint32_t mask = 1 << source;

 if (data->active_auto_throttle_sources & mask)
  return 0;

 data->active_auto_throttle_sources |= mask;
 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);

 return 0;
}

/* Convenience wrapper: enable the thermal auto-throttle source. */
static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
 return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

/*
 * Remove @source from the active auto-throttle mask and reprogram the DPM
 * event sources when it was active.  Always returns 0.
 */
static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
  PHM_AutoThrottleSource source)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 uint32_t mask = 1 << source;

 if (!(data->active_auto_throttle_sources & mask))
  return 0;

 data->active_auto_throttle_sources &= ~mask;
 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);

 return 0;
}

/* Convenience wrapper: disable the thermal auto-throttle source. */
static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
 return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

/*
 * Latch a PCIe performance request.  Only sets a flag here; presumably
 * consumed later when the PCIe link is reconfigured — confirm against the
 * callers of pcie_performance_request.  Always returns 0.
 */
static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

 data->pcie_performance_request = true;
 return 0;
}

/*
 * Write EDC leakage values into the DIDT register block.
 *
 * @cac_config_regs: register offsets, terminated by 0xFFFFFFFF.
 * @edc_leakage_table: values, taken index-parallel from DIDT_REG[].
 *
 * Always returns 0.
 */
static int smu7_program_edc_didt_registers(struct pp_hwmgr *hwmgr,
        uint32_t *cac_config_regs,
        AtomCtrl_EDCLeakgeTable *edc_leakage_table)
{
 uint32_t idx;

 for (idx = 0; cac_config_regs[idx] != 0xFFFFFFFF; idx++)
  cgs_write_ind_register(hwmgr->device,
           CGS_IND_REG__DIDT,
           cac_config_regs[idx],
           edc_leakage_table->DIDT_REG[idx]);

 return 0;
}

static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 int ret = 0;

 if (!data->disable_edc_leakage_controller &&
     data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
     data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
  ret = smu7_program_edc_didt_registers(hwmgr,
            DIDTEDCConfig_P12,
            &data->edc_leakage_table);
  if (ret)
   return ret;

  ret = smum_send_msg_to_smc(hwmgr,
        (PPSMC_Msg)PPSMC_MSG_EnableEDCController,
        NULL);
 } else {
  ret = smum_send_msg_to_smc(hwmgr,
        (PPSMC_Msg)PPSMC_MSG_DisableEDCController,
        NULL);
 }

 return ret;
}

/*
 * smu7_populate_umdpstate_clocks - derive the UMD pstate (stable profile)
 * clocks from the golden DPM tables.
 *
 * Picks a stable MCLK (second-highest level, or the only level when just
 * one exists), scales it into a target SCLK, then snaps that target to the
 * nearest clock in the voltage-dependency table at or below it.  Peak
 * values come from the top table entries.  All outputs are converted from
 * the internal clock unit to MHz at the end (divide by 100, i.e. internal
 * values are presumably in 10 kHz units — consistent with the comment
 * below).
 */
static void smu7_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
 int32_t tmp_sclk, count, percentage;

 if (golden_dpm_table->mclk_table.count == 1) {
  /* single MCLK level: fixed 70% sclk/mclk ratio */
  percentage = 70;
  hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[0].value;
 } else {
  /* ratio of the top sclk to the top mclk, in percent */
  percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
    golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
  hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
 }

 tmp_sclk = hwmgr->pstate_mclk * percentage / 100;

 /* snap tmp_sclk down to a clock listed in the vddc dependency table */
 if (hwmgr->pp_table_version == PP_TABLE_V0) {
  struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk =
   hwmgr->dyn_state.vddc_dependency_on_sclk;

  for (count = vddc_dependency_on_sclk->count - 1; count >= 0; count--) {
   if (tmp_sclk >= vddc_dependency_on_sclk->entries[count].clk) {
    hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[count].clk;
    break;
   }
  }
  /* target below the lowest entry: fall back to the lowest */
  if (count < 0)
   hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[0].clk;

  hwmgr->pstate_sclk_peak =
   vddc_dependency_on_sclk->entries[vddc_dependency_on_sclk->count - 1].clk;
 } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
  struct phm_ppt_v1_information *table_info =
   (struct phm_ppt_v1_information *)(hwmgr->pptable);
  struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk =
   table_info->vdd_dep_on_sclk;

  for (count = vdd_dep_on_sclk->count - 1; count >= 0; count--) {
   if (tmp_sclk >= vdd_dep_on_sclk->entries[count].clk) {
    hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[count].clk;
    break;
   }
  }
  if (count < 0)
   hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[0].clk;

  hwmgr->pstate_sclk_peak =
   vdd_dep_on_sclk->entries[vdd_dep_on_sclk->count - 1].clk;
 }

 hwmgr->pstate_mclk_peak =
  golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;

 /* make sure the output is in Mhz */
 hwmgr->pstate_sclk /= 100;
 hwmgr->pstate_mclk /= 100;
 hwmgr->pstate_sclk_peak /= 100;
 hwmgr->pstate_mclk_peak /= 100;
}

static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
 int tmp_result = 0;
 int result = 0;

 if (smu7_voltage_control(hwmgr)) {
  tmp_result = smu7_enable_voltage_control(hwmgr);
  PP_ASSERT_WITH_CODE(tmp_result == 0,
    "Failed to enable voltage control!",
    result = tmp_result);

  tmp_result = smu7_construct_voltage_tables(hwmgr);
  PP_ASSERT_WITH_CODE((0 == tmp_result),
    "Failed to construct voltage tables!",
    result = tmp_result);
 }
 smum_initialize_mc_reg_table(hwmgr);

 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   PHM_PlatformCaps_EngineSpreadSpectrumSupport))
  PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
    GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);

 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   PHM_PlatformCaps_ThermalController))
  PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
    GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);

 tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to program static screen threshold parameters!",
   result = tmp_result);

 tmp_result = smu7_enable_display_gap(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to enable display gap!", result = tmp_result);

 tmp_result = smu7_program_voting_clients(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to program voting clients!", result = tmp_result);

 tmp_result = smum_process_firmware_header(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to process firmware header!", result = tmp_result);

 if (hwmgr->chip_id != CHIP_VEGAM) {
  tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
  PP_ASSERT_WITH_CODE((0 == tmp_result),
    "Failed to initialize switch from ArbF0 to F1!",
    result = tmp_result);
 }

 result = smu7_setup_default_dpm_tables(hwmgr);
 PP_ASSERT_WITH_CODE(0 == result,
   "Failed to setup default DPM tables!"return result);

 tmp_result = smum_init_smc_table(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to initialize SMC table!", result = tmp_result);

 tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to enable VR hot GPIO interrupt!", result = tmp_result);

 if (hwmgr->chip_id >= CHIP_POLARIS10 &&
     hwmgr->chip_id <= CHIP_VEGAM) {
  tmp_result = smu7_notify_has_display(hwmgr);
  PP_ASSERT_WITH_CODE((0 == tmp_result),
    "Failed to enable display setting!", result = tmp_result);
 } else {
  smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
 }

 if (hwmgr->chip_id >= CHIP_POLARIS10 &&
     hwmgr->chip_id <= CHIP_VEGAM) {
  tmp_result = smu7_populate_edc_leakage_registers(hwmgr);
  PP_ASSERT_WITH_CODE((0 == tmp_result),
    "Failed to populate edc leakage registers!", result = tmp_result);
 }

 tmp_result = smu7_enable_sclk_control(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to enable SCLK control!", result = tmp_result);

 tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to enable voltage control!", result = tmp_result);

 tmp_result = smu7_enable_ulv(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to enable ULV!", result = tmp_result);

 tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to enable deep sleep master switch!", result = tmp_result);

 tmp_result = smu7_enable_didt_config(hwmgr);
 PP_ASSERT_WITH_CODE((tmp_result == 0),
   "Failed to enable deep sleep master switch!", result = tmp_result);

 tmp_result = smu7_start_dpm(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to start DPM!", result = tmp_result);

 tmp_result = smu7_enable_smc_cac(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to enable SMC CAC!", result = tmp_result);

 tmp_result = smu7_enable_power_containment(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to enable power containment!", result = tmp_result);

 tmp_result = smu7_power_control_set_level(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to power control set level!", result = tmp_result);

 tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "Failed to enable thermal auto throttle!", result = tmp_result);

 tmp_result = smu7_pcie_performance_request(hwmgr);
 PP_ASSERT_WITH_CODE((0 == tmp_result),
   "pcie performance request failed!", result = tmp_result);

 smu7_populate_umdpstate_clocks(hwmgr);

 return 0;
}

static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
{
 if (!hwmgr->avfs_supported)
  return 0;

 if (enable) {
  if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
    CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
   PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
     hwmgr, PPSMC_MSG_EnableAvfs, NULL),
     "Failed to enable AVFS!",
     return -EINVAL);
  }
 } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
   CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
  PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
    hwmgr, PPSMC_MSG_DisableAvfs, NULL),
    "Failed to disable AVFS!",
    return -EINVAL);
 }

 return 0;
}

/*
 * Re-sync AVFS with pending overdrive changes: a VDDC change disables
 * AVFS, an SCLK change cycles it off/on, otherwise it is (re-)enabled.
 * No-op without AVFS support.  Always returns 0.
 */
static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 uint32_t pending = data->need_update_smu7_dpm_table;

 if (!hwmgr->avfs_supported)
  return 0;

 if (pending & DPMTABLE_OD_UPDATE_VDDC) {
  smu7_avfs_control(hwmgr, false);
 } else if (pending & DPMTABLE_OD_UPDATE_SCLK) {
  smu7_avfs_control(hwmgr, false);
  smu7_avfs_control(hwmgr, true);
 } else {
  smu7_avfs_control(hwmgr, true);
 }

 return 0;
}

/*
 * smu7_disable_dpm_tasks - full DPM teardown sequence.
 *
 * Reverse order of smu7_enable_dpm_tasks(): power containment, CAC, DIDT,
 * spread spectrum, thermal throttle, AVFS, DPM stop, deep sleep, ULV,
 * voting clients, SMC reset/stop, and finally the switch back to ArbF0.
 * Each step's failure is recorded but the sequence continues; returns the
 * last failing status, 0 on success.
 */
static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
 int tmp_result, result = 0;

 /* re-assert thermal protection disable before tearing down */
 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   PHM_PlatformCaps_ThermalController))
  PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
    GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

 tmp_result = smu7_disable_power_containment(hwmgr);
 PP_ASSERT_WITH_CODE((tmp_result == 0),
   "Failed to disable power containment!", result = tmp_result);

 tmp_result = smu7_disable_smc_cac(hwmgr);
 PP_ASSERT_WITH_CODE((tmp_result == 0),
   "Failed to disable SMC CAC!", result = tmp_result);

 tmp_result = smu7_disable_didt_config(hwmgr);
 PP_ASSERT_WITH_CODE((tmp_result == 0),
   "Failed to disable DIDT!", result = tmp_result);

 /* turn off engine spread spectrum */
 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
   GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

 tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
 PP_ASSERT_WITH_CODE((tmp_result == 0),
   "Failed to disable thermal auto throttle!", result = tmp_result);

 tmp_result = smu7_avfs_control(hwmgr, false);
 PP_ASSERT_WITH_CODE((tmp_result == 0),
   "Failed to disable AVFS!", result = tmp_result);

 tmp_result = smu7_stop_dpm(hwmgr);
 PP_ASSERT_WITH_CODE((tmp_result == 0),
   "Failed to stop DPM!", result = tmp_result);

 tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
 PP_ASSERT_WITH_CODE((tmp_result == 0),
   "Failed to disable deep sleep master switch!", result = tmp_result);

 tmp_result = smu7_disable_ulv(hwmgr);
 PP_ASSERT_WITH_CODE((tmp_result == 0),
   "Failed to disable ULV!", result = tmp_result);

 tmp_result = smu7_clear_voting_clients(hwmgr);
 PP_ASSERT_WITH_CODE((tmp_result == 0),
   "Failed to clear voting clients!", result = tmp_result);

 tmp_result = smu7_reset_to_default(hwmgr);
 PP_ASSERT_WITH_CODE((tmp_result == 0),
   "Failed to reset to default!", result = tmp_result);

 tmp_result = smum_stop_smc(hwmgr);
 PP_ASSERT_WITH_CODE((tmp_result == 0),
   "Failed to stop smc!", result = tmp_result);

 tmp_result = smu7_force_switch_to_arbf0(hwmgr);
 PP_ASSERT_WITH_CODE((tmp_result == 0),
   "Failed to force to switch arbf0!", result = tmp_result);

 return result;
}

static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 struct phm_ppt_v1_information *table_info =
   (struct phm_ppt_v1_information *)(hwmgr->pptable);
 struct amdgpu_device *adev = hwmgr->adev;
 uint8_t tmp1, tmp2;
 uint16_t tmp3 = 0;

 data->dll_default_on = false;
 data->mclk_dpm0_activity_target = 0xa;
 data->vddc_vddgfx_delta = 300;
 data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
 data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
 data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
 data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
 data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
 data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
 data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
 data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
 data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
 data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;

 data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
 data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
 data->pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
 /* need to set voltage control types before EVV patching */
 data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
 data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
 data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
 data->enable_tdc_limit_feature = true;
 data->enable_pkg_pwr_tracking_feature = true;
 data->force_pcie_gen = PP_PCIEGenInvalid;
 data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
 data->current_profile_setting.bupdate_sclk = 1;
 data->current_profile_setting.sclk_up_hyst = 0;
 data->current_profile_setting.sclk_down_hyst = 100;
 data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
 data->current_profile_setting.bupdate_mclk = 1;
 if (hwmgr->chip_id >= CHIP_POLARIS10) {
  if (adev->gmc.vram_width == 256) {
   data->current_profile_setting.mclk_up_hyst = 10;
   data->current_profile_setting.mclk_down_hyst = 60;
   data->current_profile_setting.mclk_activity = 25;
  } else if (adev->gmc.vram_width == 128) {
   data->current_profile_setting.mclk_up_hyst = 5;
   data->current_profile_setting.mclk_down_hyst = 16;
   data->current_profile_setting.mclk_activity = 20;
  } else if (adev->gmc.vram_width == 64) {
   data->current_profile_setting.mclk_up_hyst = 3;
   data->current_profile_setting.mclk_down_hyst = 16;
   data->current_profile_setting.mclk_activity = 20;
  }
 } else {
  data->current_profile_setting.mclk_up_hyst = 0;
  data->current_profile_setting.mclk_down_hyst = 100;
  data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
 }
 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;

 if (hwmgr->chip_id  == CHIP_HAWAII) {
  data->thermal_temp_setting.temperature_low = 94500;
  data->thermal_temp_setting.temperature_high = 95000;
  data->thermal_temp_setting.temperature_shutdown = 104000;
 } else {
  data->thermal_temp_setting.temperature_low = 99500;
  data->thermal_temp_setting.temperature_high = 100000;
  data->thermal_temp_setting.temperature_shutdown = 104000;
 }

 data->fast_watermark_threshold = 100;
 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
   VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
  data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
   VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
  data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;

 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   PHM_PlatformCaps_ControlVDDGFX)) {
  if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
   VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
   data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
  }
 }

 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   PHM_PlatformCaps_EnableMVDDControl)) {
  if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
    VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
   data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
  else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
    VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
   data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
 }

 if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
  phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
   PHM_PlatformCaps_ControlVDDGFX);

 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
   PHM_PlatformCaps_ControlVDDCI)) {
  if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
    VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
   data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
  else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
    VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
   data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
 }

 if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=98 H=93 G=95

¤ Dauer der Verarbeitung: 0.15 Sekunden  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.