/* * Copyright 2014 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. *
*/
/**
 * vi_init_golden_registers - program the golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the per-ASIC recommended ("golden") register values.
 * Under SR-IOV the host-coordinated variant is used instead.
 */
static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		/* no golden register sequence for these parts */
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}
/* NOTE(review): garbled extraction — this span fuses the head of
 * vi_get_xclk with interior fragments of at least two other functions
 * (a BIOS read via the SMC ROM index/data registers, and a per-SE/SH
 * register-value lookup).  Code is reproduced byte-for-byte; it cannot
 * compile as-is and must be re-split against the original file. */
/** * vi_get_xclk - get the xclk * * @adev: amdgpu_device pointer * * Returns the reference clock used by the gfx engine * (VI).
*/ static u32 vi_get_xclk(struct amdgpu_device *adev)
{
u32 reference_clock = adev->clock.spll.reference_freq;
u32 tmp;
/* APU path: STONEY reports 48 MHz in vbios but actually runs 100 MHz
 * (value returned in 10 kHz units); other APUs use the SPLL reference. */
if (adev->flags & AMD_IS_APU) { switch (adev->asic_type) { case CHIP_STONEY: /* vbios says 48Mhz, but the actual freq is 100Mhz */ return 10000; default: return reference_clock;
}
}
/* If TCLK is muxed onto XCLK, report 10 MHz (10 kHz units). */
tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2); if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK)) return 1000;
/* NOTE(review): fragment boundary — the lines below belong to a BIOS
 * ROM-read routine whose head (and the declarations of bios,
 * length_bytes, dw_ptr, length_dw, flags, i) is missing; "returnfalse"
 * is a fused token from the extraction. */
if (bios == NULL) returnfalse; if (length_bytes == 0) returnfalse; /* APU vbios image is part of sbios image */ if (adev->flags & AMD_IS_APU) returnfalse;
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4; /* take the smc lock since we are using the smc index */
spin_lock_irqsave(&adev->smc_idx_lock, flags); /* set rom index to 0 */
WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
WREG32(mmSMC_IND_DATA_11, 0); /* set index to data for continous read */
WREG32(mmSMC_IND_INDEX_11, ixROM_DATA); for (i = 0; i < length_dw; i++)
dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
/* NOTE(review): another fused fragment — a lookup that returns cached
 * gfx config values for registers that cannot be read directly, falling
 * through to a raw register read by default; its head is missing. */
switch (reg_offset) { case mmCC_RB_BACKEND_DISABLE: return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable; case mmGC_USER_RB_BACKEND_DISABLE: return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable; case mmPA_SC_RASTER_CONFIG: return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config; case mmPA_SC_RASTER_CONFIG_1: return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
}
switch (reg_offset) { case mmGB_ADDR_CONFIG: return adev->gfx.config.gb_addr_config; case mmMC_ARB_RAMCFG: return adev->gfx.config.mc_arb_ramcfg; case mmGB_TILE_MODE0: case mmGB_TILE_MODE1: case mmGB_TILE_MODE2: case mmGB_TILE_MODE3: case mmGB_TILE_MODE4: case mmGB_TILE_MODE5: case mmGB_TILE_MODE6: case mmGB_TILE_MODE7: case mmGB_TILE_MODE8: case mmGB_TILE_MODE9: case mmGB_TILE_MODE10: case mmGB_TILE_MODE11: case mmGB_TILE_MODE12: case mmGB_TILE_MODE13: case mmGB_TILE_MODE14: case mmGB_TILE_MODE15: case mmGB_TILE_MODE16: case mmGB_TILE_MODE17: case mmGB_TILE_MODE18: case mmGB_TILE_MODE19: case mmGB_TILE_MODE20: case mmGB_TILE_MODE21: case mmGB_TILE_MODE22: case mmGB_TILE_MODE23: case mmGB_TILE_MODE24: case mmGB_TILE_MODE25: case mmGB_TILE_MODE26: case mmGB_TILE_MODE27: case mmGB_TILE_MODE28: case mmGB_TILE_MODE29: case mmGB_TILE_MODE30: case mmGB_TILE_MODE31:
idx = (reg_offset - mmGB_TILE_MODE0); return adev->gfx.config.tile_mode_array[idx]; case mmGB_MACROTILE_MODE0: case mmGB_MACROTILE_MODE1: case mmGB_MACROTILE_MODE2: case mmGB_MACROTILE_MODE3: case mmGB_MACROTILE_MODE4: case mmGB_MACROTILE_MODE5: case mmGB_MACROTILE_MODE6: case mmGB_MACROTILE_MODE7: case mmGB_MACROTILE_MODE8: case mmGB_MACROTILE_MODE9: case mmGB_MACROTILE_MODE10: case mmGB_MACROTILE_MODE11: case mmGB_MACROTILE_MODE12: case mmGB_MACROTILE_MODE13: case mmGB_MACROTILE_MODE14: case mmGB_MACROTILE_MODE15:
idx = (reg_offset - mmGB_MACROTILE_MODE0); return adev->gfx.config.macrotile_mode_array[idx]; default: return RREG32(reg_offset);
}
}
}
/* NOTE(review): interior fragment of a PCI-config reset helper — the
 * head (declarations of i and r, the reset trigger) is missing from
 * this extraction.  Polls CONFIG_MEMSIZE until it reads something other
 * than all-ones, i.e. the ASIC is back on the bus. */
/* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { /* enable BM */
pci_set_master(adev->pdev);
adev->has_hw_reset = true;
r = 0; break;
}
udelay(1);
}
/**
 * vi_asic_supports_baco - query BACO (Bus Active, Chip Off) support
 *
 * @adev: amdgpu_device pointer
 *
 * Returns nonzero if the ASIC family can use BACO reset (delegates the
 * final answer to the DPM layer), 0 otherwise.
 */
static int vi_asic_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		return amdgpu_dpm_is_baco_supported(adev);
	default:
		return 0;
	}
}
staticenum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{ int baco_reset;
if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
amdgpu_reset_method == AMD_RESET_METHOD_BACO) return amdgpu_reset_method;
if (amdgpu_reset_method != -1)
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
amdgpu_reset_method);
switch (adev->asic_type) { case CHIP_FIJI: case CHIP_TONGA: case CHIP_POLARIS10: case CHIP_POLARIS11: case CHIP_POLARIS12: case CHIP_TOPAZ:
baco_reset = amdgpu_dpm_is_baco_supported(adev); break; default:
baco_reset = 0; break;
}
if (baco_reset) return AMD_RESET_METHOD_BACO; else return AMD_RESET_METHOD_LEGACY;
}
/** * vi_asic_reset - soft reset GPU * * @adev: amdgpu_device pointer * * Look up which blocks are hung and attempt * to reset them. * Returns 0 for success.
*/ staticint vi_asic_reset(struct amdgpu_device *adev)
{ int r;
/* APUs don't have full asic reset */ if (adev->flags & AMD_IS_APU) return 0;
/* choose BACO when vi_asic_reset_method() selects it, else the
 * PCI-config reset path */
if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
dev_info(adev->dev, "BACO reset\n");
r = amdgpu_dpm_baco_reset(adev);
} else {
dev_info(adev->dev, "PCI CONFIG reset\n");
r = vi_asic_pci_config_reset(adev);
}
/* NOTE(review): the function's tail ("return r;" and the closing brace)
 * is missing from this extraction, and "staticint" above is a fused
 * token — both must be restored against the original file. */
/* NOTE(review): interior of the ASPM enable/program helpers — the
 * function heads and the declarations of orig, data, data1,
 * bClkReqSupport and bL1SS are missing from this extraction.  Code is
 * reproduced byte-for-byte.  The read-modify-write pattern throughout
 * only writes a register back when the value actually changed. */
orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
data |= PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT <<
PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
data |= PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT <<
PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK; if (orig != data)
WREG32_PCIE(ixPCIE_LC_CNTL, data);
}
/* NOTE(review): fragment boundary — a second function body (the main
 * ASPM programming sequence) begins here; its head is missing. */
orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; if (orig != data)
WREG32_PCIE(ixPCIE_LC_CNTL, data);
orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
data |= 0x0024 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT;
data |= PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK; if (orig != data)
WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);
orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK; if (orig != data)
WREG32_PCIE(ixPCIE_LC_CNTL3, data);
orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK; if (orig != data)
WREG32_PCIE(ixPCIE_P_CNTL, data);
orig = data = RREG32_PCIE(ixPCIE_LC_CNTL6);
data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK; if (orig != data)
WREG32_PCIE(ixPCIE_LC_CNTL6, data);
orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
data |= PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK; if (orig != data)
WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);
/* clock power management capability gates the CLKREQ-dependent setup */
pci_read_config_dword(adev->pdev, LINK_CAP, &data); if (!(data & PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK))
bClkReqSupport = false;
if (bClkReqSupport) {
orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
(1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT); if (orig != data)
WREG32_SMC(ixTHM_CLK_CNTL, data);
orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
MISC_CLK_CTRL__ZCLK_SEL_MASK | MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK);
data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
(1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
data |= (0x20 << MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT); if (orig != data)
WREG32_SMC(ixMISC_CLK_CTRL, data);
orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
data |= CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK; if (orig != data)
WREG32_SMC(ixCG_CLKPIN_CNTL, data);
orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
/* NOTE(review): reads CG_CLKPIN_CNTL_2 but the write below targets
 * ixCG_CLKPIN_CNTL — this looks like a typo for ixCG_CLKPIN_CNTL_2;
 * verify against the upstream kernel source. */
data |= CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK; if (orig != data)
WREG32_SMC(ixCG_CLKPIN_CNTL, data);
orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT); if (orig != data)
WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);
orig = data = RREG32_PCIE(ixCPM_CONTROL);
data |= (CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK |
CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK); if (orig != data)
WREG32_PCIE(ixCPM_CONTROL, data);
orig = data = RREG32_PCIE(ixPCIE_CONFIG_CNTL);
data &= ~PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK;
data |= (0xE << PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT); if (orig != data)
WREG32_PCIE(ixPCIE_CONFIG_CNTL, data);
orig = data = RREG32(mmBIF_CLK_CTRL);
data |= BIF_CLK_CTRL__BIF_XSTCLK_READY_MASK; if (orig != data)
WREG32(mmBIF_CLK_CTRL, data);
orig = data = RREG32_PCIE(ixPCIE_LC_CNTL7);
data |= PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK; if (orig != data)
WREG32_PCIE(ixPCIE_LC_CNTL7, data);
orig = data = RREG32_PCIE(ixPCIE_HW_DEBUG);
data |= PCIE_HW_DEBUG__HW_01_DEBUG_MASK; if (orig != data)
WREG32_PCIE(ixPCIE_HW_DEBUG, data);
orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK; if (bL1SS)
data &= ~PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK; if (orig != data)
WREG32_PCIE(ixPCIE_LC_CNTL2, data);
}
vi_enable_aspm(adev);
/* drop L0s inactivity when N_FTS is saturated and the link is
 * reverse-transmit/receive capable */
data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
data1 = RREG32_PCIE(ixPCIE_LC_STATUS1); if (((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) &&
data1 & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK &&
data1 & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK) {
orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; if (orig != data)
WREG32_PCIE(ixPCIE_LC_CNTL, data);
}
/* part-specific training quirk for POLARIS12 (non-P23) and P22 parts */
if ((adev->asic_type == CHIP_POLARIS12 &&
!(ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))) ||
ASIC_IS_P22(adev->asic_type, adev->external_rev_id)) {
orig = data = RREG32_PCIE(ixPCIE_LC_TRAINING_CNTL);
data &= ~PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK; if (orig != data)
WREG32_PCIE(ixPCIE_LC_TRAINING_CNTL, data);
}
}
staticbool vi_need_full_reset(struct amdgpu_device *adev)
{ switch (adev->asic_type) { case CHIP_CARRIZO: case CHIP_STONEY: /* CZ has hang issues with full reset at the moment */ returnfalse; case CHIP_FIJI: case CHIP_TONGA: /* XXX: soft reset should work on fiji and tonga */ returntrue; case CHIP_POLARIS10: case CHIP_POLARIS11: case CHIP_POLARIS12: case CHIP_TOPAZ: default: /* change this when we support soft reset */ returntrue;
}
}
/* NOTE(review): tail fragment of a PCIe usage-counter routine — the head
 * (and the declarations of perfctr, tmp, cnt0_of, cnt1_of, count0 and
 * count1) is missing from this extraction. */
/* This reports 0 on APUs, so return to avoid writing/reading registers * that may or may not be different from their GPU counterparts
*/ if (adev->flags & AMD_IS_APU) return;
/* Set the 2 events that we wish to watch, defined above */ /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
/* Write to enable desired perf counters */
WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr); /* Zero out and enable the perf counters * Write 0x5: * Bit 0 = Start all counters(1) * Bit 2 = Global counter reset enable(1)
*/
WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
/* sample window: counters accumulate for one second */
msleep(1000);
/* Load the shadow and disable the perf counters * Write 0x2: * Bit 0 = Stop counters(0) * Bit 1 = Load the shadow counters(1)
*/
WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
/* Read register values to get any >32bit overflow */
tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
/* Get the values and add the overflow */
/* NOTE(review): the << 32 shifts below are only well-defined if
 * cnt0_of/cnt1_of are 64-bit — the declarations are not visible here;
 * verify in the original file. */
*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
/* NOTE(review): two fused fragments — an hw-init sequence (golden
 * registers, ASPM, doorbell aperture) and the tail of an HDP
 * clock-gating helper.  Reproduced byte-for-byte. */
/* move the golden regs per IP block */
vi_init_golden_registers(adev); /* enable aspm */
vi_program_aspm(adev); /* enable the doorbell aperture */
vi_enable_doorbell_aperture(adev, true);
/* NOTE(review): fragment boundary — the lines below belong to an HDP
 * medium-grain clock-gating helper whose head (and the declarations of
 * enable, temp and data) is missing. */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK; else
data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
/* only touch the register when the value actually changed */
if (temp != data)
WREG32(mmHDP_HOST_PATH_CNTL, data);
}
/* NOTE(review): interior fragment of a set-clockgating-state handler —
 * the function head (and the declarations of state) is missing from
 * this extraction.  Per ASIC family, enables/disables the applicable
 * BIF/HDP/ROM/DRM clock-gating features; Tonga/Polaris parts delegate
 * to the SMU-based path. */
switch (adev->asic_type) { case CHIP_FIJI:
vi_update_bif_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
vi_update_hdp_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
vi_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE);
vi_update_rom_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE); break; case CHIP_CARRIZO: case CHIP_STONEY:
vi_update_bif_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
vi_update_hdp_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
vi_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE);
vi_update_drm_light_sleep(adev,
state == AMD_CG_STATE_GATE); break; case CHIP_TONGA: case CHIP_POLARIS10: case CHIP_POLARIS11: case CHIP_POLARIS12: case CHIP_VEGAM:
vi_common_set_clockgating_state_by_smu(adev, state); break; default: break;
} return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.