/* * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. *
*/ #include <linux/firmware.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/pci.h>
*value = 0; for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
en = &soc15_allowed_read_registers[i]; if (!adev->reg_offset[en->hwip][en->inst]) continue; elseif (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ en->reg_offset)) continue;
/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
*/
if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)
connected_to_cpu = true;
if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
amdgpu_reset_method == AMD_RESET_METHOD_PCI) { /* If connected to cpu, driver only support mode2 */ if (connected_to_cpu) return AMD_RESET_METHOD_MODE2; return amdgpu_reset_method;
}
if (amdgpu_reset_method != -1)
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
amdgpu_reset_method);
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(10, 0, 0): case IP_VERSION(10, 0, 1): case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): return AMD_RESET_METHOD_MODE2; case IP_VERSION(9, 0, 0): case IP_VERSION(11, 0, 2): if (adev->asic_type == CHIP_VEGA20) { if (adev->psp.sos.fw_version >= 0x80067)
baco_reset = amdgpu_dpm_is_baco_supported(adev); /* * 1. PMFW version > 0x284300: all cases use baco * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
*/ if (ras && adev->ras_enabled &&
adev->pm.fw_version <= 0x283400)
baco_reset = 0;
} else {
baco_reset = amdgpu_dpm_is_baco_supported(adev);
} break; case IP_VERSION(13, 0, 2): /* * 1.connected to cpu: driver issue mode2 reset * 2.discret gpu: driver issue mode1 reset
*/ if (connected_to_cpu) return AMD_RESET_METHOD_MODE2; break; case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 14): case IP_VERSION(13, 0, 12): /* Use gpu_recovery param to target a reset method. * Enable triggering of GPU reset only if specified * by module parameter.
*/ if (adev->pcie_reset_ctx.in_link_reset) return AMD_RESET_METHOD_LINK; if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5) return AMD_RESET_METHOD_MODE2; elseif (!(adev->flags & AMD_IS_APU)) return AMD_RESET_METHOD_MODE1; else return AMD_RESET_METHOD_MODE2; default: break;
}
if (baco_reset) return AMD_RESET_METHOD_BACO; else return AMD_RESET_METHOD_MODE1;
}
staticbool soc15_need_reset_on_resume(struct amdgpu_device *adev)
{ /* Will reset for the following suspend abort cases. * 1) S3 suspend aborted in the normal S3 suspend * 2) S3 suspend aborted in performing pm core test.
*/ if (adev->in_s3 && !pm_resume_via_firmware()) returntrue; else returnfalse;
}
staticint soc15_asic_reset(struct amdgpu_device *adev)
{ /* original raven doesn't have full asic reset */ /* On the latest Raven, the GPU reset can be performed * successfully. So now, temporarily enable it for the * S3 suspend abort case.
*/
/* Initialize the per-ASIC IP register base offsets.
 *
 * Must run before any HW register access, since all SOC15 register
 * macros resolve through adev->reg_offset tables filled in here.
 */
static void soc15_reg_base_init(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		break;
	default:
		DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
		break;
	}
}
/* Install the SR-IOV virtualization ops for SOC15 parts. */
void soc15_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_ai_virt_ops;

	/* init soc15 reg base early enough so we can
	 * request full access for sriov before
	 * set_ip_blocks.
	 */
	soc15_reg_base_init(adev);
}
staticbool soc15_need_full_reset(struct amdgpu_device *adev)
{ /* change this when we implement soft reset */ returntrue;
}
/* This reports 0 on APUs, so return to avoid writing/reading registers * that may or may not be different from their GPU counterparts
*/ if (adev->flags & AMD_IS_APU) return;
/* Set the 2 events that we wish to watch, defined above */ /* Reg 40 is # received msgs */ /* Reg 104 is # of posted requests sent */
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
/* Write to enable desired perf counters */
WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr); /* Zero out and enable the perf counters * Write 0x5: * Bit 0 = Start all counters(1) * Bit 2 = Global counter reset enable(1)
*/
WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
msleep(1000);
/* Load the shadow and disable the perf counters * Write 0x2: * Bit 0 = Stop counters(0) * Bit 1 = Load the shadow counters(1)
*/
WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
/* Read register values to get any >32bit overflow */
tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
/* Get the values and add the overflow */
*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
/* This reports 0 on APUs, so return to avoid writing/reading registers * that may or may not be different from their GPU counterparts
*/ if (adev->flags & AMD_IS_APU) return;
/* Set the 2 events that we wish to watch, defined above */ /* Reg 40 is # received msgs */ /* Reg 108 is # of posted requests sent on VG20 */
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
EVENT0_SEL, 40);
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
EVENT1_SEL, 108);
/* Write to enable desired perf counters */
WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr); /* Zero out and enable the perf counters * Write 0x5: * Bit 0 = Start all counters(1) * Bit 2 = Global counter reset enable(1)
*/
WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
msleep(1000);
/* Load the shadow and disable the perf counters * Write 0x2: * Bit 0 = Stop counters(0) * Bit 1 = Load the shadow counters(1)
*/
WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
/* Read register values to get any >32bit overflow */
tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);
/* Get the values and add the overflow */
*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}
/* CP hangs in IGT reloading test on RN, reset to WA */ if (adev->asic_type == CHIP_RENOIR) returntrue;
if (amdgpu_gmc_need_reset_on_init(adev)) returntrue; if (amdgpu_psp_tos_reload_needed(adev)) returntrue; /* Just return false for soc15 GPUs. Reset does not seem to * be necessary.
*/ if (!amdgpu_passthrough(adev)) returnfalse;
if (adev->flags & AMD_IS_APU) returnfalse;
/* Check sOS sign of life register to confirm sys driver and sOS * are already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); if (sol_reg) returntrue;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
/* Enable selfring doorbell aperture late because doorbell BAR * aperture will change if resize BAR successfully in gmc sw_init.
*/
adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
if (adev->df.funcs &&
adev->df.funcs->sw_fini)
adev->df.funcs->sw_fini(adev); return 0;
}
/* Program the SDMA doorbell ranges for every SDMA instance.
 *
 * Doorbell writes outside the SDMA/IH/MM/ACV ranges are routed to CP,
 * so the SDMA ranges must be set up before CP ring tests.
 */
static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;

	/* sdma doorbell range is programmed by the hypervisor on SR-IOV */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			adev->nbio.funcs->sdma_doorbell_range(adev, i, true,
				adev->doorbell_index.sdma_engine[i] << 1,
				adev->doorbell_index.sdma_doorbell_range);
		}
	}
}
/* enable aspm */
soc15_program_aspm(adev); /* setup nbio registers */
adev->nbio.funcs->init_registers(adev); /* remap HDP registers to a hole in mmio space, * for the purpose of expose those registers * to process space
*/ if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
adev->nbio.funcs->enable_doorbell_aperture(adev, true);
/* HW doorbell routing policy: doorbell writing not * in SDMA/IH/MM/ACV range will be routed to CP. So * we need to init SDMA doorbell range prior * to CP ip block init and ring test. IH already * happens before CP.
*/
soc15_sdma_doorbell_range_init(adev);
/* Disable the doorbell aperture and selfring doorbell aperture * separately in hw_fini because soc15_enable_doorbell_aperture * has been removed and there is no need to delay disabling * selfring doorbell.
*/
adev->nbio.funcs->enable_doorbell_aperture(adev, false);
adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_put_irq(adev);
/* * For minimal init, late_init is not called, hence RAS irqs are not * enabled.
*/ if ((!amdgpu_sriov_vf(adev)) &&
(adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) &&
adev->nbio.ras_if &&
amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { if (adev->nbio.ras &&
adev->nbio.ras->init_ras_controller_interrupt)
amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0); if (adev->nbio.ras &&
adev->nbio.ras->init_ras_err_event_athub_interrupt)
amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
}
switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { case IP_VERSION(6, 1, 0): case IP_VERSION(6, 2, 0): case IP_VERSION(7, 4, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
state == AMD_CG_STATE_GATE);
soc15_update_drm_light_sleep(adev,
state == AMD_CG_STATE_GATE);
adev->smuio.funcs->update_rom_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->df.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE); break; case IP_VERSION(7, 0, 0): case IP_VERSION(7, 0, 1): case IP_VERSION(2, 5, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
state == AMD_CG_STATE_GATE);
soc15_update_drm_light_sleep(adev,
state == AMD_CG_STATE_GATE); break; case IP_VERSION(7, 4, 1): case IP_VERSION(7, 4, 4):
adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE); break; default: break;
} return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.