/* * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Sonny Jiang <sonny.jiang@amd.com>
*/
for (i = 0; i < 10; ++i) {
uint32_t status; for (j = 0; j < 100; ++j) {
status = RREG32(mmUVD_STATUS); if (status & 2) break;
mdelay(10);
}
r = 0; if (status & 2) break;
DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
mdelay(10);
WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
mdelay(10);
r = -1;
}
if (r) {
DRM_ERROR("UVD not responding, giving up!!!\n"); return r;
}
for (i = 0; i < 10; ++i) { for (j = 0; j < 100; ++j) {
status = RREG32(mmUVD_STATUS); if (status & 2) break;
mdelay(1);
} if (status & 2) break;
}
for (i = 0; i < 10; ++i) { for (j = 0; j < 100; ++j) {
status = RREG32(mmUVD_LMI_STATUS); if (status & 0xf) break;
mdelay(1);
} if (status & 0xf) break;
}
/* Stall UMC and register bus before resetting VCPU */
WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
for (i = 0; i < 10; ++i) { for (j = 0; j < 100; ++j) {
status = RREG32(mmUVD_LMI_STATUS); if (status & 0x240) break;
mdelay(1);
} if (status & 0x240) break;
}
WREG32_P(0x3D49, 0, ~(1 << 2));
WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));
/* put LMI, VCPU, RBC etc... into reset */
WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
data |= 0x3fff;
WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
orig = data = RREG32(mmUVD_CGC_CTRL);
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; if (orig != data)
WREG32(mmUVD_CGC_CTRL, data);
} else {
data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
data &= ~0x3fff;
WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
orig = data = RREG32(mmUVD_CGC_CTRL);
data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; if (orig != data)
WREG32(mmUVD_CGC_CTRL, data);
}
}
/**
 * uvd_v3_1_hw_init - start and test UVD block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing.
 *
 * On SI, the UVD is meant to be used in a specific power state,
 * or alternatively the driver can manually enable its clock.
 * In amdgpu we use the dedicated UVD power state when DPM is enabled.
 * Calling amdgpu_dpm_enable_uvd makes DPM select the UVD power state
 * for the SMU and afterwards enables the UVD clock.
 * This is automatically done by amdgpu_uvd_ring_begin_use when work
 * is submitted to the UVD ring. Here, we have to call it manually
 * in order to power up UVD before firmware validation.
 *
 * Note that we must not disable the UVD clock here, as that would
 * cause the ring test to fail. However, UVD is powered off
 * automatically after the ring test: amdgpu_uvd_ring_end_use calls
 * the UVD idle work handler which will disable the UVD clock when
 * all fences are signalled.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	int r;

	/* Make sure UVD is powered during FW validation.
	 * It's going to be automatically powered off after the ring test.
	 */
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);
	else
		amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

	r = uvd_v3_1_fw_validate(adev);
	if (r) {
		DRM_ERROR("amdgpu: UVD Firmware validate fail (%d).\n", r);
		return r;
	}

	uvd_v3_1_start(adev);

	r = amdgpu_ring_test_helper(ring);
	if (r) {
		DRM_ERROR("amdgpu: UVD ring test fail (%d).\n", r);
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	/* NOTE(review): the original upstream code fills the 10 allocated
	 * dwords here (semaphore timeout register writes) and calls
	 * amdgpu_ring_commit(); that section appears to have been lost in
	 * this copy, leaving the ring allocation uncommitted — restore it
	 * from the upstream source.
	 */

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}
/**
 * uvd_v3_1_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the UVD block, mark ring as not ready any more.
 *
 * Returns 0 (cannot fail).
 */
static int uvd_v3_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* Cancel the delayed idle work first so it cannot re-enable the
	 * engine while we are shutting it down.
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	/* Only stop the block if it is actually running */
	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v3_1_stop(adev);

	/*
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	return 0;
}
/*
 * NOTE(review): the following text is web-page residue (a German site
 * disclaimer: "The information on this web page was compiled to the best
 * of our knowledge; however, neither completeness, correctness, nor
 * quality of the provided information is guaranteed. Note: the colored
 * syntax display and the measurement are still experimental.") — it is
 * not part of the source file and should be removed; the remainder of
 * uvd_v3_1.c (suspend/resume, soft reset, IP funcs) was truncated here.
 */