/* * Copyright 2023 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. *
*/
/** * vcn_v5_0_0_early_init - set function pointers and load microcode * * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Set ring and irq function pointers * Load microcode from filesystem
*/ staticint vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
{ struct amdgpu_device *adev = ip_block->adev; int i, r;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) /* re-use enc ring as unified ring */
adev->vcn.inst[i].num_enc_rings = 1;
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.inst[i].pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
}
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); if (!amdgpu_sriov_vf(adev))
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
vcn_v5_0_0_alloc_ip_dump(adev);
r = amdgpu_vcn_sysfs_reset_mask_init(adev); if (r) return r;
return 0;
}
/** * vcn_v5_0_0_sw_fini - sw fini for VCN block * * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * VCN suspend and free up sw allocation
*/ staticint vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
{ struct amdgpu_device *adev = ip_block->adev; int i, r, idx;
if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { volatilestruct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i)) continue;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(adev, i); if (r) return r;
}
amdgpu_vcn_sysfs_reset_mask_fini(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_sw_fini(adev, i); if (r) return r;
}
kfree(adev->vcn.ip_dump);
return 0;
}
/** * vcn_v5_0_0_hw_init - start and test VCN block * * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Initialize the hardware, boot up the VCPU and do some testing
*/ staticint vcn_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
{ struct amdgpu_device *adev = ip_block->adev; struct amdgpu_ring *ring; int i, r;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue;
r = amdgpu_ring_test_helper(ring); if (r) return r;
}
return 0;
}
/** * vcn_v5_0_0_hw_fini - stop the hardware block * * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Stop the VCN block, mark ring as not ready any more
*/ staticint vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
{ struct amdgpu_device *adev = ip_block->adev; int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
if (adev->vcn.harvest_config & (1 << i)) continue;
cancel_delayed_work_sync(&vinst->idle_work);
if (!amdgpu_sriov_vf(adev)) { if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(vinst->cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, regUVD_STATUS))) {
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
}
}
return 0;
}
/** * vcn_v5_0_0_suspend - suspend VCN block * * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * HW fini and suspend VCN block
*/ staticint vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{ struct amdgpu_device *adev = ip_block->adev; int r, i;
r = vcn_v5_0_0_hw_fini(ip_block); if (r) return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(ip_block->adev, i); if (r) return r;
}
return r;
}
/** * vcn_v5_0_0_resume - resume VCN block * * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Resume firmware and hw init VCN block
*/ staticint vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
{ struct amdgpu_device *adev = ip_block->adev; int r, i;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_resume(ip_block->adev, i); if (r) return r;
}
data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
data &= ~0x103; if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
UVD_POWER_STATUS__UVD_PG_EN_MASK;
/** * vcn_v5_0_0_enable_static_power_gating - enable VCN static power gating * * @vinst: VCN instance * * Enable static power gating for VCN block
*/ staticvoid vcn_v5_0_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{ struct amdgpu_device *adev = vinst->adev; int inst = vinst->inst;
uint32_t data;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { /* Before power off, this indicator has to be turned on */
data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
/* Keeping one read-back to ensure all register writes are done, * otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
/* release VCPU reset to boot */
WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
for (j = 0; j < 10; ++j) {
uint32_t status;
for (k = 0; k < 100; ++k) {
status = RREG32_SOC15(VCN, i, regUVD_STATUS); if (status & 2) break;
mdelay(10); if (amdgpu_emu_mode == 1)
msleep(1);
}
if (amdgpu_emu_mode == 1) {
r = -1; if (status & 2) {
r = 0; break;
}
} else {
r = 0; if (status & 2) break;
dev_err(adev->dev, "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
UVD_VCPU_CNTL__BLK_RST_MASK,
~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
r = -1;
}
}
if (r) {
dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i); return r;
}
/* enable master interrupt */
WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK,
~UVD_MASTINT_EN__VCPU_EN_MASK);
/* clear the busy bit of VCN_STATUS */
WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
ring = &adev->vcn.inst[i].ring_enc[0];
WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
/* Keeping one read-back to ensure all register writes are done, * otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, i, regUVD_STATUS);
/* Keeping one read-back to ensure all register writes are done, * otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
return;
}
/** * vcn_v5_0_0_stop - VCN stop * * @vinst: VCN instance * * Stop VCN block
*/ staticint vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
{ struct amdgpu_device *adev = vinst->adev; int i = vinst->inst; volatilestruct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp; int r = 0;
if (adev->vcn.harvest_config & (1 << i)) return 0;
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = vcn_v5_0_0_stop(vinst); if (r) return r;
r = vcn_v5_0_0_start(vinst); if (r) return r; return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
/** * vcn_v5_0_0_is_idle - check VCN block is idle * * @ip_block: Pointer to the amdgpu_ip_block structure * * Check whether VCN block is idle
*/ staticbool vcn_v5_0_0_is_idle(struct amdgpu_ip_block *ip_block)
{ struct amdgpu_device *adev = ip_block->adev; int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue;
ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
}
return ret;
}
/** * vcn_v5_0_0_wait_for_idle - wait for VCN block idle * * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. * * Wait for VCN block idle
*/ staticint vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{ struct amdgpu_device *adev = ip_block->adev; int i, ret = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue;
ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
UVD_STATUS__IDLE); if (ret) return ret;
}
return ret;
}
/** * vcn_v5_0_0_set_clockgating_state - set VCN block clockgating state * * @ip_block: amdgpu_ip_block pointer * @state: clock gating state * * Set VCN block clockgating state
*/ staticint vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, enum amd_clockgating_state state)
{ struct amdgpu_device *adev = ip_block->adev; bool enable = (state == AMD_CG_STATE_GATE) ? true : false; int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
if (adev->vcn.harvest_config & (1 << i)) continue;
if (enable) { if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE) return -EBUSY;
vcn_v5_0_0_enable_clock_gating(vinst);
} else {
vcn_v5_0_0_disable_clock_gating(vinst);
}
}
return 0;
}
staticint vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst, enum amd_powergating_state state)
{ int ret = 0;
if (state == vinst->cur_state) return 0;
if (state == AMD_PG_STATE_GATE)
ret = vcn_v5_0_0_stop(vinst); else
ret = vcn_v5_0_0_start(vinst);
for (i = 0; i < adev->vcn.num_vcn_inst; i++) { if (adev->vcn.harvest_config & (1 << i)) continue;
inst_off = i * reg_count; /* mmUVD_POWER_STATUS is always readable and is first element of the array */
adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
is_powered = (adev->vcn.ip_dump[inst_off] &
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
if (is_powered) for (j = 1; j < reg_count; j++)
adev->vcn.ip_dump[inst_off + j] =
RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.