/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "vega10_enum.h"
#include "v9_structs.h"
#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"
#include "gfx_v9_4_3.h"
#include "gfx_v9_4_3_cleaner_shader.h"
#include "amdgpu_xcp.h"
#include "amdgpu_aca.h"
MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin" );
MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin" );
MODULE_FIRMWARE("amdgpu/gc_9_5_0_mec.bin" );
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin" );
MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin" );
MODULE_FIRMWARE("amdgpu/gc_9_5_0_rlc.bin" );
MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin" );
MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin" );
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
#define XCC_REG_RANGE_0_LOW 0x2000 /* XCC gfxdec0 lower Bound */
#define XCC_REG_RANGE_0_HIGH 0x3400 /* XCC gfxdec0 upper Bound */
#define XCC_REG_RANGE_1_LOW 0xA000 /* XCC gfxdec1 lower Bound */
#define XCC_REG_RANGE_1_HIGH 0x10000 /* XCC gfxdec1 upper Bound */
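/* Registers in the XCC gfxdec0/gfxdec1 decode ranges are per-XCC; only the
 * lower 16 bits of the offset are kept so packets always address the local
 * XCC instance (see gfx_v9_4_3_normalize_xcc_reg_offset()).
 */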
#define NORMALIZE_XCC_REG_OFFSET(offset) \
(offset & 0xFFFF)
static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL),
SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2),
SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
/* SE status registers */
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
};
static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
/* compute queue registers */
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
};
struct amdgpu_gfx_ras gfx_v9_4_3_ras;
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info);
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
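/* Emit a SET_RESOURCES packet on the KIQ ring: hand the compute queue mask
 * and the 256-byte-aligned cleaner shader MC address to the hardware scheduler.
 */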
static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
uint64_t queue_mask)
{
struct amdgpu_device *adev = kiq_ring->adev;
u64 shader_mc_addr;
/* Cleaner shader MC address */
shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring,
PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  /* vmid_mask:0, queue_type:0 (KIQ) */
PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
amdgpu_ring_write(kiq_ring,
lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring,
upper_32_bits(queue_mask)); /* queue mask hi */
amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
amdgpu_ring_write(kiq_ring, 0); /* oac mask */
amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}
static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = kiq_ring->adev;
uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
/*queue_type: normal compute queue */
PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
/* alloc format: all_on_one_pipe */
PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
/* num_queues: must be 1 */
PACKET3_MAP_QUEUES_NUM_QUEUES(1));
amdgpu_ring_write(kiq_ring,
PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
struct amdgpu_ring *ring,
enum amdgpu_unmap_queues_action action,
u64 gpu_addr, u64 seq)
{
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
PACKET3_UNMAP_QUEUES_ACTION(action) |
PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
amdgpu_ring_write(kiq_ring,
PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
if (action == PREEMPT_QUEUES_NO_UNMAP) {
amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
amdgpu_ring_write(kiq_ring, seq);
} else {
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
}
}
static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
struct amdgpu_ring *ring,
u64 addr,
u64 seq)
{
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
amdgpu_ring_write(kiq_ring,
PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
PACKET3_QUERY_STATUS_COMMAND(2));
/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
amdgpu_ring_write(kiq_ring,
PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}
static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
uint16_t pasid, uint32_t flush_type,
bool all_hub)
{
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
amdgpu_ring_write(kiq_ring,
PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
PACKET3_INVALIDATE_TLBS_PASID(pasid) |
PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}
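/* Reset a single compute HQD: select the queue via GRBM, issue a dequeue
 * request plus an SPI compute queue reset, then poll CP_HQD_ACTIVE until the
 * queue goes idle. Runs under RLC safe mode and the srbm mutex.
 */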
static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
uint32_t xcc_id, uint32_t vmid)
{
struct amdgpu_device *adev = kiq_ring->adev;
unsigned i;
	/* enter safe mode */
amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);
if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
		/* wait till dequeue takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout)
			dev_err(adev->dev, "failed to wait on hqd deactivate\n");
	} else {
		dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
	}
soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
/* exit safe mode */
amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
}
static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
.kiq_query_status = gfx_v9_4_3_kiq_query_status,
.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
.kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
.set_resources_size = 8,
.map_queues_size = 7,
.unmap_queues_size = 6,
.query_status_size = 7,
.invalidate_tlbs_size = 2,
};
static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
int i, num_xcc;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++)
adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
}
static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
{
int i, num_xcc, dev_inst;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++) {
dev_inst = GET_INST(GC, i);
WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
GOLDEN_GB_ADDR_CONFIG);
WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
}
}
static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)
{
uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);
/* If it is an XCC reg, normalize the reg to keep
lower 16 bits in local xcc */
if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH)))
return normalized_reg;
else
return reg;
}
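/* Emit a WRITE_DATA packet that writes @val to the (XCC-normalized) register
 * @reg, optionally requesting write confirmation.
 */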
static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
bool wc, uint32_t reg, uint32_t val)
{
reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
WRITE_DATA_DST_SEL(0) |
(wc ? WR_CONFIRM : 0));
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, val);
}
static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
int mem_space, int opt, uint32_t addr0,
uint32_t addr1, uint32_t ref, uint32_t mask,
uint32_t inv)
{
/* Only do the normalization on regspace */
if (mem_space == 0) {
addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0);
addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1);
}
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring,
/* memory (1) or register (0) */
(WAIT_REG_MEM_MEM_SPACE(mem_space) |
WAIT_REG_MEM_OPERATION(opt) | /* wait */
WAIT_REG_MEM_FUNCTION(3) | /* equal */
WAIT_REG_MEM_ENGINE(eng_sel)));
if (mem_space)
BUG_ON(addr0 & 0x3); /* Dword align */
amdgpu_ring_write(ring, addr0);
amdgpu_ring_write(ring, addr1);
amdgpu_ring_write(ring, ref);
amdgpu_ring_write(ring, mask);
amdgpu_ring_write(ring, inv); /* poll interval */
}
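/* Basic ring test: write 0xDEADBEEF to SCRATCH_REG0 of this ring's XCC via a
 * SET_UCONFIG_REG packet and poll the register until the value shows up.
 */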
static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
{
uint32_t scratch_reg0_offset, xcc_offset;
struct amdgpu_device *adev = ring->adev;
uint32_t tmp = 0;
unsigned i;
int r;
/* Use register offset which is local to XCC in the packet */
xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
WREG32(scratch_reg0_offset, 0xCAFEDEAD);
tmp = RREG32(scratch_reg0_offset);
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(scratch_reg0_offset);
if (tmp == 0xDEADBEEF)
			break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
return r;
}
static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
unsigned index;
uint64_t gpu_addr;
uint32_t tmp;
long r;
r = amdgpu_device_wb_get(adev, &index);
if (r)
return r;
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
ib.ptr[2] = lower_32_bits(gpu_addr);
ib.ptr[3] = upper_32_bits(gpu_addr);
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err2;
	r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
goto err2;
}
tmp = adev->wb.wb[index];
if (tmp == 0xDEADBEEF)
r = 0;
else
r = -EINVAL;
err2:
amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
return r;
}
/* This value might differ per partition */
static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
mutex_lock(&adev->gfx.gpu_clock_mutex);
WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&adev->gfx.gpu_clock_mutex);
return clock;
}
static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
{
amdgpu_ucode_release(&adev->gfx.pfp_fw);
amdgpu_ucode_release(&adev->gfx.me_fw);
amdgpu_ucode_release(&adev->gfx.ce_fw);
amdgpu_ucode_release(&adev->gfx.rlc_fw);
amdgpu_ucode_release(&adev->gfx.mec_fw);
amdgpu_ucode_release(&adev->gfx.mec2_fw);
kfree(adev->gfx.rlc.register_list_format);
}
static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
const char *chip_name)
{
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
uint16_t version_minor;
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin" , chip_name);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
if (err)
amdgpu_ucode_release(&adev->gfx.rlc_fw);
return err;
}
static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
const char *chip_name)
{
int err;
	if (amdgpu_sriov_vf(adev)) {
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_sjt_mec.bin", chip_name);
		if (err)
			err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
						   AMDGPU_UCODE_REQUIRED,
						   "amdgpu/%s_mec.bin", chip_name);
	} else
		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
					   AMDGPU_UCODE_REQUIRED,
					   "amdgpu/%s_mec.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
out:
if (err)
amdgpu_ucode_release(&adev->gfx.mec_fw);
return err;
}
static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
char ucode_prefix[15];
int r;
	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
if (r)
return r;
r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
if (r)
return r;
return r;
}
static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
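/* Allocate the MEC HPD/EOP buffer covering all enabled compute queues and
 * copy the MEC firmware image into a GTT buffer object that the CPC
 * instruction cache is later pointed at.
 */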
static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
{
int r, i, num_xcc;
u32 *hpd;
const __le32 *fw_data;
unsigned fw_size;
u32 *fw;
size_t mec_hpd_size;
const struct gfx_firmware_header_v1_0 *mec_hdr;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++)
bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
AMDGPU_MAX_COMPUTE_QUEUES);
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size =
adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
if (mec_hpd_size) {
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
if (r) {
dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n" , r);
gfx_v9_4_3_mec_fini(adev);
return r;
}
if (amdgpu_emu_mode == 1) {
for (i = 0; i < mec_hpd_size / 4; i++) {
memset((void *)(hpd + i), 0, 4);
if (i % 50 == 0)
msleep(1);
}
} else {
memset(hpd, 0, mec_hpd_size);
}
amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
}
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
fw_data = (const __le32 *)
(adev->gfx.mec_fw->data +
le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.mec_fw_obj,
&adev->gfx.mec.mec_fw_gpu_addr,
(void **)&fw);
if (r) {
dev_warn(adev->dev, "(%d) create mec firmware bo failed\n" , r);
gfx_v9_4_3_mec_fini(adev);
return r;
}
memcpy(fw, fw_data, fw_size);
amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
return 0;
}
static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 instance, int xcc_id)
{
u32 data;
if (instance == 0xffffffff)
data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
INSTANCE_BROADCAST_WRITES, 1);
else
data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
INSTANCE_INDEX, instance);
if (se_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
SE_BROADCAST_WRITES, 1);
else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
if (sh_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
SH_BROADCAST_WRITES, 1);
else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}
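/* Read a wave-indexed register through the SQ_IND_INDEX/SQ_IND_DATA register
 * pair of the selected XCC.
 */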
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
{
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(address << SQ_IND_INDEX__INDEX__SHIFT) |
(SQ_IND_INDEX__FORCE_READ_MASK));
return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}
static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t regno, uint32_t num, uint32_t *out)
{
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(regno << SQ_IND_INDEX__INDEX__SHIFT) |
(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
(SQ_IND_INDEX__FORCE_READ_MASK) |
(SQ_IND_INDEX__AUTO_INCR_MASK));
while (num--)
*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}
static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
uint32_t xcc_id, uint32_t simd, uint32_t wave,
uint32_t *dst, int *no_fields)
{
/* type 1 wave data */
dst[(*no_fields)++] = 1;
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
}
static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t start,
uint32_t size, uint32_t *dst)
{
wave_read_regs(adev, xcc_id, simd, wave, 0,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t start, uint32_t size,
uint32_t *dst)
{
wave_read_regs(adev, xcc_id, simd, wave, thread,
start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}
static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}
static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
{
u32 xcp_ctl;
/* Value is expected to be the same on all, fetch from first instance */
xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);
return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
}
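/* Program the compute partition (XCP) layout: either ask the PSP to do the
 * spatial partitioning or write NUM_XCC_IN_XCP/VIRTUAL_XCC_ID directly into
 * CP_HYP_XCP_CTL on every XCC.
 */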
static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
int num_xccs_per_xcp)
{
int ret, i, num_xcc;
u32 tmp = 0;
if (adev->psp.funcs) {
ret = psp_spatial_partition(&adev->psp,
NUM_XCC(adev->gfx.xcc_mask) /
num_xccs_per_xcp);
if (ret)
return ret;
} else {
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++) {
tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
num_xccs_per_xcp);
tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
i % num_xccs_per_xcp);
WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
tmp);
}
ret = 0;
}
adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;
return ret;
}
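/* Translate an IH node id into the logical XCC index that owns it, based on
 * the XCC mask; returns -EINVAL when no XCC maps to the node.
 */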
static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
{
int xcc;
xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
if (!xcc) {
dev_err(adev->dev, "Couldn't find xcc mapping from IH node" );
return -EINVAL;
}
return xcc - 1;
}
static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
.read_wave_data = &gfx_v9_4_3_read_wave_data,
.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
.get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
};
static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
struct aca_bank *bank, enum aca_smu_type type,
void *data)
{
struct aca_bank_info info;
u64 misc0;
u32 instlo;
int ret;
ret = aca_bank_info_decode(bank, &info);
if (ret)
return ret;
/* NOTE: overwrite info.die_id with xcd id for gfx */
instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
instlo &= GENMASK(31, 1);
info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;
misc0 = bank->regs[ACA_REG_IDX_MISC0];
switch (type) {
case ACA_SMU_TYPE_UE:
bank->aca_err_type = ACA_ERROR_TYPE_UE;
ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		bank->aca_err_type = ACA_ERROR_TYPE_CE;
		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
						     ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
return -EINVAL;
}
return ret;
}
static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
enum aca_smu_type type, void *data)
{
u32 instlo;
instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
instlo &= GENMASK(31, 1);
switch (instlo) {
case mmSMNAID_XCD0_MCA_SMU:
case mmSMNAID_XCD1_MCA_SMU:
case mmSMNXCD_XCD0_MCA_SMU:
		return true;
	default:
		break;
	}
	return false;
}
static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
.aca_bank_parser = gfx_v9_4_3_aca_bank_parser,
.aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
};
static const struct aca_info gfx_v9_4_3_aca_info = {
.hwip = ACA_HWIP_TYPE_SMU,
.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
.bank_ops = &gfx_v9_4_3_aca_bank_ops,
};
static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
adev->gfx.ras = &gfx_v9_4_3_ras;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
adev->gfx.config.gb_addr_config = GOLDEN_GB_ADDR_CONFIG;
adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
REG_GET_FIELD(
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
NUM_PIPES);
adev->gfx.config.max_tile_pipes =
adev->gfx.config.gb_addr_config_fields.num_pipes;
adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
REG_GET_FIELD(
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
NUM_BANKS);
adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
REG_GET_FIELD(
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
MAX_COMPRESSED_FRAGS);
adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
REG_GET_FIELD(
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
NUM_RB_PER_SE);
adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
REG_GET_FIELD(
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
NUM_SHADER_ENGINES);
adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
REG_GET_FIELD(
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
PIPE_INTERLEAVE_SIZE));
return 0;
}
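/* Initialize one compute ring for the given XCC/MEC/pipe/queue: set up the
 * doorbell index, the EOP address inside the shared HPD buffer, the VM hub
 * and the EOP interrupt source, then initialize the ring itself.
 */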
static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int xcc_id, int mec, int pipe, int queue)
{
unsigned irq_type;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
unsigned int hw_prio;
uint32_t xcc_doorbell_start;
ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
ring_id];
/* mec0 is me1 */
ring->xcc_id = xcc_id;
ring->me = mec + 1;
ring->pipe = pipe;
ring->queue = queue;
ring->ring_obj = NULL;
	ring->use_doorbell = true;
xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
xcc_id * adev->doorbell_index.xcc_doorbell_range;
ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
(ring_id + xcc_id * adev->gfx.num_compute_rings) *
GFX9_MEC_HPD_SIZE;
ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
sprintf(ring->name, "comp_%d.%d.%d.%d" ,
ring->xcc_id, ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
hw_prio, NULL);
}
static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
{
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
uint32_t *ptr, num_xcc, inst;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
ptr = kcalloc(reg_count * num_xcc, sizeof (uint32_t), GFP_KERNEL);
if (!ptr) {
DRM_ERROR("Failed to allocate memory for GFX IP Dump\n" );
adev->gfx.ip_dump_core = NULL;
} else {
adev->gfx.ip_dump_core = ptr;
}
/* Allocate memory for compute queue registers for all the instances */
reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst * num_xcc, sizeof (uint32_t), GFP_KERNEL);
if (!ptr) {
DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n" );
adev->gfx.ip_dump_compute_queues = NULL;
} else {
adev->gfx.ip_dump_compute_queues = ptr;
}
}
static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
{
int i, j, k, r, ring_id, xcc_id, num_xcc;
struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
		adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex;
		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex);
		if (adev->gfx.mec_fw_version >= 153) {
			adev->gfx.enable_cleaner_shader = true;
			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
			if (r) {
				adev->gfx.enable_cleaner_shader = false;
				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
			}
		}
		break;
	default:
		adev->gfx.enable_cleaner_shader = false;
		break;
}
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
if (r)
return r;
/* Bad opcode Event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
&adev->gfx.bad_op_irq);
if (r)
return r;
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
r = adev->gfx.rlc.funcs->init(adev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n" );
return r;
}
r = gfx_v9_4_3_mec_init(adev);
if (r) {
DRM_ERROR("Failed to init MEC BOs!\n" );
return r;
}
/* set up the compute queues - allocate horizontally across pipes */
for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
ring_id = 0;
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
k++) {
					if (!amdgpu_gfx_is_mec_queue_enabled(
							adev, xcc_id, i, k, j))
						continue;
r = gfx_v9_4_3_compute_ring_init(adev,
ring_id,
xcc_id,
i, k, j);
if (r)
return r;
ring_id++;
}
}
}
r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
if (r) {
DRM_ERROR("Failed to init KIQ BOs!\n" );
return r;
}
r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
if (r)
return r;
		/* create MQD for all compute queues as well as KIQ for SRIOV case */
		r = amdgpu_gfx_mqd_sw_init(adev,
					   sizeof(struct v9_mqd_allocation), xcc_id);
if (r)
return r;
}
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
if ((adev->gfx.mec_fw_version >= 155) &&
!amdgpu_sriov_vf(adev)) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
		break;
	case IP_VERSION(9, 5, 0):
		if ((adev->gfx.mec_fw_version >= 21) &&
		    !amdgpu_sriov_vf(adev)) {
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
		}
		break;
	default:
		break;
}
r = gfx_v9_4_3_gpu_early_init(adev);
if (r)
return r;
r = amdgpu_gfx_ras_sw_init(adev);
if (r)
return r;
r = amdgpu_gfx_sysfs_init(adev);
if (r)
return r;
gfx_v9_4_3_alloc_ip_dump(adev);
return 0;
}
static int gfx_v9_4_3_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, num_xcc;
struct amdgpu_device *adev = ip_block->adev;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
for (i = 0; i < num_xcc; i++) {
amdgpu_gfx_mqd_sw_fini(adev, i);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
amdgpu_gfx_kiq_fini(adev, i);
}
amdgpu_gfx_cleaner_shader_sw_fini(adev);
gfx_v9_4_3_mec_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
gfx_v9_4_3_free_microcode(adev);
amdgpu_gfx_sysfs_fini(adev);
kfree(adev->gfx.ip_dump_core);
kfree(adev->gfx.ip_dump_compute_queues);
return 0;
}
#define DEFAULT_SH_MEM_BASES (0x6000)
static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
int xcc_id)
{
int i;
uint32_t sh_mem_config;
uint32_t sh_mem_bases;
uint32_t data;
/*
* Configure apertures:
* LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
* Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
* GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
*/
sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
mutex_lock(&adev->srbm_mutex);
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
/* CP and shaders */
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);
/* Enable trap for each kfd vmid. */
data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
}
soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
mutex_unlock(&adev->srbm_mutex);
/*
* Initialize all compute VMIDs to have no GDS, GWS, or OA
* access. These should be enabled by FW for target VMIDs.
*/
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
}
}
static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
{
int vmid;
/*
* Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
* access. Compute VMIDs should be enabled by FW for target VMIDs,
* the driver can enable them for graphics. VMID0 should maintain
* access so that HWS firmware can save/restore entries.
*/
for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
}
}
/* For ASICs that need the xnack chain and whose MEC firmware supports it, set
 * the SQ_CONFIG1 DISABLE_XNACK_CHECK_IN_RETRY_DISABLE bit and inform KFD to
 * set the xnack_chain bit in SET_RESOURCES.
 */
static void gfx_v9_4_3_xcc_init_sq(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
if (!(adev->gmc.xnack_flags & AMDGPU_GMC_XNACK_FLAG_CHAIN))
		return;
data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1);
data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_CONFIG1, data);
}
static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
int xcc_id)
{
u32 tmp;
int i;
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
/* CP and shaders */
if (i == 0) {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
!!adev->gmc.noretry);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
regSH_MEM_CONFIG, tmp);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
regSH_MEM_BASES, 0);
} else {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
!!adev->gmc.noretry);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
regSH_MEM_CONFIG, tmp);
tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
(adev->gmc.private_aperture_start >>
48));
tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
(adev->gmc.shared_aperture_start >>
48));
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
regSH_MEM_BASES, tmp);
}
}
soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));
mutex_unlock(&adev->srbm_mutex);
gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
gfx_v9_4_3_xcc_init_sq(adev, xcc_id);
}
static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
{
int i, num_xcc;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
adev->gfx.config.db_debug2 =
RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
/* ToDo: GC 9.4.4 */
case IP_VERSION(9, 4, 3):
if (adev->gfx.mec_fw_version >= 184 &&
(amdgpu_sriov_reg_access_sq_config(adev) ||
!amdgpu_sriov_vf(adev)))
adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
		break;
	case IP_VERSION(9, 5, 0):
		if (adev->gfx.mec_fw_version >= 23)
			adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
		break;
	default:
		break;
}
for (i = 0; i < num_xcc; i++)
gfx_v9_4_3_xcc_constants_init(adev, i);
}
static void
gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
int xcc_id)
{
WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
}
static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
/*
* Rlc save restore list is workable since v2_1.
*/
gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}
static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
}
static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
{
uint32_t rlc_setting;
/* if RLC is not enabled, do nothing */
rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return false;
	return true;
}
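/* Request RLC safe mode on one XCC: write the CMD/MESSAGE handshake into
 * RLC_SAFE_MODE and poll until the RLC acknowledges by clearing CMD.
 */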
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
data = RLC_SAFE_MODE__CMD_MASK;
data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
/* wait for RLC_SAFE_MODE */
for (i = 0; i < adev->usec_timeout; i++) {
if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
udelay(1);
}
}
static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
int xcc_id)
{
uint32_t data;
data = RLC_SAFE_MODE__CMD_MASK;
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
}
static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
int xcc_id, num_xcc;
struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
}
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}
static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
{
/* init spm vmid with 0xf */
if (adev->gfx.rlc.funcs->update_spm_vmid)
adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
return 0;
}
static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
int xcc_id)
{
u32 i, j, k;
u32 mask;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
xcc_id);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
							    0xffffffff,
							    0xffffffff, xcc_id);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
					 i, j);
				return;
}
}
}
gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
xcc_id);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
for (k = 0; k < adev->usec_timeout; k++) {
if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
udelay(1);
}
}
static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable, int xcc_id)
{
u32 tmp;
/* These interrupts should be enabled to drive DS clock */
tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
}
static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
{
WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
RLC_ENABLE_F32, 0);
	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
}
static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
{
int i, num_xcc;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++)
gfx_v9_4_3_xcc_rlc_stop(adev, i);
}
static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
{
WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
SOFT_RESET_RLC, 1);
udelay(50);
WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
SOFT_RESET_RLC, 0);
udelay(50);
}
static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
{
int i, num_xcc;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++)
gfx_v9_4_3_xcc_rlc_reset(adev, i);
}
static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
{
WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
RLC_ENABLE_F32, 1);
udelay(50);
	/* carrizo enables the cp interrupt only after cp is initialized */
	if (!(adev->flags & AMD_IS_APU)) {
		gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
udelay(50);
}
}
static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
{
#ifdef AMDGPU_RLC_DEBUG_RETRY
u32 rlc_ucode_ver;
#endif
int i, num_xcc;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++) {
gfx_v9_4_3_xcc_rlc_start(adev, i);
#ifdef AMDGPU_RLC_DEBUG_RETRY
/* RLC_GPM_GENERAL_6 : RLC Ucode version */
rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
		if (rlc_ucode_ver == 0x108) {
			dev_info(adev->dev,
				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x08%x / fw_ver == %i\n",
				 rlc_ucode_ver, adev->gfx.rlc_fw_version);
/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
* default is 0x9C4 to create a 100us interval */
WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
* to disable the page fault retry interrupts, default is
* 0x100 (256) */
WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
}
#endif
}
}
static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
int xcc_id)
{
const struct rlc_firmware_header_v2_0 *hdr;
const __le32 *fw_data;
unsigned i, fw_size;
if (!adev->gfx.rlc_fw)
return -EINVAL;
hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
amdgpu_ucode_print_rlc_hdr(&hdr->header);
fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
RLCG_UCODE_LOADING_START_ADDRESS);
for (i = 0; i < fw_size; i++) {
if (amdgpu_emu_mode == 1 && i % 100 == 0) {
dev_info(adev->dev, "Write RLC ucode data %u DWs\n" , i);
msleep(1);
}
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
}
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
return 0;
}
static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
{
int r;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
/* legacy rlc firmware loading */
r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
if (r)
return r;
gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
}
amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
/* disable CG */
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
return 0;
}
static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
{
int r, i, num_xcc;
if (amdgpu_sriov_vf(adev))
return 0;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++) {
r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
if (r)
return r;
}
return 0;
}
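/* Update the VMID used for streaming performance monitor (SPM) accesses in
 * RLC_SPM_MC_CNTL, using non-KIQ register access in the SRIOV one-VF case.
 */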
static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned vmid)
{
u32 reg, pre_data, data;
reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
pre_data = RREG32_NO_KIQ(reg);
else
pre_data = RREG32(reg);
data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
if (pre_data != data) {
if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) {
WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
} else
WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
}
}
static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
{SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
{SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
};
static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
uint32_t offset,
struct soc15_reg_rlcg *entries, int arr_size)
{
int i, inst;
uint32_t reg;
if (!entries)
		return false;
for (i = 0; i < arr_size; i++) {
const struct soc15_reg_rlcg *entry;
entry = &entries[i];
inst = adev->ip_map.logical_to_dev_inst ?
adev->ip_map.logical_to_dev_inst(
adev, entry->hwip, entry->instance) :
entry->instance;
reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
entry->reg;
if (offset == reg)
			return true;
	}
	return false;
}
static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
{
return gfx_v9_4_3_check_rlcg_range(adev, offset,
(void *)rlcg_access_gc_9_4_3,
ARRAY_SIZE(rlcg_access_gc_9_4_3));
}
static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
bool enable, int xcc_id)
{
if (enable) {
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
} else {
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
CP_MEC_CNTL__MEC_ME1_HALT_MASK |
CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		adev->gfx.kiq[xcc_id].ring.sched.ready = false;
}
udelay(50);
}
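/* Load the MEC1 microcode: halt the MEC pipes, point the CPC instruction
 * cache at the firmware buffer object and program the jump table through the
 * CP_MEC_ME1_UCODE_ADDR/DATA registers.
 */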
static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
int xcc_id)
{
const struct gfx_firmware_header_v1_0 *mec_hdr;
const __le32 *fw_data;
unsigned i;
u32 tmp;
u32 mec_ucode_addr_offset;
u32 mec_ucode_data_offset;
if (!adev->gfx.mec_fw)
return -EINVAL;
	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
fw_data = (const __le32 *)
(adev->gfx.mec_fw->data +
le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
tmp = 0;
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
mec_ucode_addr_offset =
SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
mec_ucode_data_offset =
SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);
/* MEC1 */
WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
for (i = 0; i < mec_hdr->jt_size; i++)
WREG32(mec_ucode_data_offset,
le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
	/* TODO: Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
return 0;
}
/* KIQ functions */
static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
{
uint32_t tmp;
struct amdgpu_device *adev = ring->adev;
/* tell RLC which is KIQ queue */
tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
{
struct amdgpu_device *adev = ring->adev;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
}
}
}
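/* Fill the v9 MQD from the ring state: EOP base and size, doorbell control,
 * MQD/PQ base addresses, rptr/wptr report addresses and queue priority. Only
 * the KIQ marks the queue active here; compute queues are activated through
 * the KIQ map_queues packet instead.
 */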
static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
uint32_t tmp;
mqd->header = 0xC0310800;
mqd->compute_pipelinestat_enable = 0x00000001;
mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000003;
mqd->dynamic_cu_mask_addr_lo =
lower_32_bits(ring->mqd_gpu_addr
+ offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
mqd->dynamic_cu_mask_addr_hi =
upper_32_bits(ring->mqd_gpu_addr
+ offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */
tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL);
if (ring->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_OFFSET, ring->doorbell_index);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_SOURCE, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_HIT, 0);
if (amdgpu_sriov_multi_vf_mode(adev))
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_MODE, 1);
} else {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 0);
}
mqd->cp_hqd_pq_doorbell_control = tmp;
/* disable the queue if it's active */
ring->wptr = 0;
mqd->cp_hqd_dequeue_request = 0;
mqd->cp_hqd_pq_rptr = 0;
mqd->cp_hqd_pq_wptr_lo = 0;
mqd->cp_hqd_pq_wptr_hi = 0;
/* set the pointer to the MQD */
mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
/* set MQD vmid to 0 */
tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
mqd->cp_mqd_control = tmp;
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
hqd_gpu_addr = ring->gpu_addr >> 8;
mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
/* set up the HQD, this is similar to CP_RB0_CNTL */
tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(ring->ring_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
mqd->cp_hqd_pq_control = tmp;
/* set the wb address whether it's enabled or not */
wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_rptr_report_addr_hi =
upper_32_bits(wb_gpu_addr) & 0xffff;
/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
ring->wptr = 0;
mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR);
/* set the vmid for the queue */
mqd->cp_hqd_vmid = 0;
tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE);
tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
mqd->cp_hqd_persistent_state = tmp;
/* set MIN_IB_AVAIL_SIZE */
tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
/* set static priority for a queue/ring */
gfx_v9_4_3_mqd_set_priority(ring, mqd);
mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);
	/* map_queues packet doesn't need to activate the queue,
	 * so only the kiq needs to set this field.
	 */
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
mqd->cp_hqd_active = 1;
return 0;
}
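/* Program the HQD registers of the currently selected queue directly from
 * the MQD contents; used for the KIQ ring, which is brought up without going
 * through the KIQ map_queues path.
 */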
static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
int xcc_id)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
int j;
/* disable wptr polling */
WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
mqd->cp_hqd_eop_base_addr_lo);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
mqd->cp_hqd_eop_base_addr_hi);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
mqd->cp_hqd_eop_control);
/* enable doorbell? */
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
mqd->cp_hqd_pq_doorbell_control);
/* disable the queue if it's active */
if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
for (j = 0; j < adev->usec_timeout; j++) {
if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
break ;
udelay(1);
}
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
mqd->cp_hqd_dequeue_request);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
mqd->cp_hqd_pq_rptr);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
mqd->cp_hqd_pq_wptr_lo);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
mqd->cp_hqd_pq_wptr_hi);
}
/* set the pointer to the MQD */
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
mqd->cp_mqd_base_addr_lo);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
mqd->cp_mqd_base_addr_hi);
/* set MQD vmid to 0 */
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
mqd->cp_mqd_control);
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
mqd->cp_hqd_pq_base_lo);
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
mqd->cp_hqd_pq_base_hi);
/* set up the HQD, this is similar to CP_RB0_CNTL */
WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
mqd->cp_hqd_pq_control);
/* set the wb address whether it's enabled or not */