/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
{
	int i;

	/*
	 * The only chips with SDMAv4 and ULV are VG10 and VG20.
	 * Server SKUs take a different hysteresis setting from other SKUs.
	 */
	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
		if (adev->pdev->device == 0x6860)
			break;
		return;
	case IP_VERSION(4, 2, 0):
		if (adev->pdev->device == 0x66a1)
			break;
		return;
	default:
		return;
	}
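	/* 0x6860 (VG10) and 0x66a1 (VG20) are the server SKUs referred to above;
	 * every other device ID takes the early return and keeps the defaults.
	 */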
for (i = 0; i < adev->sdma.num_instances; i++) {
uint32_t temp;
/**
 * sdma_v4_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */

// emulation only, won't work on real chip
// vega10 real chip needs to use PSP to load firmware
static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
	int ret, i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
			    IP_VERSION(4, 2, 2) ||
		    amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
			    IP_VERSION(4, 4, 0)) {
			/* Arcturus & Aldebaran will leverage the same FW memory
			   for every SDMA instance */
			ret = amdgpu_sdma_init_microcode(adev, 0, true);
			break;
		} else {
			ret = amdgpu_sdma_init_microcode(adev, i, false);
			if (ret)
				return ret;
		}
	}

	return ret;
}
/**
 * sdma_v4_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
u64 *rptr;
/* XXX check if swapping is necessary on BE */
rptr = ((u64 *)ring->rptr_cpu_addr);
/**
 * sdma_v4_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
u64 wptr;
	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI);
wptr = wptr << 32;
wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR);
DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
ring->me, wptr);
}
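	/* The register/writeback value is a byte offset into the ring buffer;
	 * the driver tracks wptr in dwords, hence the shift by 2.
	 */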
return wptr >> 2;
}
/**
 * sdma_v4_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VEGA10+).
 */
static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
/**
 * sdma_v4_0_page_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
u64 wptr;
	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
} else {
wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI);
wptr = wptr << 32;
wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR);
}
return wptr >> 2;
}
/**
 * sdma_v4_0_page_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VEGA10+).
 */
static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
u64 *wb = (u64 *)ring->wptr_cpu_addr;
/* XXX check if swapping is necessary on BE */
WRITE_ONCE(*wb, (ring->wptr << 2));
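		/* Ring the doorbell with the same byte-offset value so the
		 * engine picks up the new write pointer.
		 */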
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
uint64_t wptr = ring->wptr << 2;
/**
 * sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VEGA10).
 */
static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	/* zero in first two bits */
	BUG_ON(addr & 0x3);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		/* zero in first two bits */
		BUG_ON(addr & 0x3);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(seq));
}
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
/**
 * sdma_v4_0_gfx_enable - enable the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable SDMA RB/IB
 *
 * Control the gfx async dma ring buffers (VEGA10).
 */
static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	u32 rb_cntl, ib_cntl;
	int i;
	/*
	 * Enable SDMA utilization. It's only supported on
	 * Arcturus for the moment and firmware version 14
	 * and above.
	 */
	if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
		    IP_VERSION(4, 2, 2) &&
	    adev->sdma.instance[i].fw_version >= 14)
		WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable);
	/* Extend page fault timeout to avoid interrupt storm */
WREG32_SDMA(i, mmSDMA0_UTCL1_TIMEOUT, 0x00800080);
}
}
/**
 * sdma_v4_0_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VEGA10).
 */
static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v4_0_gfx_enable(adev, enable);
		sdma_v4_0_rlc_stop(adev);
		if (adev->sdma.has_page_queue)
sdma_v4_0_page_stop(adev);
}
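	/*
	 * HALT=0 lets the F32 microengine run, HALT=1 stops it; apply the
	 * requested state to every SDMA instance.
	 */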
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
WREG32_SDMA(i, mmSDMA0_F32_CNTL, f32_cntl);
}
}
/*
 * sdma_v4_0_rb_cntl - get parameters for rb_cntl
 */
static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
{
	/* Set ring buffer size in dwords */
	uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
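	/* ring_size is in bytes, so ring_size / 4 is the dword count;
	 * the RB_CNTL size field is programmed with the log2 of that count.
	 */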
/* Enable HW based PG. */
def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
	data |= SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK;
	if (data != def)
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
/* enable interrupt */
def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
	data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
	if (data != def)
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
	/* Configure hold time to filter invalid power on/off requests. Use the default for now */
def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
data &= ~SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK;
	data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK);
	/* Configure switch time for hysteresis purposes. Use the default for now */
	data &= ~SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK;
	data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK);
	if (data != def)
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
}
static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
{
	if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA))
		return;

	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
		sdma_v4_1_init_power_gating(adev);
		sdma_v4_1_update_power_gating(adev, true);
		break;
	default:
		break;
}
}
/**
 * sdma_v4_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VEGA10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_0_rlc_resume(struct amdgpu_device *adev)
{
sdma_v4_0_init_pg(adev);
return 0;
}
/**
 * sdma_v4_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v4_0_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;
/**
 * sdma_v4_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VEGA10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, r = 0;
if (amdgpu_sriov_vf(adev)) {
sdma_v4_0_ctx_switch_enable(adev, false);
sdma_v4_0_enable(adev, false);
} else {
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			r = sdma_v4_0_load_microcode(adev);
			if (r)
				return r;
}
/* unhalt the MEs */
		sdma_v4_0_enable(adev, true);
		/* enable sdma ring preemption */
sdma_v4_0_ctx_switch_enable(adev, true);
}
	/* start the gfx rings and rlc compute queues */
	for (i = 0; i < adev->sdma.num_instances; i++) {
uint32_t temp;
WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
		sdma_v4_0_gfx_resume(adev, i);
		if (adev->sdma.has_page_queue)
sdma_v4_0_page_resume(adev, i);
/* set utc l1 enable flag always to 1 */
temp = RREG32_SDMA(i, mmSDMA0_CNTL);
temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
WREG32_SDMA(i, mmSDMA0_CNTL, temp);
if (amdgpu_sriov_vf(adev)) {
sdma_v4_0_ctx_switch_enable(adev, true);
sdma_v4_0_enable(adev, true);
} else {
		r = sdma_v4_0_rlc_resume(adev);
		if (r)
			return r;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->sdma.has_page_queue) {
			struct amdgpu_ring *page = &adev->sdma.instance[i].page;

			r = amdgpu_ring_test_helper(page);
			if (r)
				return r;
}
}
return r;
}
/**
 * sdma_v4_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VEGA10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;
/**
 * sdma_v4_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (VEGA10).
 * Returns 0 on success, error on failure.
 */
static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	long r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;
/**
 * sdma_v4_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VEGA10).
 */
static void sdma_v4_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src, unsigned count)
{
	unsigned bytes = count * 8;
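	/* each GPU page-table entry is 8 bytes, so copy count * 8 bytes */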
/**
 * sdma_v4_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (VEGA10).
 */
static void sdma_v4_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;
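	/* each 8-byte entry takes two dwords in the write packet */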
/**
 * sdma_v4_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VEGA10).
 */
static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
ib->ptr[ib->length_dw++] = upper_32_bits(flags);
ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(addr);
ib->ptr[ib->length_dw++] = incr; /* increment size */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}
/**
 * sdma_v4_0_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 */
static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;
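	/*
	 * Pad the IB up to the next multiple of 8 dwords; when the engine
	 * supports burst NOPs, the first filler dword is a burst NOP header
	 * that covers all of the padding in a single packet.
	 */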
	pad_count = (-ib->length_dw) & 7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
/**
 * sdma_v4_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (VEGA10).
 */
static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
/**
 * sdma_v4_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VEGA10).
 */
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
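	/* The common GMC code emits the actual flush packets through this
	 * ring's register write/wait callbacks.
	 */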
amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}
	/* SDMA trap event */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
				      SDMA0_4_0__SRCID__SDMA_TRAP,
				      &adev->sdma.trap_irq);
		if (r)
			return r;
}
	/* SDMA SRAM ECC event */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
				      SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
				      &adev->sdma.ecc_irq);
		if (r)
			return r;
}
	/* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
				      SDMA0_4_0__SRCID__SDMA_VM_HOLE,
				      &adev->sdma.vm_hole_irq);
		if (r)
			return r;

		r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
				      SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
				      &adev->sdma.doorbell_invalid_irq);
		if (r)
			return r;

		r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
				      SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
				      &adev->sdma.pool_timeout_irq);
		if (r)
			return r;

		r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
				      SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
				      &adev->sdma.srbm_write_irq);
		if (r)
			return r;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
/* doorbell size is 2 dwords, get DWORD offset */
ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
		/*
		 * On Arcturus, SDMA instance 5~7 has a different vmhub
		 * type (AMDGPU_MMHUB1).
		 */
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
			    IP_VERSION(4, 2, 2) &&
		    i >= 5)
			ring->vm_hub = AMDGPU_MMHUB1(0);
		else
			ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
if (adev->sdma.has_page_queue) {
ring = &adev->sdma.instance[i].page;
ring->ring_obj = NULL;
ring->use_doorbell = true;
			/* paging queue uses the same doorbell index/routing as the gfx queue
			 * with a 0x400 (4096 dwords) offset on the second doorbell page
			 */
			if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
				    IP_VERSION(4, 0, 0) &&
			    amdgpu_ip_version(adev, SDMA0_HWIP, 0) <