/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP.  sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
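/*
 * Illustrative sketch, not part of the driver proper: every sDMA packet
 * begins with a 32-bit header dword whose opcode field is filled in with
 * the SDMA_PKT_HEADER_OP() macro; any further dwords are packet-specific.
 * The simplest case, a single NOP packet, would be queued like this,
 * assuming an already-allocated ring:
 */
#if 0	/* example only */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
	amdgpu_ring_commit(ring);
#endif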
out:
	if (err) {
		pr_err("sdma_v2_4: Failed to load firmware \"%s_sdma%s.bin\"\n",
		       chip_name, i == 0 ? "" : "1");
		for (i = 0; i < adev->sdma.num_instances; i++)
			amdgpu_ucode_release(&adev->sdma.instance[i].fw);
	}
	return err;
}
/**
 * sdma_v2_4_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
{
	/* XXX check if swapping is necessary on BE */
	/* the writeback value is a byte offset; >> 2 converts it to dwords */
	return *ring->rptr_cpu_addr >> 2;
}
/**
 * sdma_v2_4_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;

	return wptr;
}
/**
 * sdma_v2_4_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
	       lower_32_bits(ring->wptr) << 2);
}

/**
 * sdma_v2_4_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu ring pointer
 * @count: the number of NOP packets to insert
 *
 * Insert @count NOP packets, as a single burst NOP where supported (VI).
 */
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
					  SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}
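/*
 * Illustration of the burst path above: insert_nop(ring, 4) with
 * burst_nop set queues one NOP header carrying COUNT = 3 plus three
 * plain NOP dwords; the engine then consumes the three trailing dwords
 * as the burst payload in a single fetch.
 */
#if 0	/* example only: what ends up on the ring */
	amdgpu_ring_write(ring, ring->funcs->nop |
			  SDMA_PKT_NOP_HEADER_COUNT(3));	/* burst header */
	amdgpu_ring_write(ring, ring->funcs->nop);		/* payload */
	amdgpu_ring_write(ring, ring->funcs->nop);		/* payload */
	amdgpu_ring_write(ring, ring->funcs->nop);		/* payload */
#endif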
/**
 * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	/* IB packet must end on an 8 DW boundary */
	sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}
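/*
 * Worked example for the padding math above: the INDIRECT packet is six
 * dwords, so it ends on an 8 DW boundary exactly when it starts at
 * wptr % 8 == 2.  If wptr % 8 == 5, then (2 - 5) & 7 == 5, so five NOP
 * dwords are inserted; the packet then starts at offset 2 mod 8 and
 * 2 + 6 lands on the boundary.
 */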
/**
 * sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;

	if (ring->me == 0)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
/**
 * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
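/*
 * Illustration of the 64-bit case above: with AMDGPU_FENCE_FLAG_64BIT
 * set, two FENCE packets land on the ring, one per 32-bit half of the
 * sequence number, followed by the TRAP that raises the interrupt:
 *
 *   FENCE  addr      <- lower_32_bits(seq)
 *   FENCE  addr + 4  <- upper_32_bits(seq)
 *   TRAP
 */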
/**
 * sdma_v2_4_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
	u32 rb_cntl, ib_cntl;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}
}

/**
 * sdma_v2_4_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}
/**
 * sdma_v2_4_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v2_4_gfx_stop(adev);
		sdma_v2_4_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
	}
}
/**
 * sdma_v2_4_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		/* ... per-ring rb_cntl/ib_cntl and ring address programming ... */
	}

	sdma_v2_4_enable(adev, true);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}
/**
 * sdma_v2_4_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}
/**
 * sdma_v2_4_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_start(struct amdgpu_device *adev)
{
	int r;

	/* halt the engine before programming */
	sdma_v2_4_enable(adev, false);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v2_4_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v2_4_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}
/**
 * sdma_v2_4_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	/* ... write a test pattern via an SDMA_OP_WRITE packet and poll
	 * the writeback slot for it ... */

	return r;
}
/**
 * sdma_v2_4_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	/* ... build a one-packet IB, submit it, and wait on its fence ... */

	return r;
}
/**
 * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
 * sdma_v2_4_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_WRITE_UNTILED_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw - 1;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}
/**
 * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count; /* number of entries */
}
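/*
 * Usage sketch with illustrative values (pe, addr and the flags choice
 * are assumptions, not taken from this file): mapping 16 physically
 * contiguous 4 KiB pages would queue a single ten-dword GEN_PTEPDE
 * packet.
 */
#if 0	/* example only */
	sdma_v2_4_vm_set_pte_pde(ib, pe, addr, 16, AMDGPU_GPU_PAGE_SIZE,
				 AMDGPU_PTE_VALID);
#endif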
/**
 * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 *
 */
static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

	pad_count = (-ib->length_dw) & 7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
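/*
 * Worked example for the pad count above: (-length_dw) & 7 is the
 * distance to the next multiple of 8.  With ib->length_dw == 13,
 * (-13) & 7 == 3, so three NOP dwords pad the IB out to 16 dwords.
 */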
/**
 * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (CIK).
 */
static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
/**
 * sdma_v2_4_ring_emit_vm_flush - cik vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* ... wait for the flush with a POLL_REGMEM packet ... */
}
static int sdma_v2_4_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_clockgating_state state)
{
	/* XXX handled via the smc on VI */
	return 0;
}