/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
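/*
 * Context restored for this excerpt: the include set, firmware declarations
 * and per-instance register offsets below follow the upstream cik_sdma.c
 * (trimmed to what this excerpt uses).  Helpers the excerpt calls, such as
 * cik_sdma_rlc_stop() and cik_ctx_switch_enable(), are defined in the parts
 * of the file not shown here.
 */
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "cikd.h"
#include "cik_sdma.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin");
MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin");
MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin");
MODULE_FIRMWARE("amdgpu/kabini_sdma.bin");
MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");

/* per-instance register apertures (SDMA0, SDMA1) */
static u32 sdma_offsets[SDMA_MAX_INSTANCE] = {
        SDMA0_REGISTER_OFFSET,
        SDMA1_REGISTER_OFFSET
};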
static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP.  sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
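/*
 * Illustration (not part of the upstream file): dword 0 of every sDMA
 * packet is a header built by the SDMA_PACKET() macro from cikd.h, which
 * packs the fields roughly as
 *
 *   SDMA_PACKET(op, sub_op, e) = ((e & 0xFFFF) << 16) |
 *                                ((sub_op & 0xFF) << 8) |
 *                                ((op & 0xFF) << 0)
 *
 * Opcode-specific payload dwords follow, as in the fence and NOP packets
 * emitted below.
 */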
/**
 * cik_sdma_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        int err = 0, i;

        DRM_DEBUG("\n");

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
                chip_name = "bonaire";
                break;
        case CHIP_HAWAII:
                chip_name = "hawaii";
                break;
        case CHIP_KAVERI:
                chip_name = "kaveri";
                break;
        case CHIP_KABINI:
                chip_name = "kabini";
                break;
        case CHIP_MULLINS:
                chip_name = "mullins";
                break;
        default:
                BUG();
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                if (i == 0)
                        err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
                                                   AMDGPU_UCODE_REQUIRED,
                                                   "amdgpu/%s_sdma.bin", chip_name);
                else
                        err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
                                                   AMDGPU_UCODE_REQUIRED,
                                                   "amdgpu/%s_sdma1.bin", chip_name);
                if (err)
                        goto out;
        }
out:
        if (err) {
                pr_err("cik_sdma: Failed to load firmware \"%s_sdma%s.bin\"\n",
                       chip_name, i == 0 ? "" : "1");
                for (i = 0; i < adev->sdma.num_instances; i++)
                        amdgpu_ucode_release(&adev->sdma.instance[i].fw);
        }
        return err;
}
/**
 * cik_sdma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
{
        u32 rptr;

        rptr = *ring->rptr_cpu_addr;

        return (rptr & 0x3fffc) >> 2;
}
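/*
 * Example (for illustration): the writeback slot holds the ring's read
 * offset in bytes, so a raw value of 0x20 becomes dword index
 * (0x20 & 0x3fffc) >> 2 = 8.
 */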
/**
 * cik_sdma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        /* the RB_WPTR register holds a byte offset; return a dword index */
        return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;
}
/**
 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * and a DMA trap packet to generate an interrupt if needed (CIK).
 */
static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

        /* write the fence */
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, lower_32_bits(seq));

        /* optionally write high bits as well */
        if (write64bit) {
                addr += 4;
                amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
                amdgpu_ring_write(ring, lower_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(seq));
        }

        /* generate an interrupt */
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
}
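/*
 * For reference (derived from the code above): a 32-bit fence costs five
 * ring dwords,
 *
 *   FENCE header, addr_lo, addr_hi, seq_lo, TRAP header
 *
 * and a 64-bit fence adds a second 4-dword FENCE packet at addr + 4
 * carrying the upper half of the sequence number.
 */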
/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
        u32 rb_cntl;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
        }
}
/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
{
        u32 me_cntl;
        int i;

        if (!enable) {
                cik_sdma_gfx_stop(adev);
                cik_sdma_rlc_stop(adev);
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
                if (enable)
                        me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
                else
                        me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
                WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
        }
}
/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        u32 rb_cntl, ib_cntl;
        u32 rb_bufsz;
        int i, j, r;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                /*
                 * Per-instance programming, not shown in this excerpt:
                 * rb_cntl/rb_bufsz set the ring size, the SDMA0_GFX_RB_*
                 * registers take the ring base, rptr/wptr and writeback
                 * address (with a per-VMID SRBM loop over j), and finally
                 * RB_ENABLE and IB_ENABLE (ib_cntl) are set.
                 */
        }

        /* unhalt the MEs, then test each gfx ring */
        cik_sdma_enable(adev, true);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;

                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
        }

        return 0;
}
/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
{
        /* XXX todo */
        return 0;
}
/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct amdgpu_device *adev)
{
        const struct sdma_firmware_header_v1_0 *hdr;
        const __le32 *fw_data;
        u32 fw_size;
        int i, j;

        /* halt the MEs before touching the ucode registers */
        cik_sdma_enable(adev, false);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                if (!adev->sdma.instance[i].fw)
                        return -EINVAL;
                hdr = (const struct sdma_firmware_header_v1_0 *)
                        adev->sdma.instance[i].fw->data;
                amdgpu_ucode_print_sdma_hdr(&hdr->header);
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
                adev->sdma.instance[i].fw_version =
                        le32_to_cpu(hdr->header.ucode_version);
                adev->sdma.instance[i].feature_version =
                        le32_to_cpu(hdr->ucode_feature_version);
                if (adev->sdma.instance[i].feature_version >= 20)
                        adev->sdma.instance[i].burst_nop = true;
                fw_data = (const __le32 *)(adev->sdma.instance[i].fw->data +
                           le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                /* stream the ucode words through the UCODE_DATA port */
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
                for (j = 0; j < fw_size; j++)
                        WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i],
                               le32_to_cpu(fw_data[j]));
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i],
                       adev->sdma.instance[i].fw_version);
        }

        return 0;
}
/**
 * cik_sdma_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_start(struct amdgpu_device *adev)
{
        int r;

        r = cik_sdma_load_microcode(adev);
        if (r)
                return r;

        /* halt the engine before programming */
        cik_sdma_enable(adev, false);
        /* enable sdma ring preemption */
        cik_ctx_switch_enable(adev, true);

        /* start the gfx rings and rlc compute queues */
        r = cik_sdma_gfx_resume(adev);
        if (r)
                return r;
        r = cik_sdma_rlc_resume(adev);
        if (r)
                return r;

        return 0;
}
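/*
 * Note on the ordering above (derived from the functions in this file):
 * cik_sdma_load_microcode() halts the MEs before writing the ucode, ring
 * preemption is enabled while the engines are still halted, and
 * cik_sdma_gfx_resume() only unhalts them once the rings are programmed,
 * right before the ring tests.
 */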
/**
 * cik_sdma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned i;
        unsigned index;
        int r;
        u32 tmp;
        u64 gpu_addr;

        r = amdgpu_device_wb_get(adev, &index);
        if (r)
                return r;

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);

        r = amdgpu_ring_alloc(ring, 5);
        if (r)
                goto error_free_wb;

        /* emit a linear WRITE packet that stores 0xDEADBEEF in the slot */
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE,
                                            SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
        amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
        amdgpu_ring_write(ring, 1); /* number of DWs to follow */
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        /* poll until the value lands or we time out */
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = le32_to_cpu(adev->wb.wb[index]);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

error_free_wb:
        amdgpu_device_wb_free(adev, index);
        return r;
}
/**
 * cik_sdma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
        unsigned index;
        u32 tmp = 0;
        u64 gpu_addr;
        long r;

        r = amdgpu_device_wb_get(adev, &index);
        if (r)
                return r;

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;

        /* IB payload: one linear WRITE of 0xDEADBEEF to the writeback slot */
        ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
                                SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
        ib.ptr[1] = lower_32_bits(gpu_addr);
        ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = 1;
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;
        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err1;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                r = -ETIMEDOUT;
                goto err1;
        } else if (r < 0) {
                goto err1;
        }
        tmp = le32_to_cpu(adev->wb.wb[index]);
        if (tmp == 0xDEADBEEF)
                r = 0;
        else
                r = -EINVAL;

err1:
        amdgpu_ib_free(&ib, NULL); /* recent kernels; older ones take adev first */
        dma_fence_put(f);
err0:
        amdgpu_device_wb_free(adev, index);
        return r;
}
/**
 * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
                                 uint64_t pe, uint64_t src,
                                 unsigned count)
{
        unsigned bytes = count * 8;

        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
                SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
        ib->ptr[ib->length_dw++] = bytes;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src);
        ib->ptr[ib->length_dw++] = upper_32_bits(src);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
 * cik_sdma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
                                  uint64_t value, unsigned count,
                                  uint32_t incr)
{
        unsigned ndw = count * 2;

        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
                SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = ndw;
        for (; ndw > 0; ndw -= 2) {
                ib->ptr[ib->length_dw++] = lower_32_bits(value);
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                value += incr;
        }
}
/**
 * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
                                    uint64_t addr, unsigned count,
                                    uint32_t incr, uint64_t flags)
{
        /* for physically contiguous pages (vram) */
        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
        ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
        ib->ptr[ib->length_dw++] = upper_32_bits(flags);
        ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = incr; /* increment size */
        ib->ptr[ib->length_dw++] = 0;
        ib->ptr[ib->length_dw++] = count; /* number of entries */
}
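/*
 * Worked example (derived from the packet layout above): with
 * addr = 0x100000, count = 4, incr = 0x1000 and the desired access flags,
 * the engine generates four consecutive 64-bit PTEs at pe pointing at
 * 0x100000, 0x101000, 0x102000 and 0x103000, each OR'ed with the flags.
 */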
/**
 * cik_sdma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 */
static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
        struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        u32 pad_count;
        int i;

        pad_count = (-ib->length_dw) & 7;
        for (i = 0; i < pad_count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        ib->ptr[ib->length_dw++] =
                                        SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
                                        SDMA_NOP_COUNT(pad_count - 1);
                else
                        ib->ptr[ib->length_dw++] =
                                        SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
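/*
 * Example (derived from the code above): an IB that is 13 dwords long gets
 * pad_count = (-13) & 7 = 3 and is padded to 16 dwords.  With burst-NOP
 * capable firmware, the first pad dword is
 * SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) | SDMA_NOP_COUNT(2) and the two
 * remaining dwords are plain NOPs consumed as part of the burst.
 */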
/**
 * cik_sdma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (CIK).
 */
static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        /* wait for idle: poll the fence memory until it equals seq */
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
                                            SDMA_POLL_REG_MEM_EXTRA_OP(0) |
                                            SDMA_POLL_REG_MEM_EXTRA_FUNC(3) | /* equal */
                                            SDMA_POLL_REG_MEM_EXTRA_M));
        amdgpu_ring_write(ring, addr & 0xfffffffc);
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
        amdgpu_ring_write(ring, seq); /* reference */
        amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
}
/**
 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: page directory address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                        unsigned vmid, uint64_t pd_addr)
{
        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */

        amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

        /* wait for the invalidate request to be processed */
        amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0); /* reference */
        amdgpu_ring_write(ring, 0); /* mask */
        amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}