/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
	r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vce.vcpu_bo);
		dev_err(rdev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	memset(cpu_addr, 0, radeon_bo_size(rdev->vce.vcpu_bo));
	if (rdev->family < CHIP_BONAIRE)
		r = vce_v1_0_load_fw(rdev, cpu_addr);
	else
		memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size);
radeon_bo_kunmap(rdev->vce.vcpu_bo);
radeon_bo_unreserve(rdev->vce.vcpu_bo);
return r;
}
/**
 * radeon_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore.
 */
static void radeon_vce_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, vce.idle_work.work);
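	/*
	 * The remainder of the handler was truncated; a minimal sketch of
	 * the power-down path, assuming the radeon_dpm_enable_vce() and
	 * radeon_set_vce_clocks() helpers are available.
	 */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* let dynamic power management turn the VCE block off */
		radeon_dpm_enable_vce(rdev, false);
	} else {
		/* no DPM: simply drop the VCE clocks to zero */
		radeon_set_vce_clocks(rdev, 0, 0);
	}
}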
/**
 * radeon_vce_note_usage - power up VCE
 *
 * @rdev: radeon_device pointer
 *
 * Make sure VCE is powered up when we want to use it.
 */
void radeon_vce_note_usage(struct radeon_device *rdev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work);

	set_clocks &= schedule_delayed_work(&rdev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}
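	/*
	 * Power-up path, sketched under the same assumptions as the idle
	 * handler above; the clock values passed to radeon_set_vce_clocks()
	 * are illustrative.
	 */
	if (set_clocks || streams_changed) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			/* let dynamic power management bring VCE up */
			radeon_dpm_enable_vce(rdev, true);
		} else {
			/* no DPM: program fixed VCE clocks */
			radeon_set_vce_clocks(rdev, 53300, 40000);
		}
	}
}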
/**
 * radeon_vce_free_handles - free still open VCE handles
 *
 * @rdev: radeon_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer.
 */
void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;

	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&rdev->vce.handles[i]);

		if (!handle || rdev->vce.filp[i] != filp)
			continue;

		radeon_vce_note_usage(rdev);

		r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX,
					       handle, NULL);
		if (r)
DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
/**
 * radeon_vce_get_create_msg - generate a VCE create msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test.
 */
int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
dummy = ib.gpu_addr + 1024;
	/* stitch together a VCE create msg */
ib.length_dw = 0;
ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
ib.ptr[ib.length_dw++] = cpu_to_le32(handle);
for (i = ib.length_dw; i < ib_size_dw; ++i)
ib.ptr[i] = cpu_to_le32(0x0);
	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r)
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
if (fence)
*fence = radeon_fence_ref(ib.fence);
radeon_ib_free(rdev, &ib);
return r;
}
/**
 * radeon_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so.
 */
int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
dummy = ib.gpu_addr + 1024;
	/* stitch together a VCE destroy msg */
ib.length_dw = 0;
ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
ib.ptr[ib.length_dw++] = cpu_to_le32(handle);
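	/*
	 * The rest of the message and the submission path were truncated;
	 * the sketch below mirrors the create-msg path above. The feedback
	 * buffer (0x05000005) and destroy (0x02000001) command ids match the
	 * ones checked in radeon_vce_cs_parse(); the length dwords are
	 * assumptions.
	 */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
	ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
	ib.ptr[ib.length_dw++] = cpu_to_le32(lower_32_bits(dummy));
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);

	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001); /* destroy cmd */

	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = cpu_to_le32(0x0);

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r)
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}

/**
 * radeon_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimal size of the buffer
 *
 * Patch relocation inside command stream with the real buffer address.
 */
int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
			unsigned size)
{
	/*
	 * The head of this function was also truncated; a sketch of the
	 * usual reloc lookup, assuming the relocation index sits in the
	 * dword at @hi and the buffer offset in the dword at @lo.
	 */
	struct radeon_bo_list *reloc;
	uint64_t start, end, offset;
	unsigned idx;

	offset = radeon_get_ib_value(p, lo);
	idx = radeon_get_ib_value(p, hi);

	reloc = &p->relocs[(idx / 4)];
	start = reloc->gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

	/* patch the absolute GPU address back into the command stream */
	p->ib.ptr[lo] = start & 0xFFFFFFFF;
	p->ib.ptr[hi] = start >> 32;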
	if (end <= start) {
		DRM_ERROR("invalid reloc offset %llX!\n", offset);
		return -EINVAL;
	}

	if ((end - start) < size) {
		DRM_ERROR("buffer too small (%d / %d)!\n",
			  (unsigned)(end - start), size);
		return -EINVAL;
	}
return 0;
}
/**
 * radeon_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;

	*allocated = false;

	/* validate the handle */
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
			if (p->rdev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}
	/* handle not found, try to alloc a new one */
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
			p->rdev->vce.filp[i] = p->filp;
			p->rdev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}
DRM_ERROR("No more free VCE handles!\n"); return -EINVAL;
}
/**
 * radeon_vce_cs_parse - parse and validate the command stream
 *
 * @p: parser context
 */
int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
	int session_idx = -1;
	bool destroyed = false, created = false, allocated = false;
	uint32_t tmp = 0, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0;
while (p->idx < p->chunk_ib->length_dw) {
uint32_t len = radeon_get_ib_value(p, p->idx);
uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
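		/*
		 * The session/create cases and the switch statement itself
		 * were truncated here; the block below is a hedged
		 * reconstruction so the remaining cases have a switch to
		 * live in. The dword offsets used for the image size are
		 * assumptions.
		 */
		switch (cmd) {
		case 0x00000001: // session
			handle = radeon_get_ib_value(p, p->idx + 2);
			session_idx = radeon_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->rdev->vce.img_size[session_idx];
			break;

		case 0x00000002: // task info
			break;

		case 0x01000001: // create
			created = true;
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}
			/* track the image size for later buffer checks */
			*size = radeon_get_ib_value(p, p->idx + 8) *
				radeon_get_ib_value(p, p->idx + 10) *
				8 * 3 / 2;
			break;
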
		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
		case 0x04000009: // vui
			break;
		case 0x03000001: // encode
			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
						*size);
			if (r)
				goto out;

			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
						*size / 3);
			if (r)
				goto out;
			break;

		case 0x02000001: // destroy
			destroyed = true;
			break;

		case 0x05000001: // context buffer
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						*size * 2);
			if (r)
				goto out;
			break;

		case 0x05000004: // video bitstream buffer
			tmp = radeon_get_ib_value(p, p->idx + 4);
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						tmp);
			if (r)
				goto out;
			break;

		case 0x05000005: // feedback buffer
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						4096);
			if (r)
				goto out;
			break;
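		/*
		 * The original switch was cut off before its end; assume
		 * unknown commands are rejected and close the switch so the
		 * per-command session check below still applies.
		 */
		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}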
if (session_idx == -1) {
DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
}
p->idx += len / 4;
}
if (allocated && !created) {
DRM_ERROR("New session without create command!\n");
r = -ENOENT;
}
out: if ((!r && destroyed) || (r && allocated)) { /* * IB contains a destroy msg or we have allocated an * handle and got an error, anyway free the handle
*/ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
}
return r;
}
/**
 * radeon_vce_semaphore_emit - emit a semaphore command
 *
 * @rdev: radeon_device pointer
 * @ring: engine to use
 * @semaphore: address of semaphore
 * @emit_wait: true=emit wait, false=emit signal
 */
bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
			       struct radeon_ring *ring,
			       struct radeon_semaphore *semaphore,
			       bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
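	/*
	 * The command emission was truncated; a sketch of the usual
	 * semaphore packet, assuming the VCE_CMD_SEMAPHORE opcode and this
	 * address packing for the VCE ring. VCE_CMD_END matches the opcode
	 * used in the ring test below.
	 */
	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE));
	radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF));
	radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF));
	radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0)));
	if (!emit_wait)
		radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));

	return true;
}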
/**
 * radeon_vce_ring_test - test if VCE ring is working
 *
 * @rdev: radeon_device pointer
 * @ring: the engine to test on
 */
int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t rptr = vce_v1_0_get_rptr(rdev, ring);
	unsigned i;
	int r;

	r = radeon_ring_lock(rdev, ring, 16);
	if (r) {
		DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
radeon_ring_unlock_commit(rdev, ring, false);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (vce_v1_0_get_rptr(rdev, ring) != rptr)
			break;
udelay(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
DRM_ERROR("radeon: ring %d test failed\n",
ring->idx);
r = -ETIMEDOUT;
}
return r;
}
/**
 * radeon_vce_ib_test - test if VCE IBs are working
 *
 * @rdev: radeon_device pointer
 * @ring: the engine to test on
 */
int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence);
	if (r) {
		DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
		goto error;
	}
r = radeon_fence_wait_timeout(fence, false, usecs_to_jiffies(
		RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
	} else if (r == 0) {
DRM_ERROR("radeon: fence wait timed out.\n");
r = -ETIMEDOUT;
} else {
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
r = 0;
}
error:
radeon_fence_unref(&fence); return r;
}