/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */
/*
 * NOTE(review): interior of the firmware-selection half of radeon_uvd_init();
 * the function header and the declarations of r, fw_name (presumably NULL by
 * default -- confirm) and legacy_fw_name are above this excerpt and not
 * visible here.  Maps the ASIC family to the firmware image(s) to request.
 */
switch (rdev->family) { case CHIP_RV610: case CHIP_RV630: case CHIP_RV670: case CHIP_RV620: case CHIP_RV635:
legacy_fw_name = FIRMWARE_R600; break;
case CHIP_RS780: case CHIP_RS880:
legacy_fw_name = FIRMWARE_RS780; break;
case CHIP_RV770:
legacy_fw_name = FIRMWARE_RV770; break;
case CHIP_RV710: case CHIP_RV730: case CHIP_RV740:
legacy_fw_name = FIRMWARE_RV710; break;
case CHIP_CYPRESS: case CHIP_HEMLOCK: case CHIP_JUNIPER: case CHIP_REDWOOD: case CHIP_CEDAR:
legacy_fw_name = FIRMWARE_CYPRESS; break;
case CHIP_SUMO: case CHIP_SUMO2: case CHIP_PALM: case CHIP_CAYMAN: case CHIP_BARTS: case CHIP_TURKS: case CHIP_CAICOS:
legacy_fw_name = FIRMWARE_SUMO; break;
case CHIP_TAHITI: case CHIP_VERDE: case CHIP_PITCAIRN: case CHIP_ARUBA: case CHIP_OLAND:
legacy_fw_name = FIRMWARE_TAHITI; break;
/* Bonaire-class parts are the only ones here with both a new-style and a legacy image */
case CHIP_BONAIRE: case CHIP_KABINI: case CHIP_KAVERI: case CHIP_HAWAII: case CHIP_MULLINS:
legacy_fw_name = FIRMWARE_BONAIRE_LEGACY;
fw_name = FIRMWARE_BONAIRE; break;
/* unknown family: UVD unsupported */
default: return -EINVAL;
}
rdev->uvd.fw_header_present = false;
rdev->uvd.max_handles = RADEON_DEFAULT_UVD_HANDLES; if (fw_name) { /* Let's try to load the newer firmware first */
r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev); if (r) {
dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
fw_name);
} else { struct common_firmware_header *hdr = (void *)rdev->uvd_fw->data; unsigned version_major, version_minor, family_id;
r = radeon_ucode_validate(rdev->uvd_fw); if (r) return r;
/*
 * NOTE(review): version_major/version_minor (and family_id) are read below
 * but never assigned in this excerpt -- the extraction appears to have
 * dropped the lines that decode them from hdr->ucode_version (and the ones
 * that set fw_header_present / log the loaded version).  As shown, this
 * reads uninitialized variables; confirm against the upstream file.
 */
/* * Limit the number of UVD handles depending on * microcode major and minor versions.
 */ if ((version_major >= 0x01) && (version_minor >= 0x37))
rdev->uvd.max_handles = RADEON_MAX_UVD_HANDLES;
}
}
/* * In case there is only legacy firmware, or we encounter an error * while loading the new firmware, we fall back to loading the legacy * firmware now.
 */ if (!fw_name || r) {
r = request_firmware(&rdev->uvd_fw, legacy_fw_name, rdev->dev); if (r) {
dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
legacy_fw_name); return r;
}
}
/*
 * NOTE(review): tail of radeon_uvd_cs_msg()'s switch (msg_type); the function
 * header, the mapping of the message buffer (bo/msg) and the start of the
 * create-message case are above this excerpt and not visible here.
 */
r = radeon_uvd_validate_codec(p, msg[4]);
radeon_bo_kunmap(bo); if (r) return r;
/*
 * NOTE(review): the loop below only rejects duplicate handles -- the lines
 * that actually claim a free slot (atomic_cmpxchg on handles[i], recording
 * filp/img_size, returning 0) and the loop's closing brace appear to have
 * been dropped by the extraction; confirm against the upstream file.
 */
/* try to alloc a new handle */ for (i = 0; i < p->rdev->uvd.max_handles; ++i) { if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
DRM_ERROR("Handle 0x%x already in use!\n", handle); return -EINVAL;
}
DRM_ERROR("No more free UVD handles!\n"); return -EINVAL;
case 1: /* it's a decode msg, validate codec and calc buffer sizes */
r = radeon_uvd_validate_codec(p, msg[4]); if (!r)
r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
radeon_bo_kunmap(bo); if (r) return r;
/* validate the handle: it must exist and belong to the calling file */
/* validate the handle */ for (i = 0; i < p->rdev->uvd.max_handles; ++i) { if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { if (p->rdev->uvd.filp[i] != p->filp) {
DRM_ERROR("UVD handle collision detected!\n"); return -EINVAL;
} return 0;
}
}
/*
 * NOTE(review): the "handle not found" error path that upstream places after
 * this loop appears to be missing from the excerpt; as shown, control would
 * fall through into case 2.
 */
case 2: /* it's a destroy msg, free the handle */ for (i = 0; i < p->rdev->uvd.max_handles; ++i)
atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
radeon_bo_kunmap(bo); return 0;
default:
DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
}
/* illegal message type: unmap the buffer and reject the command stream */
radeon_bo_kunmap(bo); return -EINVAL;
}
/*
 * radeon_uvd_cs_reloc - parse and validate one UVD relocation command
 *
 * @p: parser context
 * @data0: IB index of the dword written to UVD_GPCOM_VCPU_DATA0 (buffer offset)
 * @data1: IB index of the dword written to UVD_GPCOM_VCPU_DATA1 (reloc index)
 * @buf_sizes: minimum buffer size per command type, filled by the msg decode
 * @has_msg_cmd: set once a message command (cmd 0) has been seen in this IB
 *
 * Patches the IB with the real 64bit GPU address of the relocated buffer and
 * sanity checks the command against the buffer bounds.
 *
 * Returns 0 on success, negative error code on failure.
 *
 * NOTE(review): the original excerpt was mangled ("staticint", "elseif") and
 * the lines computing reloc/start/end/cmd were dropped by the extraction;
 * they are restored below from the surrounding usage -- confirm against the
 * upstream file.
 */
static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
			       int data0, int data1,
			       unsigned buf_sizes[], bool *has_msg_cmd)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_bo_list *reloc;
	unsigned idx, cmd, offset;
	uint64_t start, end;
	int r;

	relocs_chunk = p->chunk_relocs;
	offset = radeon_get_ib_value(p, data0);
	idx = radeon_get_ib_value(p, data1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	reloc = &p->relocs[(idx / 4)];
	start = reloc->gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

	/* patch the IB with the real 64bit GPU address */
	p->ib.ptr[data0] = start & 0xFFFFFFFF;
	p->ib.ptr[data1] = start >> 32;

	cmd = radeon_get_ib_value(p, p->idx) >> 1;

	if (cmd < 0x4) {
		if (end <= start) {
			DRM_ERROR("invalid reloc offset %X!\n", offset);
			return -EINVAL;
		}
		if ((end - start) < buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
				  (unsigned)(end - start), buf_sizes[cmd]);
			return -EINVAL;
		}
	} else if (cmd != 0x100) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	/* msg and fb buffers must live in the same 256MB segment as the
	 * firmware; TODO: is this still necessary on NI+ ? */
	if ((cmd == 0 || cmd == 0x3) &&
	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
			  start, end);
		return -EINVAL;
	}

	if (cmd == 0) {
		/* only one message per IB, and it must come first */
		if (*has_msg_cmd) {
			DRM_ERROR("More than one message in a UVD-IB!\n");
			return -EINVAL;
		}
		*has_msg_cmd = true;
		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
		if (r)
			return r;
	} else if (!*has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are send!\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * radeon_uvd_cs_reg - parse one register-write packet of a UVD IB
 *
 * @p: parser context
 * @pkt: the packet to walk
 * @data0: out, IB index of the last dword written to UVD_GPCOM_VCPU_DATA0
 * @data1: out, IB index of the last dword written to UVD_GPCOM_VCPU_DATA1
 * @buf_sizes: minimum buffer sizes, passed through to the reloc handler
 * @has_msg_cmd: whether a message command was already seen in this IB
 *
 * Walks the registers touched by @pkt; DATA0/DATA1 writes are remembered,
 * a CMD write triggers relocation handling, ENGINE_CNTL/NO_OP are allowed
 * as-is and anything else is rejected.
 *
 * Returns 0 on success, negative error code on failure.
 * (The original excerpt's "staticint" is an extraction artifact, fixed here.)
 */
static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int *data0, int *data1,
			     unsigned buf_sizes[],
			     bool *has_msg_cmd)
{
	int i, r;

	p->idx++;
	for (i = 0; i <= pkt->count; ++i) {
		switch (pkt->reg + i*4) {
		case UVD_GPCOM_VCPU_DATA0:
			*data0 = p->idx;
			break;
		case UVD_GPCOM_VCPU_DATA1:
			*data1 = p->idx;
			break;
		case UVD_GPCOM_VCPU_CMD:
			r = radeon_uvd_cs_reloc(p, *data0, *data1,
						buf_sizes, has_msg_cmd);
			if (r)
				return r;
			break;
		case UVD_ENGINE_CNTL:
		case UVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n",
				  pkt->reg + i*4);
			return -EINVAL;
		}
		p->idx++;
	}

	return 0;
}
int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
{ struct radeon_cs_packet pkt; int r, data0 = 0, data1 = 0;
/* does the IB have a msg command */ bool has_msg_cmd = false;
/*
 * NOTE(review): the remainder of radeon_uvd_cs_parse() (buffer-size table,
 * packet loop calling radeon_uvd_cs_reg(), final has_msg_cmd check and the
 * closing brace) is missing -- the extraction jumps straight into the
 * comment block of the next function.  Do not treat this fragment as a
 * complete definition; restore it from the upstream file.
 */
/* * multiple fence commands without any stream commands in between can * crash the vcpu so just try to emmit a dummy create/destroy msg to * avoid this
*/
/*
 * radeon_uvd_get_create_msg - stitch together and submit a dummy create msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring to submit the message on
 * @handle: stream handle to use in the message
 * @fence: optional fence returned for the submission
 *
 * Writes a canned UVD "create" message into the last page of the vcpu BO
 * and sends it, returning radeon_uvd_send_msg()'s result.
 */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	/* we use the last page of the vcpu bo for the UVD message */
	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
		RADEON_GPU_PAGE_SIZE;
	/*
	 * NOTE(review): the msg/addr/r/i declarations were missing from the
	 * mangled excerpt and are restored here from the surrounding usage;
	 * confirm against the upstream file.
	 */
	uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
	uint64_t addr = rdev->uvd.gpu_addr + offs;
	int r, i;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
	if (r)
		return r;

	/* stitch together an UVD create msg */
	writel((__force u32)cpu_to_le32(0x00000de4), &msg[0]);
	writel(0x0, (void __iomem *)&msg[1]);
	writel((__force u32)cpu_to_le32(handle), &msg[2]);
	writel(0x0, &msg[3]);
	writel(0x0, &msg[4]);
	writel(0x0, &msg[5]);
	writel(0x0, &msg[6]);
	writel((__force u32)cpu_to_le32(0x00000780), &msg[7]);
	writel((__force u32)cpu_to_le32(0x00000440), &msg[8]);
	writel(0x0, &msg[9]);
	writel((__force u32)cpu_to_le32(0x01b37000), &msg[10]);
	/* zero the rest of the message page */
	for (i = 11; i < 1024; ++i)
		writel(0x0, &msg[i]);

	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	return r;
}
/*
 * radeon_uvd_get_destroy_msg - stitch together and submit a dummy destroy msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring to submit the message on
 * @handle: stream handle to destroy
 * @fence: optional fence returned for the submission
 *
 * Writes a canned UVD "destroy" message into the last page of the vcpu BO
 * and sends it, returning radeon_uvd_send_msg()'s result.
 */
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	/* we use the last page of the vcpu bo for the UVD message */
	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
		RADEON_GPU_PAGE_SIZE;
	/*
	 * NOTE(review): the msg/addr/r/i declarations were missing from the
	 * mangled excerpt and are restored here from the surrounding usage;
	 * confirm against the upstream file.
	 */
	uint32_t __iomem *msg = (void __iomem *)(rdev->uvd.cpu_addr + offs);
	uint64_t addr = rdev->uvd.gpu_addr + offs;
	int r, i;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
	if (r)
		return r;

	/* stitch together an UVD destroy msg */
	writel((__force u32)cpu_to_le32(0x00000de4), &msg[0]);
	writel((__force u32)cpu_to_le32(0x00000002), &msg[1]);
	writel((__force u32)cpu_to_le32(handle), &msg[2]);
	writel(0x0, &msg[3]);
	/* zero the rest of the message page */
	for (i = 4; i < 1024; ++i)
		writel(0x0, &msg[i]);

	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	return r;
}
/**
 * radeon_uvd_count_handles - count number of open streams
 *
 * @rdev: radeon_device pointer
 * @sd: number of SD streams
 * @hd: number of HD streams
 *
 * Count the number of open SD/HD streams as a hint for power management.
 * (The original excerpt's "staticvoid" is an extraction artifact, fixed here.)
 */
static void radeon_uvd_count_handles(struct radeon_device *rdev,
				     unsigned *sd, unsigned *hd)
{
	unsigned i;

	*sd = 0;
	*hd = 0;

	for (i = 0; i < rdev->uvd.max_handles; ++i) {
		/* a zero entry means the handle slot is free */
		if (!atomic_read(&rdev->uvd.handles[i]))
			continue;

		/* streams at or above 720x576 pixels count as HD */
		if (rdev->uvd.img_size[i] >= 720*576)
			++(*hd);
		else
			++(*sd);
	}
}
/*
 * NOTE(review): interior of the vco-frequency search loop of
 * radeon_uvd_calc_upll_dividers(); the function header, the loop header and
 * the computations of vco_freq, fb_div and score are above this excerpt or
 * were dropped by the extraction (`score` is read below but never assigned
 * in the visible text).  Confirm against the upstream file.
 */
/* fb div out of range ? */ if (fb_div > fb_mask) break; /* it can only get worse */
fb_div &= fb_mask;
/* calc vclk divider with current vco freq */
vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
pd_min, pd_even); if (vclk_div > pd_max) break; /* vco is too big, it has to stop */
/* calc dclk divider with current vco freq */
dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
pd_min, pd_even); if (dclk_div > pd_max) break; /* vco is too big, it has to stop */
/* determine if this vco setting is better than current optimal settings */ if (score < optimal_score) {
*optimal_fb_div = fb_div;
*optimal_vclk_div = vclk_div;
*optimal_dclk_div = dclk_div;
optimal_score = score; if (optimal_score == 0) break; /* it can't get better than this */
}
}
/* did we find a valid setup ? */ if (optimal_score == ~0) return -EINVAL;
return 0;
}
/*
 * NOTE(review): radeon_uvd_send_upll_ctlreq() is truncated in this excerpt:
 * the step that asserts UPLL_CTLREQ between the deassert below and the
 * polling loop, and the function's tail (final deassert, timeout check,
 * return value, closing brace) are missing -- what follows the loop here is
 * non-code web-page residue.  Restore the tail from the upstream file.
 */
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev, unsigned cg_upll_func_cntl)
{ unsigned i;
/* make sure UPLL_CTLREQ is deasserted */
WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
/* wait for CTLACK and CTLACK2 to get asserted */ for (i = 0; i < 100; ++i) {
uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK; if ((RREG32(cg_upll_func_cntl) & mask) == mask) break;
mdelay(10);
}
/*
 * NOTE(review): the following German text is residue from the web page this
 * file was scraped from (a syntax-highlighting viewer's disclaimer: "the
 * information on this website was compiled to the best of our knowledge;
 * however neither completeness, correctness nor quality is guaranteed; the
 * colored syntax display and the measurement are still experimental").
 * It is not part of radeon_uvd.c; kept commented out pending removal:
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereitgestellten Informationen zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */