/* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher * Jerome Glisse
*/
/*
 * r200_get_vtx_size_0 - compute the per-vertex dword count encoded in a
 * SE_VTX_FMT_0 register value.
 * @vtx_fmt_0: raw R200 SE_VTX_FMT_0 register contents
 *
 * Walks the component-enable bits and sums how many 32-bit words a single
 * vertex occupies. Fix: the original declared the function as "staticint"
 * (missing space), which does not compile.
 */
static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
{
	int vtx_size, i;

	/* base size: 2 dwords (the XY pair that is always present) */
	vtx_size = 2;
	if (vtx_fmt_0 & R200_VTX_Z0)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_W0)
		vtx_size++;
	/* blend weight */
	if (vtx_fmt_0 & (0x7 << R200_VTX_WEIGHT_COUNT_SHIFT))
		vtx_size += (vtx_fmt_0 >> R200_VTX_WEIGHT_COUNT_SHIFT) & 0x7;
	if (vtx_fmt_0 & R200_VTX_PV_MATRIX_SEL)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_N0)
		vtx_size += 3;
	if (vtx_fmt_0 & R200_VTX_POINT_SIZE)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_DISCRETE_FOG)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_SHININESS_0)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_SHININESS_1)
		vtx_size++;
	/* eight 2-bit color-format fields starting at bit 11:
	 * 0 = absent, 1 = one packed dword, 2 = three dwords, 3 = four dwords */
	for (i = 0; i < 8; i++) {
		int color_size = (vtx_fmt_0 >> (11 + 2*i)) & 0x3;

		switch (color_size) {
		case 0: break;
		case 1: vtx_size++; break;
		case 2: vtx_size += 3; break;
		case 3: vtx_size += 4; break;
		}
	}
	if (vtx_fmt_0 & R200_VTX_XY1)
		vtx_size += 2;
	if (vtx_fmt_0 & R200_VTX_Z1)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_W1)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_N1)
		vtx_size += 3;
	return vtx_size;
}
/**
 * r200_copy_dma - copy data between GPU addresses via the GFX ring DMA packet.
 * @rdev: radeon device
 * @src_offset: source GPU byte address
 * @dst_offset: destination GPU byte address
 * @num_gpu_pages: number of GPU pages to copy
 * @resv: reservation object (not consumed here)
 *
 * Splits the transfer into chunks of at most 0x1FFFFF bytes, emitting one
 * DMA packet per chunk, then emits a fence on the GFX ring.
 *
 * Returns the emitted fence on success, or an ERR_PTR on failure.
 */
struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
				   uint64_t src_offset,
				   uint64_t dst_offset,
				   unsigned num_gpu_pages,
				   struct dma_resv *resv)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	struct radeon_fence *fence;
	uint32_t bytes_left;
	int num_chunks;
	int r;

	/* radeon pitch is /64 */
	bytes_left = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
	num_chunks = DIV_ROUND_UP(bytes_left, 0x1FFFFF);
	r = radeon_ring_lock(rdev, ring, num_chunks * 4 + 64);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return ERR_PTR(r);
	}
	/* Must wait for 2D idle & clean before DMA or hangs might happen */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, (1 << 16));
	while (num_chunks--) {
		uint32_t chunk = bytes_left;

		if (chunk > 0x1FFFFF)
			chunk = 0x1FFFFF;
		bytes_left -= chunk;
		radeon_ring_write(ring, PACKET0(0x720, 2));
		radeon_ring_write(ring, src_offset);
		radeon_ring_write(ring, dst_offset);
		radeon_ring_write(ring, chunk | (1 << 31) | (1 << 30));
		src_offset += chunk;
		dst_offset += chunk;
	}
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return ERR_PTR(r);
	}
	radeon_ring_unlock_commit(rdev, ring, false);
	return fence;
}
/*
 * r200_get_vtx_size_1 - compute the texture-coordinate dword count encoded
 * in a SE_VTX_FMT_1 register value.
 * @vtx_fmt_1: raw R200 SE_VTX_FMT_1 register contents
 *
 * Each of the six texture units has a 3-bit coordinate-count field; counts
 * above 4 are invalid and contribute nothing. Fix: the original declared
 * the function as "staticint" (missing space), which does not compile.
 */
static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
{
	int vtx_size, i, tex_size;

	vtx_size = 0;
	for (i = 0; i < 6; i++) {
		tex_size = (vtx_fmt_1 >> (i * 3)) & 0x7;
		if (tex_size > 4)
			continue;
		vtx_size += tex_size;
	}
	return vtx_size;
}
int r200_packet0_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx, unsigned reg)
{ struct radeon_bo_list *reloc; struct r100_cs_track *track; volatile uint32_t *ib;
uint32_t tmp; int r; int i; int face;
u32 tile_flags = 0;
u32 idx_value;
ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx); switch (reg) { case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p); if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
radeon_cs_dump_packet(p, pkt); return r;
} break; /* FIXME: only allow PACKET3 blit? easier to check for out of
* range access */ case RADEON_DST_PITCH_OFFSET: case RADEON_SRC_PITCH_OFFSET:
r = r100_reloc_pitch_offset(p, pkt, idx, reg); if (r) return r; break; case RADEON_RB3D_DEPTHOFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
radeon_cs_dump_packet(p, pkt); return r;
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->gpu_offset); break; case RADEON_RB3D_COLOROFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
radeon_cs_dump_packet(p, pkt); return r;
}
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->gpu_offset); break; case R200_PP_TXOFFSET_0: case R200_PP_TXOFFSET_1: case R200_PP_TXOFFSET_2: case R200_PP_TXOFFSET_3: case R200_PP_TXOFFSET_4: case R200_PP_TXOFFSET_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
radeon_cs_dump_packet(p, pkt); return r;
} if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R200_TXO_MACRO_TILE; if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R200_TXO_MICRO_TILE;
tmp = idx_value & ~(0x7 << 2);
tmp |= tile_flags;
ib[idx] = tmp + ((u32)reloc->gpu_offset);
} else
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true; break; case R200_PP_CUBIC_OFFSET_F1_0: case R200_PP_CUBIC_OFFSET_F2_0: case R200_PP_CUBIC_OFFSET_F3_0: case R200_PP_CUBIC_OFFSET_F4_0: case R200_PP_CUBIC_OFFSET_F5_0: case R200_PP_CUBIC_OFFSET_F1_1: case R200_PP_CUBIC_OFFSET_F2_1: case R200_PP_CUBIC_OFFSET_F3_1: case R200_PP_CUBIC_OFFSET_F4_1: case R200_PP_CUBIC_OFFSET_F5_1: case R200_PP_CUBIC_OFFSET_F1_2: case R200_PP_CUBIC_OFFSET_F2_2: case R200_PP_CUBIC_OFFSET_F3_2: case R200_PP_CUBIC_OFFSET_F4_2: case R200_PP_CUBIC_OFFSET_F5_2: case R200_PP_CUBIC_OFFSET_F1_3: case R200_PP_CUBIC_OFFSET_F2_3: case R200_PP_CUBIC_OFFSET_F3_3: case R200_PP_CUBIC_OFFSET_F4_3: case R200_PP_CUBIC_OFFSET_F5_3: case R200_PP_CUBIC_OFFSET_F1_4: case R200_PP_CUBIC_OFFSET_F2_4: case R200_PP_CUBIC_OFFSET_F3_4: case R200_PP_CUBIC_OFFSET_F4_4: case R200_PP_CUBIC_OFFSET_F5_4: case R200_PP_CUBIC_OFFSET_F1_5: case R200_PP_CUBIC_OFFSET_F2_5: case R200_PP_CUBIC_OFFSET_F3_5: case R200_PP_CUBIC_OFFSET_F4_5: case R200_PP_CUBIC_OFFSET_F5_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
radeon_cs_dump_packet(p, pkt); return r;
}
track->textures[i].cube_info[face - 1].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
track->textures[i].cube_info[face - 1].robj = reloc->robj;
track->tex_dirty = true; break; case RADEON_RE_WIDTH_HEIGHT:
track->maxy = ((idx_value >> 16) & 0x7FF);
track->cb_dirty = true;
track->zb_dirty = true; break; case RADEON_RB3D_COLORPITCH:
r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
radeon_cs_dump_packet(p, pkt); return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { if (reloc->tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE; if (reloc->tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
/* NOTE(review): extraction residue — the remainder of r200_packet0_check
 * (and the rest of this file) has been replaced by an unrelated German
 * web-page disclaimer. Translated for reference: "The information on this
 * web page was compiled carefully and to the best of our knowledge;
 * however, neither completeness, nor correctness, nor quality of the
 * information provided is guaranteed. Remark: the colored syntax
 * highlighting and the measurement are still experimental."
 * Restore the original function body from drivers/gpu/drm/radeon/r200.c. */