/* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher * Jerome Glisse
*/
/**
 * r100_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Busy-wait until the requested crtc enters its vblank period (r1xx-r4xx).
 * Depending on when we are called we may already be inside vblank and
 * close to the active region, so we first wait for the current vblank to
 * end and then for the next one to begin.  Every 100 polls the display
 * counter is sampled; if it has stopped moving, the crtc is not scanning
 * out and we bail out instead of spinning forever.
 */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	unsigned polls = 0;

	if (crtc >= rdev->num_crtc)
		return;

	/* a disabled crtc never produces a vblank -- nothing to wait for */
	if (crtc == 0) {
		if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
			return;
	} else {
		if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
			return;
	}

	/* leave the vblank we may currently be in ... */
	while (r100_is_in_vblank(rdev, crtc)) {
		if (polls++ % 100 == 0 && !r100_is_counter_moving(rdev, crtc))
			break;
	}

	/* ... then wait for the next vblank to start */
	while (!r100_is_in_vblank(rdev, crtc)) {
		if (polls++ % 100 == 0 && !r100_is_counter_moving(rdev, crtc))
			break;
	}
}
/**
 * r100_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip (unused on r1xx-r4xx)
 *
 * Does the actual pageflip (r1xx-r4xx).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 */
void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	/* OFFSET_LOCK holds the double buffered update back until we
	 * explicitly release it below */
	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
	int i;

	/* Lock the graphics update lock */
	/* update the scanout addresses */
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
		    RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
			break;
		udelay(1);
	}
	/* NOTE(review): this message is printed even if the wait above
	 * timed out without seeing the bit go high */
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
}
/** * r100_page_flip_pending - check if page flip is still pending * * @rdev: radeon_device pointer * @crtc_id: crtc to check * * Check if the last pagefilp is still pending (r1xx-r4xx). * Returns the current update pending status.
*/ bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
{ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
/** * r100_pm_get_dynpm_state - look up dynpm power state callback. * * @rdev: radeon_device pointer * * Look up the optimal power state based on the * current state of the GPU (r1xx-r5xx). * Used for dynpm only.
*/ void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{ int i;
rdev->pm.dynpm_can_upclock = true;
rdev->pm.dynpm_can_downclock = true;
switch (rdev->pm.dynpm_planned_action) { case DYNPM_ACTION_MINIMUM:
rdev->pm.requested_power_state_index = 0;
rdev->pm.dynpm_can_downclock = false; break; case DYNPM_ACTION_DOWNCLOCK: if (rdev->pm.current_power_state_index == 0) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
rdev->pm.dynpm_can_downclock = false;
} else { if (rdev->pm.active_crtc_count > 1) { for (i = 0; i < rdev->pm.num_power_states; i++) { if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) continue; elseif (i >= rdev->pm.current_power_state_index) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; break;
} else {
rdev->pm.requested_power_state_index = i; break;
}
}
} else
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index - 1;
} /* don't use the power state if crtcs are active and no display flag is set */ if ((rdev->pm.active_crtc_count > 0) &&
(rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
RADEON_PM_MODE_NO_DISPLAY)) {
rdev->pm.requested_power_state_index++;
} break; case DYNPM_ACTION_UPCLOCK: if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
rdev->pm.dynpm_can_upclock = false;
} else { if (rdev->pm.active_crtc_count > 1) { for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) continue; elseif (i <= rdev->pm.current_power_state_index) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; break;
} else {
rdev->pm.requested_power_state_index = i; break;
}
}
} else
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index + 1;
} break; case DYNPM_ACTION_DEFAULT:
rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.dynpm_can_upclock = false; break; case DYNPM_ACTION_NONE: default:
DRM_ERROR("Requested mode for not defined action\n"); return;
} /* only one clock mode per power state */
rdev->pm.requested_clock_mode_index = 0;
/*
 * PCI GART
 */
/* Flush the PCI GART TLB (r1xx) -- intentionally empty. */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here?
	 * The hardware seems to cache only one GART entry, so we should
	 * discard that entry; otherwise, if the first GPU GART read hits
	 * it, the access could end up at a wrong address.
	 */
}
int r100_pci_gart_init(struct radeon_device *rdev)
{ int r;
/* Load the microcode for the CP */ staticint r100_cp_init_microcode(struct radeon_device *rdev)
{ constchar *fw_name = NULL; int err;
DRM_DEBUG_KMS("\n");
switch (rdev->family) { case CHIP_R100: case CHIP_RV100: case CHIP_RV200: case CHIP_RS100: case CHIP_RS200:
DRM_INFO("Loading R100 Microcode\n");
fw_name = FIRMWARE_R100; break;
case CHIP_R200: case CHIP_RV250: case CHIP_RV280: case CHIP_RS300:
DRM_INFO("Loading R200 Microcode\n");
fw_name = FIRMWARE_R200; break;
case CHIP_R300: case CHIP_R350: case CHIP_RV350: case CHIP_RV380: case CHIP_RS400: case CHIP_RS480:
DRM_INFO("Loading R300 Microcode\n");
fw_name = FIRMWARE_R300; break;
case CHIP_R420: case CHIP_R423: case CHIP_RV410:
DRM_INFO("Loading R400 Microcode\n");
fw_name = FIRMWARE_R420; break;
case CHIP_RS690: case CHIP_RS740:
DRM_INFO("Loading RS690/RS740 Microcode\n");
fw_name = FIRMWARE_RS690; break;
case CHIP_RS600:
DRM_INFO("Loading RS600 Microcode\n");
fw_name = FIRMWARE_RS600; break;
case CHIP_RV515: case CHIP_R520: case CHIP_RV530: case CHIP_R580: case CHIP_RV560: case CHIP_RV570:
DRM_INFO("Loading R500 Microcode\n");
fw_name = FIRMWARE_R520; break;
default:
DRM_ERROR("Unsupported Radeon family %u\n", rdev->family); return -EINVAL;
}
r100_debugfs_cp_init(rdev); if (!rdev->me_fw) {
r = r100_cp_init_microcode(rdev); if (r) {
DRM_ERROR("Failed to load firmware!\n"); return r;
}
}
/* Align ring size */
rb_bufsz = order_base_2(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
r100_cp_load_microcode(rdev);
r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
RADEON_CP_PACKET2); if (r) { return r;
} /* Each time the cp read 1024 bytes (16 dword/quadword) update
* the rptr copy in system ram */
rb_blksz = 9; /* cp will read 128bytes at a time (4 dwords) */
max_fetch = 1;
ring->align_mask = 16 - 1; /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
pre_write_timer = 64; /* Force CP_RB_WPTR write if written more than one time before the * delay expire
*/
pre_write_limit = 0; /* Setup the cp cache like this (cache size is 96 dwords) : * RING 0 to 15 * INDIRECT1 16 to 79 * INDIRECT2 80 to 95 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords)) * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords)) * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords)) * Idea being that most of the gpu cmd will be through indirect1 buffer * so it gets the bigger cache.
*/
indirect2_start = 80;
indirect1_start = 16; /* cp setup */
WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
REG_SET(RADEON_MAX_FETCH, max_fetch)); #ifdef __BIG_ENDIAN
tmp |= RADEON_BUF_SWAP_32BIT; #endif
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
/* Set ring address */
DRM_INFO("radeon: ring at 0x%016lX\n", (unsignedlong)ring->gpu_addr);
WREG32(RADEON_CP_RB_BASE, ring->gpu_addr); /* Force read & write ptr to 0 */
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
ring->wptr = 0;
WREG32(RADEON_CP_RB_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(R_00070C_CP_RB_RPTR_ADDR,
S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10); /* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
REG_SET(RADEON_INDIRECT1_START, indirect1_start));
WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
/* at this point everything should be setup correctly to enable master */
pci_set_master(rdev->pdev);
radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); if (r) {
DRM_ERROR("radeon: cp isn't working (%d).\n", r); return r;
}
ring->ready = true;
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
if (!ring->rptr_save_reg /* not resuming from suspend */
&& radeon_ring_supports_scratch_reg(rdev, ring)) {
r = radeon_scratch_get(rdev, &ring->rptr_save_reg); if (r) {
DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
ring->rptr_save_reg = 0;
}
} return 0;
}
void r100_cp_fini(struct radeon_device *rdev)
{ if (r100_cp_wait_for_idle(rdev)) {
DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
} /* Disable ring */
r100_cp_disable(rdev);
radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
DRM_INFO("radeon: cp finalized\n");
}
/* Stop the Command Processor (r1xx): shrink the active VRAM size back to
 * the CPU-visible aperture, mark the gfx ring not ready, clear the CP
 * command-stream queue mode/control registers and the scratch writeback
 * mask, then wait for the GUI engine to go idle. */
void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(R_000770_SCRATCH_UMSK, 0);
	/* best effort: the disable sequence above is issued either way */
	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
	}
}
/**
 * r100_cs_parse_packet0 - walk a type-0 (register write) packet
 * @p:     parser structure holding parsing context
 * @pkt:   packet to process
 * @auth:  safe-register bitmap, one bit per register (32 registers per entry)
 * @n:     number of entries in the @auth bitmap
 * @check: callback invoked for each register whose bit is set in @auth
 *
 * Walks every register touched by the packet.  Registers flagged in
 * @auth are handed to @check for extra validation.  In one_reg_wr mode
 * the same register is written for every dword, otherwise the register
 * offset advances by 4 bytes per dword.
 *
 * Returns 0 on success, -EINVAL if the packet writes outside the range
 * covered by the safe-register bitmap, or the error returned by @check.
 */
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* Check that register fall into register range
	 * determined by the number of entry (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}
/** * r100_cs_packet_parse_vline() - parse userspace VLINE packet * @p: parser structure holding parsing context. * * Userspace sends a special sequence for VLINE waits. * PACKET0 - VLINE_START_END + value * PACKET0 - WAIT_UNTIL +_value * RELOC (P3) - crtc_id in reloc. * * This function parses this and relocates the VLINE START END * and WAIT UNTIL packets to the correct crtc. * It also detects a switched off crtc and nulls out the * wait in that case.
*/ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{ struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; struct radeon_cs_packet p3reloc, waitreloc; int crtc_id; int r;
uint32_t header, h_idx, reg; volatile uint32_t *ib;
ib = p->ib.ptr;
/* parse the wait until */
r = radeon_cs_packet_parse(p, &waitreloc, p->idx); if (r) return r;
/* check its a wait until and only 1 count */ if (waitreloc.reg != RADEON_WAIT_UNTIL ||
waitreloc.count != 0) {
DRM_ERROR("vline wait had illegal wait until segment\n"); return -EINVAL;
}
if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
DRM_ERROR("vline wait had illegal wait until\n"); return -EINVAL;
}
/* jump over the NOP */
r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2); if (r) return r;
track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
track->immd_dwords = pkt->count - 1;
r = r100_cs_track_check(p->rdev, track); if (r) return r; break; case PACKET3_3D_DRAW_IMMD: if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); return -EINVAL;
}
track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
track->immd_dwords = pkt->count - 1;
r = r100_cs_track_check(p->rdev, track); if (r) return r; break; /* triggers drawing using in-packet vertex data */ case PACKET3_3D_DRAW_IMMD_2: if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); return -EINVAL;
}
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
track->immd_dwords = pkt->count;
r = r100_cs_track_check(p->rdev, track); if (r) return r; break; /* triggers drawing using in-packet vertex data */ case PACKET3_3D_DRAW_VBUF_2:
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
r = r100_cs_track_check(p->rdev, track); if (r) return r; break; /* triggers drawing of vertex buffers setup elsewhere */ case PACKET3_3D_DRAW_INDX_2:
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
r = r100_cs_track_check(p->rdev, track); if (r) return r; break; /* triggers drawing using indices to vertex buffer */ case PACKET3_3D_DRAW_VBUF:
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
r = r100_cs_track_check(p->rdev, track); if (r) return r; break; /* triggers drawing of vertex buffers setup elsewhere */ case PACKET3_3D_DRAW_INDX:
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
r = r100_cs_track_check(p->rdev, track); if (r) return r; break; /* triggers drawing using indices to vertex buffer */ case PACKET3_3D_CLEAR_HIZ: case PACKET3_3D_CLEAR_ZMASK: if (p->rdev->hyperz_filp != p->filp) return -EINVAL; break; case PACKET3_NOP: break; default:
DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); return -EINVAL;
} return 0;
}
int r100_cs_parse(struct radeon_cs_parser *p)
{ struct radeon_cs_packet pkt; struct r100_cs_track *track; int r;
track = kzalloc(sizeof(*track), GFP_KERNEL); if (!track) return -ENOMEM;
r100_cs_track_clear(p->rdev, track);
p->track = track; do {
r = radeon_cs_packet_parse(p, &pkt, p->idx); if (r) { return r;
}
p->idx += pkt.count + 2; switch (pkt.type) { case RADEON_PACKET_TYPE0: if (p->rdev->family >= CHIP_R200)
r = r100_cs_parse_packet0(p, &pkt,
p->rdev->config.r100.reg_safe_bm,
p->rdev->config.r100.reg_safe_bm_size,
&r200_packet0_check); else
r = r100_cs_parse_packet0(p, &pkt,
p->rdev->config.r100.reg_safe_bm,
p->rdev->config.r100.reg_safe_bm_size,
&r100_packet0_check); break; case RADEON_PACKET_TYPE2: break; case RADEON_PACKET_TYPE3:
r = r100_packet3_check(p, &pkt); break; default:
DRM_ERROR("Unknown packet type %d !\n",
pkt.type); return -EINVAL;
} if (r) return r;
} while (p->idx < p->chunk_ib->length_dw); return 0;
}
/* Dump the state of one texture-unit tracker to the kernel log; called
 * when a command stream references a texture that fails validation. */
static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
	DRM_ERROR("pitch %d\n", t->pitch);
	DRM_ERROR("use_pitch %d\n", t->use_pitch);
	DRM_ERROR("width %d\n", t->width);
	DRM_ERROR("width_11 %d\n", t->width_11);
	DRM_ERROR("height %d\n", t->height);
	DRM_ERROR("height_11 %d\n", t->height_11);
	DRM_ERROR("num levels %d\n", t->num_levels);
	DRM_ERROR("depth %d\n", t->txdepth);
	DRM_ERROR("bpp %d\n", t->cpp);
	DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
	DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
	DRM_ERROR("compress format %d\n", t->compress_format);
}
staticint r100_track_compress_size(int compress_format, int w, int h)
{ int block_width, block_height, block_bytes; int wblocks, hblocks; int min_wblocks; int sz;
for (face = 0; face < 5; face++) {
cube_robj = track->textures[idx].cube_info[face].robj;
w = track->textures[idx].cube_info[face].width;
h = track->textures[idx].cube_info[face].height;
if (compress_format) {
size = r100_track_compress_size(compress_format, w, h);
} else
size = w * h;
size *= track->textures[idx].cpp;
/* Validate every enabled texture unit tracked for the current command
 * stream: compute the total size of all mip levels (taking pitch vs.
 * width addressing, 3D depth and compressed formats into account) and
 * make sure the bound buffer object is large enough.  Returns 0 on
 * success or -EINVAL on a missing/undersized texture. */
static int r100_cs_track_texture_check(struct radeon_device *rdev,
				       struct r100_cs_track *track)
{
	struct radeon_bo *robj;
	unsigned long size;
	unsigned u, i, w, h, d;
	int ret;

	for (u = 0; u < track->num_texture; u++) {
		if (!track->textures[u].enabled)
			continue;
		if (track->textures[u].lookup_disable)
			continue;
		robj = track->textures[u].robj;
		if (robj == NULL) {
			DRM_ERROR("No texture bound to unit %u\n", u);
			return -EINVAL;
		}
		/* sum the footprint of all mip levels in texels */
		size = 0;
		for (i = 0; i <= track->textures[u].num_levels; i++) {
			if (track->textures[u].use_pitch) {
				/* pre-r300 pitch is in bytes, later in texels */
				if (rdev->family < CHIP_R300)
					w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
				else
					w = track->textures[u].pitch / (1 << i);
			} else {
				w = track->textures[u].width;
				if (rdev->family >= CHIP_RV515)
					w |= track->textures[u].width_11;
				w = w / (1 << i);
				if (track->textures[u].roundup_w)
					w = roundup_pow_of_two(w);
			}
			h = track->textures[u].height;
			if (rdev->family >= CHIP_RV515)
				h |= track->textures[u].height_11;
			h = h / (1 << i);
			if (track->textures[u].roundup_h)
				h = roundup_pow_of_two(h);
			if (track->textures[u].tex_coord_type == 1) {
				/* 3D texture: depth shrinks with each level too */
				d = (1 << track->textures[u].txdepth) / (1 << i);
				if (!d)
					d = 1;
			} else {
				d = 1;
			}
			if (track->textures[u].compress_format) {
				size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
				/* compressed textures are block based */
			} else
				size += w * h * d;
		}
		/* texels -> bytes */
		size *= track->textures[u].cpp;

		switch (track->textures[u].tex_coord_type) {
		case 0:
		case 1:
			break;
		case 2:
			/* cube map: either 6 separately tracked faces or 6x size */
			if (track->separate_cube) {
				ret = r100_cs_track_cube(rdev, track, u);
				if (ret)
					return ret;
			} else
				size *= 6;
			break;
		default:
			DRM_ERROR("Invalid texture coordinate type %u for unit "
				  "%u\n", track->textures[u].tex_coord_type, u);
			return -EINVAL;
		}
		if (size > radeon_bo_size(robj)) {
			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
				  "%lu\n", u, size, radeon_bo_size(robj));
			r100_cs_track_texture_print(&track->textures[u]);
			return -EINVAL;
		}
	}
	return 0;
}
for (i = 0; i < num_cb; i++) { if (track->cb[i].robj == NULL) {
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); return -EINVAL;
}
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
size += track->cb[i].offset; if (size > radeon_bo_size(track->cb[i].robj)) {
DRM_ERROR("[drm] Buffer too small for color buffer %d " "(need %lu have %lu) !\n", i, size,
radeon_bo_size(track->cb[i].robj));
DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
i, track->cb[i].pitch, track->cb[i].cpp,
track->cb[i].offset, track->maxy); return -EINVAL;
}
}
track->cb_dirty = false;
if (track->zb_dirty && track->z_enabled) { if (track->zb.robj == NULL) {
DRM_ERROR("[drm] No buffer for z buffer !\n"); return -EINVAL;
}
size = track->zb.pitch * track->zb.cpp * track->maxy;
size += track->zb.offset; if (size > radeon_bo_size(track->zb.robj)) {
DRM_ERROR("[drm] Buffer too small for z buffer " "(need %lu have %lu) !\n", size,
radeon_bo_size(track->zb.robj));
DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
track->zb.pitch, track->zb.cpp,
track->zb.offset, track->maxy); return -EINVAL;
}
}
/* NOTE(review): the text below is not source code -- it is a German website
 * footer/disclaimer appended during extraction and should be removed from
 * this file.  Preserved here, commented out, for reference:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */