From: Dave Airlie
Date: Fri, 20 Mar 2009 00:52:17 +0000 (+1000)
Subject: Merge remote branch 'main/master' into radeon-rewrite
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=407e8ae5b167b0193e1e5b1266a5d61ed836dfb5;p=mesa.git

Merge remote branch 'main/master' into radeon-rewrite

Conflicts:
	src/mesa/drivers/dri/r300/r300_cmdbuf.c
	src/mesa/drivers/dri/r300/r300_state.c
	src/mesa/drivers/dri/r300/r300_swtcl.c
	src/mesa/drivers/dri/r300/radeon_ioctl.c
	src/mesa/drivers/dri/radeon/radeon_screen.c
---

407e8ae5b167b0193e1e5b1266a5d61ed836dfb5
diff --cc configure.ac
index ea2992d38b0,46070fd73c9..4164d377ce8
--- a/configure.ac
+++ b/configure.ac
@@@ -443,11 -453,9 +453,11 @@@ AC_SUBST([WINDOW_SYSTEM]
  AC_SUBST([GALLIUM_DIRS])
  AC_SUBST([GALLIUM_WINSYS_DIRS])
  AC_SUBST([GALLIUM_WINSYS_DRM_DIRS])
- AC_SUBST([GALLIUM_DRIVER_DIRS])
+ AC_SUBST([GALLIUM_DRIVERS_DIRS])
  AC_SUBST([GALLIUM_AUXILIARY_DIRS])
  AC_SUBST([GALLIUM_STATE_TRACKERS_DIRS])
 +AC_SUBST([RADEON_CFLAGS])
 +AC_SUBST([RADEON_LDFLAGS])
  dnl
  dnl User supplied program configuration
diff --cc src/mesa/drivers/dri/r200/r200_texstate.c
index 058e0ee5bec,0ad5651cd47..e9cee1a637d
--- a/src/mesa/drivers/dri/r200/r200_texstate.c
+++ b/src/mesa/drivers/dri/r200/r200_texstate.c
@@@ -1245,8 -1377,9 +1245,8 @@@ static GLboolean r200_validate_texgen(
  	}
  	else {
  	   tgcm |= R200_TEXGEN_COMP_T << (unit * 4);
  	}
- 	if (texUnit->TexGenEnabled & R_BIT) {
- 	   if (texUnit->GenModeR != mode)
+ 	   if (texUnit->GenR.Mode != mode)
  	      mixed_fallback = GL_TRUE;
  	}
  	else {
  	   tgcm |= R200_TEXGEN_COMP_R << (unit * 4);
diff --cc src/mesa/drivers/dri/r300/r300_state.c
index 17e7b5227ab,8095538ff94..f423029ee68
--- a/src/mesa/drivers/dri/r300/r300_state.c
+++ b/src/mesa/drivers/dri/r300/r300_state.c
@@@ -1654,31 -1589,27 +1525,27 @@@ static void r300SetupRSUnit(GLcontext 
  		}
  	}
  
- 	if (InputsRead & FRAG_BIT_COL1) {
- 		if (R300_OUTPUTS_WRITTEN_TEST(OutputsWritten, VERT_RESULT_COL1, _TNL_ATTRIB_COLOR1)) {
- 			r300->hw.rr.cmd[R300_RR_INST_1] |= R300_RS_INST_COL_ID(1) | R300_RS_INST_COL_CN_WRITE | (fp_reg++ << R300_RS_INST_COL_ADDR_SHIFT);
- 			InputsRead &= ~FRAG_BIT_COL1;
- 			if (high_rr < 1)
- 				high_rr = 1;
- 			col_interp_nr++;
- 		} else {
- 			WARN_ONCE("fragprog wants col1, vp doesn't provide it\n");
- 		}
+ 	if (InputsRead & FRAG_BIT_WPOS) {
+ 		r300->hw.ri.cmd[R300_RI_INTERP_0 + tex_ip] |= R300_RS_SEL_S(0) | R300_RS_SEL_T(1) | R300_RS_SEL_R(2) | R300_RS_SEL_Q(3) | R300_RS_TEX_PTR(rs_tex_count);
+ 		r300->hw.rr.cmd[R300_RR_INST_0 + tex_ip] |= R300_RS_INST_TEX_ID(tex_ip) | R300_RS_INST_TEX_CN_WRITE | R300_RS_INST_TEX_ADDR(fp_reg);
+ 		InputsRead &= ~FRAG_BIT_WPOS;
+ 		rs_tex_count += 4;
+ 		++tex_ip;
+ 		++fp_reg;
  	}
+ 	InputsRead &= ~FRAG_BIT_WPOS;
  
- 	/* Need at least one. This might still lock as the values are undefined... */
- 	if (rs_tex_count == 0 && col_interp_nr == 0) {
- 		r300->hw.rr.cmd[R300_RR_INST_0] |= R300_RS_INST_COL_ID(0) | R300_RS_INST_COL_CN_WRITE | (fp_reg++ << R300_RS_INST_COL_ADDR_SHIFT);
- 		col_interp_nr++;
+ 	/* Setup default color if no color or tex was set */
+ 	if (rs_tex_count == 0 && col_ip == 0) {
+ 		r300->hw.rr.cmd[R300_RR_INST_0] = R300_RS_INST_COL_ID(0) | R300_RS_INST_COL_CN_WRITE | R300_RS_INST_COL_ADDR(0) | R300_RS_COL_FMT(R300_RS_COL_FMT_0001);
+ 		++col_ip;
  	}
  
- 	r300->hw.rc.cmd[1] = 0 | (rs_tex_count << R300_IT_COUNT_SHIFT)
- 	  | (col_interp_nr << R300_IC_COUNT_SHIFT)
- 	  | R300_HIRES_EN;
+ 	high_rr = (col_ip > tex_ip) ?
col_ip : tex_ip;
+ 	r300->hw.rc.cmd[1] |= (rs_tex_count << R300_IT_COUNT_SHIFT) | (col_ip << R300_IC_COUNT_SHIFT) | R300_HIRES_EN;
+ 	r300->hw.rc.cmd[2] |= high_rr - 1;
  
- 	assert(high_rr >= 0);
- 	r300->hw.rr.cmd[R300_RR_CMD_0] = cmdpacket0(r300->radeon.radeonScreen, R300_RS_INST_0, high_rr + 1);
- 	r300->hw.rc.cmd[2] = high_rr;
 -	r300->hw.rr.cmd[R300_RR_CMD_0] = cmdpacket0(R300_RS_INST_0, high_rr);
++	r300->hw.rr.cmd[R300_RR_CMD_0] = cmdpacket0(r300->radeon.radeonScreen, R300_RS_INST_0, high_rr);
  
  	if (InputsRead)
  		WARN_ONCE("Don't know how to satisfy InputsRead=0x%08x\n", InputsRead);
@@@ -1826,19 -1764,30 +1700,30 @@@ static void r500SetupRSUnit(GLcontext 
  		}
  	}
  
- 	/* Need at least one. This might still lock as the values are undefined... */
- 	if (in_texcoords == 0 && col_interp_nr == 0) {
- 		r300->hw.rr.cmd[R300_RR_INST_0] |= 0 | R500_RS_INST_COL_CN_WRITE | (fp_reg++ << R500_RS_INST_COL_ADDR_SHIFT);
- 		col_interp_nr++;
+ 	if (InputsRead & FRAG_BIT_WPOS) {
+ 		r300->hw.ri.cmd[R300_RI_INTERP_0 + tex_ip] |= ((rs_tex_count + 0) << R500_RS_IP_TEX_PTR_S_SHIFT) |
+ 			((rs_tex_count + 1) << R500_RS_IP_TEX_PTR_T_SHIFT) |
+ 			((rs_tex_count + 2) << R500_RS_IP_TEX_PTR_R_SHIFT) |
+ 			((rs_tex_count + 3) << R500_RS_IP_TEX_PTR_Q_SHIFT);
+ 
+ 		r300->hw.rr.cmd[R300_RR_INST_0 + tex_ip] |= R500_RS_INST_TEX_ID(tex_ip) | R500_RS_INST_TEX_CN_WRITE | R500_RS_INST_TEX_ADDR(fp_reg);
+ 		InputsRead &= ~FRAG_BIT_WPOS;
+ 		rs_tex_count += 4;
+ 		++tex_ip;
+ 		++fp_reg;
  	}
- 	r300->hw.rc.cmd[1] = 0 | (in_texcoords << R300_IT_COUNT_SHIFT)
- 	  | (col_interp_nr << R300_IC_COUNT_SHIFT)
- 	  | R300_HIRES_EN;
+ 	/* Setup default color if no color or tex was set */
+ 	if (rs_tex_count == 0 && col_ip == 0) {
+ 		r300->hw.rr.cmd[R300_RR_INST_0] |= R500_RS_INST_COL_ID(0) | R500_RS_INST_COL_CN_WRITE | R500_RS_INST_COL_ADDR(0) | R500_RS_COL_FMT(R300_RS_COL_FMT_0001);
+ 		++col_ip;
+ 	}
+ 
+ 	high_rr = (col_ip > tex_ip) ?
col_ip : tex_ip; + r300->hw.rc.cmd[1] |= (rs_tex_count << R300_IT_COUNT_SHIFT) | (col_ip << R300_IC_COUNT_SHIFT) | R300_HIRES_EN; + r300->hw.rc.cmd[2] |= 0xC0 | (high_rr - 1); - assert(high_rr >= 0); - r300->hw.rr.cmd[R300_RR_CMD_0] = cmdpacket0(r300->radeon.radeonScreen, R500_RS_INST_0, high_rr + 1); - r300->hw.rc.cmd[2] = 0xC0 | high_rr; - r300->hw.rr.cmd[R300_RR_CMD_0] = cmdpacket0(R500_RS_INST_0, high_rr); ++ r300->hw.rr.cmd[R300_RR_CMD_0] = cmdpacket0(r300->radeon.radeonScreen, R500_RS_INST_0, high_rr); if (InputsRead) WARN_ONCE("Don't know how to satisfy InputsRead=0x%08x\n", InputsRead); @@@ -2579,11 -2534,25 +2446,24 @@@ void r300UpdateShaderStates(r300Context GLcontext *ctx; ctx = rmesa->radeon.glCtx; - r300UpdateTextureState(ctx); r300SetEarlyZState(ctx); - GLuint fgdepthsrc = R300_FG_DEPTH_SRC_SCAN; - if (current_fragment_program_writes_depth(ctx)) + /* w_fmt value is set to get best performance + * see p.130 R5xx 3D acceleration guide v1.3 */ + GLuint w_fmt, fgdepthsrc; + if (current_fragment_program_writes_depth(ctx)) { fgdepthsrc = R300_FG_DEPTH_SRC_SHADER; + w_fmt = R300_W_FMT_W24 | R300_W_SRC_US; + } else { + fgdepthsrc = R300_FG_DEPTH_SRC_SCAN; + w_fmt = R300_W_FMT_W0 | R300_W_SRC_US; + } + + if (w_fmt != rmesa->hw.us_out_fmt.cmd[5]) { + R300_STATECHANGE(rmesa, us_out_fmt); + rmesa->hw.us_out_fmt.cmd[5] = w_fmt; + } + if (fgdepthsrc != rmesa->hw.fg_depth_src.cmd[1]) { R300_STATECHANGE(rmesa, fg_depth_src); rmesa->hw.fg_depth_src.cmd[1] = fgdepthsrc; diff --cc src/mesa/drivers/dri/r300/r300_swtcl.c index 153582ce489,d463ab3a2d6..f57516acf41 --- a/src/mesa/drivers/dri/r300/r300_swtcl.c +++ b/src/mesa/drivers/dri/r300/r300_swtcl.c @@@ -203,27 -266,15 +263,28 @@@ static void r300SetVertexFormat( GLcont } R300_NEWPRIM(rmesa); - R300_STATECHANGE(rmesa, vir[0]); - ((drm_r300_cmd_header_t *) rmesa->hw.vir[0].cmd)->packet0.count = + if (rmesa->radeon.radeonScreen->kernel_mm) { + R300_STATECHANGE(rmesa, vir[0]); + rmesa->hw.vir[0].cmd[0] &= 0xC000FFFF; + rmesa->hw.vir[1].cmd[0] &= 0xC000FFFF; + rmesa->hw.vir[0].cmd[0] |= + (r300VAPInputRoute0(&rmesa->hw.vir[0].cmd[R300_VIR_CNTL_0], + VB->AttribPtr, inputs, tab, nr) & 0x3FFF) << 16; + R300_STATECHANGE(rmesa, vir[1]); + rmesa->hw.vir[1].cmd[0] |= + (r300VAPInputRoute1(&rmesa->hw.vir[1].cmd[R300_VIR_CNTL_0], swizzle, + nr) & 0x3FFF) << 16; + } else { + R300_STATECHANGE(rmesa, vir[0]); + ((drm_r300_cmd_header_t *) rmesa->hw.vir[0].cmd)->packet0.count = r300VAPInputRoute0(&rmesa->hw.vir[0].cmd[R300_VIR_CNTL_0], VB->AttribPtr, inputs, tab, nr); - R300_STATECHANGE(rmesa, vir[1]); - ((drm_r300_cmd_header_t *) rmesa->hw.vir[1].cmd)->packet0.count = + R300_STATECHANGE(rmesa, vir[1]); + ((drm_r300_cmd_header_t *) rmesa->hw.vir[1].cmd)->packet0.count = r300VAPInputRoute1(&rmesa->hw.vir[1].cmd[R300_VIR_CNTL_0], swizzle, nr); + } + R300_STATECHANGE(rmesa, vic); rmesa->hw.vic.cmd[R300_VIC_CNTL_0] = r300VAPInputCntl0(ctx, InputsRead); rmesa->hw.vic.cmd[R300_VIC_CNTL_1] = r300VAPInputCntl1(ctx, InputsRead); @@@ -520,9 -638,11 +581,12 @@@ static void r300RenderStart(GLcontext * r300UpdateShaderStates(rmesa); r300EmitCacheFlush(rmesa); + - if (rmesa->dma.flush != 0 && - rmesa->dma.flush != flush_last_swtcl_prim) - rmesa->dma.flush( rmesa ); ++ /* investigate if we can put back flush optimisation if needed */ + if (rmesa->radeon.dma.flush != NULL) { + rmesa->radeon.dma.flush(ctx); + } + } static void r300RenderFinish(GLcontext *ctx) @@@ -628,29 -751,6 +692,28 @@@ void r300EmitVbufPrim(r300ContextPtr rm type = r300PrimitiveType(rmesa, primitive); 
num_verts = r300NumVerts(rmesa, vertex_nr, primitive); - start_packet3(CP_PACKET3(R300_PACKET3_3D_DRAW_VBUF_2, 0), 0); - e32(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (num_verts << 16) | type); + BEGIN_BATCH(3); + OUT_BATCH_PACKET3(R300_PACKET3_3D_DRAW_VBUF_2, 0); + OUT_BATCH(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (num_verts << 16) | type); + END_BATCH(); +} + +void r300_swtcl_flush(GLcontext *ctx, uint32_t current_offset) +{ + r300ContextPtr rmesa = R300_CONTEXT(ctx); + + rcommonEnsureCmdBufSpace(&rmesa->radeon, + rmesa->radeon.hw.max_state_size + (12*sizeof(int)), + __FUNCTION__); + radeonEmitState(&rmesa->radeon); + r300EmitVertexAOS(rmesa, + rmesa->radeon.swtcl.vertex_size, + rmesa->radeon.dma.current, + current_offset); + + r300EmitVbufPrim(rmesa, + rmesa->radeon.swtcl.hw_primitive, + rmesa->radeon.swtcl.numverts); + r300EmitCacheFlush(rmesa); + COMMIT_BATCH(); - } diff --cc src/mesa/drivers/dri/radeon/radeon_common.c index 5c34ca89fab,00000000000..840233ff896 mode 100644,000000..100644 --- a/src/mesa/drivers/dri/radeon/radeon_common.c +++ b/src/mesa/drivers/dri/radeon/radeon_common.c @@@ -1,1107 -1,0 +1,1107 @@@ +/************************************************************************** + +Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. + +The Weather Channel (TM) funded Tungsten Graphics to develop the +initial release of the Radeon 8500 driver under the XFree86 license. +This notice must be preserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +**************************************************************************/ + +/* + * Authors: + * Keith Whitwell + */ + +/* + - Scissor implementation + - buffer swap/copy ioctls + - finish/flush + - state emission + - cmdbuffer management +*/ + +#include +#include "main/glheader.h" +#include "main/imports.h" +#include "main/context.h" +#include "main/api_arrayelt.h" +#include "main/enums.h" +#include "main/colormac.h" +#include "main/light.h" +#include "main/framebuffer.h" +#include "main/simple_list.h" +#include "main/renderbuffer.h" +#include "swrast/swrast.h" +#include "vbo/vbo.h" +#include "tnl/tnl.h" +#include "tnl/t_pipeline.h" +#include "swrast_setup/swrast_setup.h" + +#include "dri_util.h" +#include "vblank.h" + +#include "radeon_common.h" +#include "radeon_bocs_wrapper.h" +#include "radeon_lock.h" +#include "radeon_drm.h" +#include "radeon_mipmap_tree.h" + +#define DEBUG_CMDBUF 0 + +/* ============================================================= + * Scissoring + */ + +static GLboolean intersect_rect(drm_clip_rect_t * out, + drm_clip_rect_t * a, drm_clip_rect_t * b) +{ + *out = *a; + if (b->x1 > out->x1) + out->x1 = b->x1; + if (b->y1 > out->y1) + out->y1 = b->y1; + if (b->x2 < out->x2) + out->x2 = b->x2; + if (b->y2 < out->y2) + out->y2 = b->y2; + if (out->x1 >= out->x2) + return GL_FALSE; + if (out->y1 >= out->y2) + return GL_FALSE; + return GL_TRUE; +} + +void radeonRecalcScissorRects(radeonContextPtr radeon) +{ + drm_clip_rect_t *out; + int i; + + /* Grow cliprect store? + */ + if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) { + while (radeon->state.scissor.numAllocedClipRects < + radeon->numClipRects) { + radeon->state.scissor.numAllocedClipRects += 1; /* zero case */ + radeon->state.scissor.numAllocedClipRects *= 2; + } + + if (radeon->state.scissor.pClipRects) + FREE(radeon->state.scissor.pClipRects); + + radeon->state.scissor.pClipRects = + MALLOC(radeon->state.scissor.numAllocedClipRects * + sizeof(drm_clip_rect_t)); + + if (radeon->state.scissor.pClipRects == NULL) { + radeon->state.scissor.numAllocedClipRects = 0; + return; + } + } + + out = radeon->state.scissor.pClipRects; + radeon->state.scissor.numClipRects = 0; + + for (i = 0; i < radeon->numClipRects; i++) { + if (intersect_rect(out, + &radeon->pClipRects[i], + &radeon->state.scissor.rect)) { + radeon->state.scissor.numClipRects++; + out++; + } + } +} + +static void radeon_get_cliprects(radeonContextPtr radeon, + struct drm_clip_rect **cliprects, + unsigned int *num_cliprects, + int *x_off, int *y_off) +{ + __DRIdrawablePrivate *dPriv = radeon->dri.drawable; + struct radeon_framebuffer *rfb = dPriv->driverPrivate; + + if (radeon->constant_cliprect) { + radeon->fboRect.x1 = 0; + radeon->fboRect.y1 = 0; + radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width; + radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height; + + *cliprects = &radeon->fboRect; + *num_cliprects = 1; + *x_off = 0; + *y_off = 0; + } else if (radeon->front_cliprects || + rfb->pf_active || dPriv->numBackClipRects == 0) { + *cliprects = dPriv->pClipRects; + *num_cliprects = dPriv->numClipRects; + *x_off = dPriv->x; + *y_off = dPriv->y; + } else { + *num_cliprects = dPriv->numBackClipRects; + *cliprects = dPriv->pBackClipRects; + *x_off = dPriv->backX; + *y_off = dPriv->backY; + } +} + +/** + * Update cliprects and scissors. 
+ */ +void radeonSetCliprects(radeonContextPtr radeon) +{ + __DRIdrawablePrivate *const drawable = radeon->dri.drawable; + __DRIdrawablePrivate *const readable = radeon->dri.readable; + struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate; + struct radeon_framebuffer *const read_rfb = readable->driverPrivate; + int x_off, y_off; + + radeon_get_cliprects(radeon, &radeon->pClipRects, + &radeon->numClipRects, &x_off, &y_off); + + if ((draw_rfb->base.Width != drawable->w) || + (draw_rfb->base.Height != drawable->h)) { + _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base, + drawable->w, drawable->h); + draw_rfb->base.Initialized = GL_TRUE; + } + + if (drawable != readable) { + if ((read_rfb->base.Width != readable->w) || + (read_rfb->base.Height != readable->h)) { + _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base, + readable->w, readable->h); + read_rfb->base.Initialized = GL_TRUE; + } + } + + if (radeon->state.scissor.enabled) + radeonRecalcScissorRects(radeon); + +} + + + +void radeonUpdateScissor( GLcontext *ctx ) +{ + radeonContextPtr rmesa = RADEON_CONTEXT(ctx); + + if ( rmesa->dri.drawable ) { + __DRIdrawablePrivate *dPriv = rmesa->dri.drawable; + + int x = ctx->Scissor.X; + int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height; + int w = ctx->Scissor.X + ctx->Scissor.Width - 1; + int h = dPriv->h - ctx->Scissor.Y - 1; + + rmesa->state.scissor.rect.x1 = x + dPriv->x; + rmesa->state.scissor.rect.y1 = y + dPriv->y; + rmesa->state.scissor.rect.x2 = w + dPriv->x + 1; + rmesa->state.scissor.rect.y2 = h + dPriv->y + 1; + + radeonRecalcScissorRects( rmesa ); + } +} + +/* ============================================================= + * Scissoring + */ + +void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h) +{ + radeonContextPtr radeon = RADEON_CONTEXT(ctx); + if (ctx->Scissor.Enabled) { + /* We don't pipeline cliprect changes */ + radeon_firevertices(radeon); + radeonUpdateScissor(ctx); + } +} + + +/* ================================================================ + * SwapBuffers with client-side throttling + */ + +static uint32_t radeonGetLastFrame(radeonContextPtr radeon) +{ + drm_radeon_getparam_t gp; + int ret; - uint32_t frame; ++ uint32_t frame = 0; + + gp.param = RADEON_PARAM_LAST_FRAME; + gp.value = (int *)&frame; + ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM, + &gp, sizeof(gp)); + if (ret) { + fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, + ret); + exit(1); + } + + return frame; +} + +uint32_t radeonGetAge(radeonContextPtr radeon) +{ + drm_radeon_getparam_t gp; + int ret; + uint32_t age; + + gp.param = RADEON_PARAM_LAST_CLEAR; + gp.value = (int *)&age; + ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM, + &gp, sizeof(gp)); + if (ret) { + fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, + ret); + exit(1); + } + + return age; +} + +static void radeonEmitIrqLocked(radeonContextPtr radeon) +{ + drm_radeon_irq_emit_t ie; + int ret; + + ie.irq_seq = &radeon->iw.irq_seq; + ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT, + &ie, sizeof(ie)); + if (ret) { + fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__, + ret); + exit(1); + } +} + +static void radeonWaitIrq(radeonContextPtr radeon) +{ + int ret; + + do { + ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT, + &radeon->iw, sizeof(radeon->iw)); + } while (ret && (errno == EINTR || errno == EBUSY)); + + if (ret) { + fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, + ret); + exit(1); + } +} + 
+static void radeonWaitForFrameCompletion(radeonContextPtr radeon) +{ + drm_radeon_sarea_t *sarea = radeon->sarea; + + if (radeon->do_irqs) { + if (radeonGetLastFrame(radeon) < sarea->last_frame) { + if (!radeon->irqsEmitted) { + while (radeonGetLastFrame(radeon) < + sarea->last_frame) ; + } else { + UNLOCK_HARDWARE(radeon); + radeonWaitIrq(radeon); + LOCK_HARDWARE(radeon); + } + radeon->irqsEmitted = 10; + } + + if (radeon->irqsEmitted) { + radeonEmitIrqLocked(radeon); + radeon->irqsEmitted--; + } + } else { + while (radeonGetLastFrame(radeon) < sarea->last_frame) { + UNLOCK_HARDWARE(radeon); + if (radeon->do_usleeps) + DO_USLEEP(1); + LOCK_HARDWARE(radeon); + } + } +} + +/* wait for idle */ +void radeonWaitForIdleLocked(radeonContextPtr radeon) +{ + int ret; + int i = 0; + + do { + ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE); + if (ret) + DO_USLEEP(1); + } while (ret && ++i < 100); + + if (ret < 0) { + UNLOCK_HARDWARE(radeon); + fprintf(stderr, "Error: R300 timed out... exiting\n"); + exit(-1); + } +} + +static void radeonWaitForIdle(radeonContextPtr radeon) +{ + LOCK_HARDWARE(radeon); + radeonWaitForIdleLocked(radeon); + UNLOCK_HARDWARE(radeon); +} + +static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb) +{ + int current_page = rfb->pf_current_page; + int next_page = (current_page + 1) % rfb->pf_num_pages; + struct gl_renderbuffer *tmp_rb; + + /* Exchange renderbuffers if necessary but make sure their + * reference counts are preserved. + */ + if (rfb->color_rb[current_page] && + rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer != + &rfb->color_rb[current_page]->base) { + tmp_rb = NULL; + _mesa_reference_renderbuffer(&tmp_rb, + rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer); + tmp_rb = &rfb->color_rb[current_page]->base; + _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb); + _mesa_reference_renderbuffer(&tmp_rb, NULL); + } + + if (rfb->color_rb[next_page] && + rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer != + &rfb->color_rb[next_page]->base) { + tmp_rb = NULL; + _mesa_reference_renderbuffer(&tmp_rb, + rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer); + tmp_rb = &rfb->color_rb[next_page]->base; + _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb); + _mesa_reference_renderbuffer(&tmp_rb, NULL); + } +} + +/* Copy the back color buffer to the front color buffer. 
+ */ +void radeonCopyBuffer( __DRIdrawablePrivate *dPriv, + const drm_clip_rect_t *rect) +{ + radeonContextPtr rmesa; + struct radeon_framebuffer *rfb; + GLint nbox, i, ret; + + assert(dPriv); + assert(dPriv->driContextPriv); + assert(dPriv->driContextPriv->driverPrivate); + + rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate; + + LOCK_HARDWARE(rmesa); + + rfb = dPriv->driverPrivate; + + if ( RADEON_DEBUG & DEBUG_IOCTL ) { + fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx ); + } + + nbox = dPriv->numClipRects; /* must be in locked region */ + + for ( i = 0 ; i < nbox ; ) { + GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox ); + drm_clip_rect_t *box = dPriv->pClipRects; + drm_clip_rect_t *b = rmesa->sarea->boxes; + GLint n = 0; + + for ( ; i < nr ; i++ ) { + + *b = box[i]; + + if (rect) + { + if (rect->x1 > b->x1) + b->x1 = rect->x1; + if (rect->y1 > b->y1) + b->y1 = rect->y1; + if (rect->x2 < b->x2) + b->x2 = rect->x2; + if (rect->y2 < b->y2) + b->y2 = rect->y2; + + if (b->x1 >= b->x2 || b->y1 >= b->y2) + continue; + } + + b++; + n++; + } + rmesa->sarea->nbox = n; + + if (!n) + continue; + + ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP ); + + if ( ret ) { + fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret ); + UNLOCK_HARDWARE( rmesa ); + exit( 1 ); + } + } + + UNLOCK_HARDWARE( rmesa ); +} + +static int radeonScheduleSwap(__DRIdrawablePrivate *dPriv, GLboolean *missed_target) +{ + radeonContextPtr rmesa; + + rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate; + radeon_firevertices(rmesa); + + LOCK_HARDWARE( rmesa ); + + if (!dPriv->numClipRects) { + UNLOCK_HARDWARE(rmesa); + usleep(10000); /* throttle invisible client 10ms */ + return 0; + } + + radeonWaitForFrameCompletion(rmesa); + + UNLOCK_HARDWARE(rmesa); + driWaitForVBlank(dPriv, missed_target); + + return 0; +} + +static GLboolean radeonPageFlip( __DRIdrawablePrivate *dPriv ) +{ + radeonContextPtr radeon; + GLint ret; + __DRIscreenPrivate *psp; + struct radeon_renderbuffer *rrb; + struct radeon_framebuffer *rfb; + + assert(dPriv); + assert(dPriv->driContextPriv); + assert(dPriv->driContextPriv->driverPrivate); + + radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate; + rfb = dPriv->driverPrivate; + rrb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer; + + psp = dPriv->driScreenPriv; + + LOCK_HARDWARE(radeon); + + if ( RADEON_DEBUG & DEBUG_IOCTL ) { + fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__, + radeon->sarea->pfCurrentPage, radeon->sarea->pfState); + } + drm_clip_rect_t *box = dPriv->pClipRects; + drm_clip_rect_t *b = radeon->sarea->boxes; + b[0] = box[0]; + radeon->sarea->nbox = 1; + + ret = drmCommandNone( radeon->dri.fd, DRM_RADEON_FLIP ); + + UNLOCK_HARDWARE(radeon); + + if ( ret ) { + fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret ); + return GL_FALSE; + } + + if (!rfb->pf_active) + return GL_FALSE; + + rfb->pf_current_page = radeon->sarea->pfCurrentPage; + radeon_flip_renderbuffers(rfb); + radeon_draw_buffer(radeon->glCtx, &rfb->base); + + return GL_TRUE; +} + + +/** + * Swap front and back buffer. 
+ */ +void radeonSwapBuffers(__DRIdrawablePrivate * dPriv) +{ + int64_t ust; + __DRIscreenPrivate *psp; + + if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) { + radeonContextPtr radeon; + GLcontext *ctx; + + radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate; + ctx = radeon->glCtx; + + if (ctx->Visual.doubleBufferMode) { + GLboolean missed_target; + struct radeon_framebuffer *rfb = dPriv->driverPrivate; + _mesa_notifySwapBuffers(ctx);/* flush pending rendering comands */ + + radeonScheduleSwap(dPriv, &missed_target); + + if (rfb->pf_active) { + radeonPageFlip(dPriv); + } else { + radeonCopyBuffer(dPriv, NULL); + } + + psp = dPriv->driScreenPriv; + + rfb->swap_count++; + (*psp->systemTime->getUST)( & ust ); + if ( missed_target ) { + rfb->swap_missed_count++; + rfb->swap_missed_ust = ust - rfb->swap_ust; + } + + rfb->swap_ust = ust; + radeon->hw.all_dirty = GL_TRUE; + } + } else { + /* XXX this shouldn't be an error but we can't handle it for now */ + _mesa_problem(NULL, "%s: drawable has no context!", + __FUNCTION__); + } +} + +void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv, + int x, int y, int w, int h ) +{ + if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) { + radeonContextPtr radeon; + GLcontext *ctx; + + radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate; + ctx = radeon->glCtx; + + if (ctx->Visual.doubleBufferMode) { + drm_clip_rect_t rect; + rect.x1 = x + dPriv->x; + rect.y1 = (dPriv->h - y - h) + dPriv->y; + rect.x2 = rect.x1 + w; + rect.y2 = rect.y1 + h; + _mesa_notifySwapBuffers(ctx); /* flush pending rendering comands */ + radeonCopyBuffer(dPriv, &rect); + } + } else { + /* XXX this shouldn't be an error but we can't handle it for now */ + _mesa_problem(NULL, "%s: drawable has no context!", + __FUNCTION__); + } +} + +void radeon_draw_buffer(GLcontext *ctx, struct gl_framebuffer *fb) +{ + radeonContextPtr radeon = RADEON_CONTEXT(ctx); + struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL, + *rrbColor = NULL; + + + if (!fb) { + /* this can happen during the initial context initialization */ + return; + } + + /* radeons only handle 1 color draw so far */ + if (fb->_NumColorDrawBuffers != 1) { + radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE); + return; + } + + /* Do this here, note core Mesa, since this function is called from + * many places within the driver. + */ + if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) { + /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */ + _mesa_update_framebuffer(ctx); + /* this updates the DrawBuffer's Width/Height if it's a FBO */ + _mesa_update_draw_buffer_bounds(ctx); + } + + if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) { + /* this may occur when we're called by glBindFrameBuffer() during + * the process of someone setting up renderbuffers, etc. 
+ */ + /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/ + return; + } + + if (fb->Name) + ;/* do something depthy/stencily TODO */ + + + /* none */ + if (fb->Name == 0) { + if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) { + rrbColor = (void *)fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer; + radeon->front_cliprects = GL_TRUE; + } else { + rrbColor = (void *)fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer; + radeon->front_cliprects = GL_FALSE; + } + } else { + /* user FBO in theory */ + struct radeon_renderbuffer *rrb; + rrb = (void *)fb->_ColorDrawBuffers[0]; + rrbColor = rrb; + radeon->constant_cliprect = GL_TRUE; + } + + if (rrbColor == NULL) + radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE); + else + radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE); + + + + if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) { + rrbDepth = (struct radeon_renderbuffer *)fb->_DepthBuffer->Wrapped; + if (rrbDepth && rrbDepth->bo) { + radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE); + } else { + radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE); + } + } else { + radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE); + rrbDepth = NULL; + } + + /* TODO stencil things */ + if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) { + rrbStencil = (struct radeon_renderbuffer *)fb->_DepthBuffer->Wrapped; + if (rrbStencil && rrbStencil->bo) { + radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE); + /* need to re-compute stencil hw state */ + if (ctx->Driver.Enable != NULL) + ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled); + else + ctx->NewState |= _NEW_STENCIL; + if (!rrbDepth) + rrbDepth = rrbStencil; + } else { + radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE); + } + } else { + radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE); + if (ctx->Driver.Enable != NULL) + ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled); + else + ctx->NewState |= _NEW_STENCIL; + } + + /* Update culling direction which changes depending on the + * orientation of the buffer: + */ + if (ctx->Driver.FrontFace) + ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace); + else + ctx->NewState |= _NEW_POLYGON; + + /* + * Update depth test state + */ + if (ctx->Driver.Enable) { + if (ctx->Depth.Test && fb->Visual.depthBits > 0) { + ctx->Driver.Enable(ctx, GL_DEPTH_TEST, GL_TRUE); + } else { + ctx->Driver.Enable(ctx, GL_DEPTH_TEST, GL_FALSE); + } + } else { + ctx->NewState |= _NEW_DEPTH; + } + + radeon->state.depth.rrb = rrbDepth; + + radeon->state.color.rrb = rrbColor; + + /* update viewport since it depends on window size */ + if (ctx->Driver.Viewport) { + ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y, + ctx->Viewport.Width, ctx->Viewport.Height); + } else { + ctx->NewState |= _NEW_VIEWPORT; + } + + /* Set state we know depends on drawable parameters: + */ + if (ctx->Driver.Scissor) + ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y, + ctx->Scissor.Width, ctx->Scissor.Height); + radeon->NewGLState |= _NEW_SCISSOR; +} + +/** + * Called via glDrawBuffer. 
+ */ +void radeonDrawBuffer( GLcontext *ctx, GLenum mode ) +{ + radeonContextPtr radeon = RADEON_CONTEXT(ctx); + + if (RADEON_DEBUG & DEBUG_DRI) + fprintf(stderr, "%s %s\n", __FUNCTION__, + _mesa_lookup_enum_by_nr( mode )); + + radeon_firevertices(radeon); /* don't pipeline cliprect changes */ + + radeon_draw_buffer(ctx, ctx->DrawBuffer); +} + +void radeonReadBuffer( GLcontext *ctx, GLenum mode ) +{ + /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */ + if (ctx->ReadBuffer == ctx->DrawBuffer) { + /* This will update FBO completeness status. + * A framebuffer will be incomplete if the GL_READ_BUFFER setting + * refers to a missing renderbuffer. Calling glReadBuffer can set + * that straight and can make the drawing buffer complete. + */ + radeon_draw_buffer(ctx, ctx->DrawBuffer); + } +} + + +/* Turn on/off page flipping according to the flags in the sarea: + */ +void radeonUpdatePageFlipping(radeonContextPtr radeon) +{ + struct radeon_framebuffer *rfb = radeon->dri.drawable->driverPrivate; + + rfb->pf_active = radeon->sarea->pfState; + rfb->pf_current_page = radeon->sarea->pfCurrentPage; + rfb->pf_num_pages = 2; + radeon_flip_renderbuffers(rfb); + radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer); +} + +void radeon_window_moved(radeonContextPtr radeon) +{ + GLcontext *ctx = radeon->glCtx; + __DRIdrawablePrivate *dPriv = radeon->dri.drawable; + struct radeon_framebuffer *rfb = dPriv->driverPrivate; + + if (!radeon->radeonScreen->driScreen->dri2.enabled) { + radeonUpdatePageFlipping(radeon); + } + radeonSetCliprects(radeon); +} + +void radeon_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei width, GLsizei height) +{ + radeonContextPtr radeon = RADEON_CONTEXT(ctx); + __DRIcontext *driContext = radeon->dri.context; + void (*old_viewport)(GLcontext *ctx, GLint x, GLint y, + GLsizei w, GLsizei h); + + if (!driContext->driScreenPriv->dri2.enabled) + return; + + radeon_update_renderbuffers(driContext, driContext->driDrawablePriv); + if (driContext->driDrawablePriv != driContext->driReadablePriv) + radeon_update_renderbuffers(driContext, driContext->driReadablePriv); + + old_viewport = ctx->Driver.Viewport; + ctx->Driver.Viewport = NULL; + radeon->dri.drawable = driContext->driDrawablePriv; + radeon_window_moved(radeon); + radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer); + ctx->Driver.Viewport = old_viewport; + + +} +static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state ) +{ + int i; + int dwords = (*state->check)(radeon->glCtx, state); + + fprintf(stderr, "emit %s %d/%d\n", state->name, state->cmd_size, dwords); + + if (RADEON_DEBUG & DEBUG_VERBOSE) + for (i = 0 ; i < dwords; i++) + fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]); + +} + +static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean dirty) +{ + BATCH_LOCALS(radeon); + struct radeon_state_atom *atom; + int dwords; + + if (radeon->vtbl.pre_emit_atoms) + radeon->vtbl.pre_emit_atoms(radeon); + + /* Emit actual atoms */ + foreach(atom, &radeon->hw.atomlist) { + if ((atom->dirty || radeon->hw.all_dirty) == dirty) { + dwords = (*atom->check) (radeon->glCtx, atom); + if (dwords) { + if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) { + radeon_print_state_atom(radeon, atom); + } + if (atom->emit) { + (*atom->emit)(radeon->glCtx, atom); + } else { + BEGIN_BATCH_NO_AUTOSTATE(dwords); + OUT_BATCH_TABLE(atom->cmd, dwords); + END_BATCH(); + } + atom->dirty = GL_FALSE; + } else { + if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) { + fprintf(stderr, 
" skip state %s\n", + atom->name); + } + } + } + } + + COMMIT_BATCH(); +} + +void radeonEmitState(radeonContextPtr radeon) +{ + if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS)) + fprintf(stderr, "%s\n", __FUNCTION__); + + if (radeon->vtbl.pre_emit_state) + radeon->vtbl.pre_emit_state(radeon); + + /* this code used to return here but now it emits zbs */ + if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty) + return; + + /* To avoid going across the entire set of states multiple times, just check + * for enough space for the case of emitting all state, and inline the + * radeonAllocCmdBuf code here without all the checks. + */ + rcommonEnsureCmdBufSpace(radeon, radeon->hw.max_state_size, __FUNCTION__); + + if (!radeon->cmdbuf.cs->cdw) { + if (RADEON_DEBUG & DEBUG_STATE) + fprintf(stderr, "Begin reemit state\n"); + + radeonEmitAtoms(radeon, GL_FALSE); + } + + if (RADEON_DEBUG & DEBUG_STATE) + fprintf(stderr, "Begin dirty state\n"); + + radeonEmitAtoms(radeon, GL_TRUE); + radeon->hw.is_dirty = GL_FALSE; + radeon->hw.all_dirty = GL_FALSE; + +} + + +void radeonFlush(GLcontext *ctx) +{ + radeonContextPtr radeon = RADEON_CONTEXT(ctx); + if (RADEON_DEBUG & DEBUG_IOCTL) + fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw); + + if (radeon->dma.flush) + radeon->dma.flush( ctx ); + + radeonEmitState(radeon); + + if (radeon->cmdbuf.cs->cdw) + rcommonFlushCmdBuf(radeon, __FUNCTION__); +} + +/* Make sure all commands have been sent to the hardware and have + * completed processing. + */ +void radeonFinish(GLcontext * ctx) +{ + radeonContextPtr radeon = RADEON_CONTEXT(ctx); + struct gl_framebuffer *fb = ctx->DrawBuffer; + int i; + + radeonFlush(ctx); + + if (radeon->radeonScreen->kernel_mm) { + for (i = 0; i < fb->_NumColorDrawBuffers; i++) { + struct radeon_renderbuffer *rrb; + rrb = (struct radeon_renderbuffer *)fb->_ColorDrawBuffers[i]; + if (rrb->bo) + radeon_bo_wait(rrb->bo); + } + } else if (radeon->do_irqs) { + LOCK_HARDWARE(radeon); + radeonEmitIrqLocked(radeon); + UNLOCK_HARDWARE(radeon); + radeonWaitIrq(radeon); + } else { + radeonWaitForIdle(radeon); + } +} + +/* cmdbuffer */ +/** + * Send the current command buffer via ioctl to the hardware. + */ +int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller) +{ + int ret = 0; + + if (rmesa->cmdbuf.flushing) { + fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n"); + exit(-1); + } + rmesa->cmdbuf.flushing = 1; + + if (RADEON_DEBUG & DEBUG_IOCTL) { + fprintf(stderr, "%s from %s - %i cliprects\n", + __FUNCTION__, caller, rmesa->numClipRects); + } + + if (rmesa->cmdbuf.cs->cdw) { + ret = radeon_cs_emit(rmesa->cmdbuf.cs); + rmesa->hw.all_dirty = GL_TRUE; + } + radeon_cs_erase(rmesa->cmdbuf.cs); + rmesa->cmdbuf.flushing = 0; + return ret; +} + +int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller) +{ + int ret; + + radeonReleaseDmaRegion(rmesa); + + LOCK_HARDWARE(rmesa); + ret = rcommonFlushCmdBufLocked(rmesa, caller); + UNLOCK_HARDWARE(rmesa); + + if (ret) { + fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret); + _mesa_exit(ret); + } + + return ret; +} + +/** + * Make sure that enough space is available in the command buffer + * by flushing if necessary. 
+ * + * \param dwords The number of dwords we need to be free on the command buffer + */ +void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller) +{ + if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size || + radeon_cs_need_flush(rmesa->cmdbuf.cs)) { + rcommonFlushCmdBuf(rmesa, caller); + } +} + +void rcommonInitCmdBuf(radeonContextPtr rmesa) +{ + GLuint size; + /* Initialize command buffer */ + size = 256 * driQueryOptioni(&rmesa->optionCache, + "command_buffer_size"); + if (size < 2 * rmesa->hw.max_state_size) { + size = 2 * rmesa->hw.max_state_size + 65535; + } + if (size > 64 * 256) + size = 64 * 256; + + if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) { + fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n", + sizeof(drm_r300_cmd_header_t)); + fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", + sizeof(drm_radeon_cmd_buffer_t)); + fprintf(stderr, + "Allocating %d bytes command buffer (max state is %d bytes)\n", + size * 4, rmesa->hw.max_state_size * 4); + } + + if (rmesa->radeonScreen->kernel_mm) { + int fd = rmesa->radeonScreen->driScreen->fd; + rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd); + } else { + rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa); + } + if (rmesa->cmdbuf.csm == NULL) { + /* FIXME: fatal error */ + return; + } + rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size); + assert(rmesa->cmdbuf.cs != NULL); + rmesa->cmdbuf.size = size; + + if (!rmesa->radeonScreen->kernel_mm) { + radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]); + radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size); + } else { + struct drm_radeon_gem_info mminfo; + + if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo))) + { + radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_size); + radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size); + } + } + +} +/** + * Destroy the command buffer + */ +void rcommonDestroyCmdBuf(radeonContextPtr rmesa) +{ + radeon_cs_destroy(rmesa->cmdbuf.cs); + if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) { + radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm); + } else { + radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm); + } +} + +void rcommonBeginBatch(radeonContextPtr rmesa, int n, + int dostate, + const char *file, + const char *function, + int line) +{ + rcommonEnsureCmdBufSpace(rmesa, n, function); + if (!rmesa->cmdbuf.cs->cdw && dostate) { + if (RADEON_DEBUG & DEBUG_IOCTL) + fprintf(stderr, "Reemit state after flush (from %s)\n", function); + radeonEmitState(rmesa); + } + radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line); + + if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL) + fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n", + n, rmesa->cmdbuf.cs->cdw, function, line); + +} + + + diff --cc src/mesa/drivers/dri/radeon/radeon_screen.c index 02101978ac4,e964feb9cc4..1d4f008cbc7 --- a/src/mesa/drivers/dri/radeon/radeon_screen.c +++ b/src/mesa/drivers/dri/radeon/radeon_screen.c @@@ -714,162 -819,9 +714,162 @@@ static int radeon_set_screen_flags(rade default: fprintf(stderr, "unknown chip id 0x%x, can't guess.\n", - dri_priv->deviceID); + device_id); + return -1; + } + + return 0; +} + + +/* Create the device specific screen private data struct. 
+ */ +static radeonScreenPtr +radeonCreateScreen( __DRIscreenPrivate *sPriv ) +{ + radeonScreenPtr screen; + RADEONDRIPtr dri_priv = (RADEONDRIPtr)sPriv->pDevPriv; + unsigned char *RADEONMMIO = NULL; + int i; + int ret; - uint32_t temp; ++ uint32_t temp = 0; + + if (sPriv->devPrivSize != sizeof(RADEONDRIRec)) { + fprintf(stderr,"\nERROR! sizeof(RADEONDRIRec) does not match passed size from device driver\n"); + return GL_FALSE; + } + + /* Allocate the private area */ + screen = (radeonScreenPtr) CALLOC( sizeof(*screen) ); + if ( !screen ) { + __driUtilMessage("%s: Could not allocate memory for screen structure", + __FUNCTION__); return NULL; } + +#if DO_DEBUG && RADEON_COMMON && defined(RADEON_COMMON_FOR_R300) + RADEON_DEBUG = driParseDebugString(getenv("RADEON_DEBUG"), debug_control); +#endif + + /* parse information in __driConfigOptions */ + driParseOptionInfo (&screen->optionCache, + __driConfigOptions, __driNConfigOptions); + + /* This is first since which regions we map depends on whether or + * not we are using a PCI card. + */ + screen->card_type = (dri_priv->IsPCI ? RADEON_CARD_PCI : RADEON_CARD_AGP); + { + int ret; + +#ifdef RADEON_PARAM_KERNEL_MM + ret = radeonGetParam( sPriv->fd, RADEON_PARAM_KERNEL_MM, + &screen->kernel_mm); + + if (ret && ret != -EINVAL) { + FREE( screen ); + fprintf(stderr, "drm_radeon_getparam_t (RADEON_OFFSET): %d\n", ret); + return NULL; + } + + if (ret == -EINVAL) + screen->kernel_mm = 0; +#endif + + ret = radeonGetParam( sPriv->fd, RADEON_PARAM_GART_BUFFER_OFFSET, + &screen->gart_buffer_offset); + + if (ret) { + FREE( screen ); + fprintf(stderr, "drm_radeon_getparam_t (RADEON_PARAM_GART_BUFFER_OFFSET): %d\n", ret); + return NULL; + } + + ret = radeonGetParam( sPriv->fd, RADEON_PARAM_GART_BASE, + &screen->gart_base); + if (ret) { + FREE( screen ); + fprintf(stderr, "drm_radeon_getparam_t (RADEON_PARAM_GART_BASE): %d\n", ret); + return NULL; + } + + ret = radeonGetParam( sPriv->fd, RADEON_PARAM_IRQ_NR, + &screen->irq); + if (ret) { + FREE( screen ); + fprintf(stderr, "drm_radeon_getparam_t (RADEON_PARAM_IRQ_NR): %d\n", ret); + return NULL; + } + screen->drmSupportsCubeMapsR200 = (sPriv->drm_version.minor >= 7); + screen->drmSupportsBlendColor = (sPriv->drm_version.minor >= 11); + screen->drmSupportsTriPerf = (sPriv->drm_version.minor >= 16); + screen->drmSupportsFragShader = (sPriv->drm_version.minor >= 18); + screen->drmSupportsPointSprites = (sPriv->drm_version.minor >= 13); + screen->drmSupportsCubeMapsR100 = (sPriv->drm_version.minor >= 15); + screen->drmSupportsVertexProgram = (sPriv->drm_version.minor >= 25); + } + + if (!screen->kernel_mm) { + screen->mmio.handle = dri_priv->registerHandle; + screen->mmio.size = dri_priv->registerSize; + if ( drmMap( sPriv->fd, + screen->mmio.handle, + screen->mmio.size, + &screen->mmio.map ) ) { + FREE( screen ); + __driUtilMessage("%s: drmMap failed\n", __FUNCTION__ ); + return NULL; + } + + RADEONMMIO = screen->mmio.map; + + screen->status.handle = dri_priv->statusHandle; + screen->status.size = dri_priv->statusSize; + if ( drmMap( sPriv->fd, + screen->status.handle, + screen->status.size, + &screen->status.map ) ) { + drmUnmap( screen->mmio.map, screen->mmio.size ); + FREE( screen ); + __driUtilMessage("%s: drmMap (2) failed\n", __FUNCTION__ ); + return NULL; + } + screen->scratch = (__volatile__ uint32_t *) + ((GLubyte *)screen->status.map + RADEON_SCRATCH_REG_OFFSET); + + screen->buffers = drmMapBufs( sPriv->fd ); + if ( !screen->buffers ) { + drmUnmap( screen->status.map, screen->status.size ); + drmUnmap( 
screen->mmio.map, screen->mmio.size ); + FREE( screen ); + __driUtilMessage("%s: drmMapBufs failed\n", __FUNCTION__ ); + return NULL; + } + + if ( dri_priv->gartTexHandle && dri_priv->gartTexMapSize ) { + screen->gartTextures.handle = dri_priv->gartTexHandle; + screen->gartTextures.size = dri_priv->gartTexMapSize; + if ( drmMap( sPriv->fd, + screen->gartTextures.handle, + screen->gartTextures.size, + (drmAddressPtr)&screen->gartTextures.map ) ) { + drmUnmapBufs( screen->buffers ); + drmUnmap( screen->status.map, screen->status.size ); + drmUnmap( screen->mmio.map, screen->mmio.size ); + FREE( screen ); + __driUtilMessage("%s: drmMap failed for GART texture area\n", __FUNCTION__); + return NULL; + } + + screen->gart_texture_offset = dri_priv->gartTexOffset + screen->gart_base; + } + } + + + ret = radeon_set_screen_flags(screen, dri_priv->deviceID); + if (ret == -1) + return NULL; + if ((screen->chip_family == CHIP_FAMILY_R350 || screen->chip_family == CHIP_FAMILY_R300) && sPriv->ddx_version.minor < 2) { fprintf(stderr, "xf86-video-ati-6.6.2 or newer needed for Radeon 9500/9700/9800 cards.\n"); @@@ -1340,26 -1117,7 +1340,26 @@@ radeonCreateBuffer( __DRIscreenPrivate static void radeonDestroyBuffer(__DRIdrawablePrivate *driDrawPriv) { - _mesa_reference_framebuffer((GLframebuffer **)(&(driDrawPriv->driverPrivate)), NULL); + struct radeon_renderbuffer *rb; + struct radeon_framebuffer *rfb; + + rfb = (void*)driDrawPriv->driverPrivate; + rb = (void *)rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer; + if (rb && rb->bo) { + radeon_bo_unref(rb->bo); + rb->bo = NULL; + } + rb = (void *)rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer; + if (rb && rb->bo) { + radeon_bo_unref(rb->bo); + rb->bo = NULL; + } + rb = (void *)rfb->base.Attachment[BUFFER_DEPTH].Renderbuffer; + if (rb && rb->bo) { + radeon_bo_unref(rb->bo); + rb->bo = NULL; + } - _mesa_unreference_framebuffer((GLframebuffer **)(&(driDrawPriv->driverPrivate))); ++ _mesa_reference_framebuffer((GLframebuffer **)(&(driDrawPriv->driverPrivate)), NULL); } #if RADEON_COMMON && defined(RADEON_COMMON_FOR_R300)