#include "r200_sanity.h"
#include "radeon_reg.h"
-static void print_state_atom( struct radeon_state_atom *state )
-{
- int i;
-
- fprintf(stderr, "emit %s/%d\n", state->name, state->cmd_size);
-
- if (0 & R200_DEBUG & DEBUG_VERBOSE)
- for (i = 0 ; i < state->cmd_size ; i++)
- fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
-
-}
+#define DEBUG_CMDBUF 0
/* The state atoms will be emitted in the order they appear in the atom list,
* so this step is important.
rmesa->backup_store.cmd_used += size;
}
if (R200_DEBUG & DEBUG_STATE)
- print_state_atom( atom );
+ radeon_print_state_atom( atom );
}
}
if ((atom->dirty || r200->hw.all_dirty) == dirty) {
dwords = (*atom->check) (r200->radeon.glCtx, atom);
if (dwords) {
- // if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
- // r300PrintStateAtom(r300, atom);
- // }
+ if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
+ radeon_print_state_atom(atom);
+ }
if (atom->emit) {
(*atom->emit)(r200->radeon.glCtx, atom);
} else {
}
atom->dirty = GL_FALSE;
} else {
- // if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
- // fprintf(stderr, " skip state %s\n",
- // atom->name);
- // }
+ if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
+ fprintf(stderr, " skip state %s\n",
+ atom->name);
+ }
}
}
}
#include "main/context.h"
#include "swrast/swrast.h"
+#include "radeon_cs.h"
#include "r200_context.h"
+
#include "common_cmdbuf.h"
#include "r200_state.h"
#include "r200_ioctl.h"
rmesa->dma.flush( ctx );
r200EmitState( rmesa );
-
- if (rmesa->store.cmd_used)
+
+ if (rmesa->radeon.cmdbuf.cs->cdw)
rcommonFlushCmdBuf( &rmesa->radeon, __FUNCTION__ );
}
}
#endif
-static inline uint32_t cmdpacket3_clip(int cmd_type)
+/* Build a legacy-drm command header dword for a packet3.  The old
+ * fixed-type helpers (cmdpacket3 / cmdpacket3_clip) are merged into one
+ * that takes the cmd_type (RADEON_CMD_PACKET3 or RADEON_CMD_PACKET3_CLIP). */
+static inline uint32_t cmdpacket3(int cmd_type)
{
	drm_radeon_cmd_header_t cmd;
	cmd.i = 0;
-	cmd.header.cmd_type = RADEON_CMD_PACKET3_CLIP;
+	cmd.header.cmd_type = cmd_type;
	return (uint32_t)cmd.i;
}
-#define OUT_BATCH_PACKET3_CLIP(packet, num_extra) do { \
+
+/* Emit a packet3 into the batch.  Legacy (non-kernel_mm) path: a drm
+ * command header dword followed by the CP_PACKET3 header (previously the
+ * raw packet was emitted without CP_PACKET3 encoding).  kernel_mm path:
+ * a CP_PACKET2 pad dword followed by the CP_PACKET3 header. */
+#define OUT_BATCH_PACKET3(packet, num_extra) do { \
	if (!b_l_rmesa->radeonScreen->kernel_mm) { \
-		OUT_BATCH(cmdpacket3_clip(0)); \
-		OUT_BATCH(packet); \
+		OUT_BATCH(cmdpacket3(RADEON_CMD_PACKET3)); \
+		OUT_BATCH(CP_PACKET3((packet), (num_extra))); \
	} else { \
		OUT_BATCH(CP_PACKET2); \
		OUT_BATCH(CP_PACKET3((packet), (num_extra))); \
	} \
	} while(0)
-static inline uint32_t cmdpacket3(int cmd_type)
-{
- drm_radeon_cmd_header_t cmd;
-
- cmd.i = 0;
- cmd.header.cmd_type = RADEON_CMD_PACKET3;
-
- return (uint32_t)cmd.i;
-
-}
-#define OUT_BATCH_PACKET3(packet, num_extra) do { \
+#define OUT_BATCH_PACKET3_CLIP(packet, num_extra) do { \
if (!b_l_rmesa->radeonScreen->kernel_mm) { \
- OUT_BATCH(cmdpacket3(0)); \
- OUT_BATCH(packet); \
+ OUT_BATCH(cmdpacket3(RADEON_CMD_PACKET3_CLIP)); \
+ OUT_BATCH(CP_PACKET3((packet), (num_extra))); \
} else { \
OUT_BATCH(CP_PACKET2); \
OUT_BATCH(CP_PACKET3((packet), (num_extra))); \
GLuint count = VB->Count;
GLuint i, emitsize;
+ fprintf(stderr,"emit arrays\n");
for ( i = 0; i < 15; i++ ) {
GLubyte attrib = vimap_rev[i];
if (attrib != 255) {
if (!rmesa->tcl.vertex_data[i].buf) {
if (ctx->VertexProgram._Enabled)
rcommon_emit_vector( ctx,
- &(rmesa->tcl.aos[i]),
+ &(rmesa->tcl.aos[nr]),
(char *)VB->AttribPtr[attrib]->data,
1,
VB->AttribPtr[attrib]->stride,
count);
else
r200_emit_vecfog( ctx,
- &(rmesa->tcl.aos[i]),
+ &(rmesa->tcl.aos[nr]),
(char *)VB->AttribPtr[attrib]->data,
VB->AttribPtr[attrib]->stride,
count);
}
if (!rmesa->tcl.vertex_data[i].buf) {
rcommon_emit_vector( ctx,
- &(rmesa->tcl.aos[i]),
+ &(rmesa->tcl.aos[nr]),
(char *)VB->AttribPtr[attrib]->data,
emitsize,
VB->AttribPtr[attrib]->stride,
}
after_emit:
assert(nr < 12);
+ nr++;
// component[nr++] = &rmesa->tcl.vertex_data[i];
}
}
void r200ReleaseArrays( GLcontext *ctx, GLuint newinputs )
{
r200ContextPtr rmesa = R200_CONTEXT( ctx );
-
- /* only do it for changed inputs ? */
int i;
- for (i = 0; i < 15; i++) {
- // if (newinputs & (1 << i))
- // r200ReleaseDmaRegion( rmesa,
- // &rmesa->tcl.vertex_data[i], __FUNCTION__ );
+ for (i = 0; i < rmesa->tcl.nr_aos_components; i++) {
+ if (rmesa->tcl.aos[i].bo) {
+ rmesa->tcl.aos[i].bo = radeon_bo_unref(rmesa->tcl.aos[i].bo);
+ }
}
}
#include "tnl/t_pipeline.h"
#include "swrast_setup/swrast_setup.h"
+#include "radeon_buffer.h"
+#include "radeon_cs.h"
+#include "common_context.h"
+#include "common_cmdbuf.h"
#include "r200_context.h"
#include "r200_ioctl.h"
#include "r200_state.h"
VP_CHECK( tcl_vpp_size, ctx->VertexProgram.Current->Base.NumNativeParameters > 96 )
-#if 0
-static int ctx_emit(GLcontext *ctx, struct radeon_state_atom *atom)
+/* Emit the CTX state atom into the command stream.  The depth and color
+ * offset slots are replaced with buffer-object relocations when a
+ * renderbuffer is bound; otherwise the raw atom values are passed through.
+ * Pitch values are recomputed from the bound renderbuffer. */
+static void ctx_emit(GLcontext *ctx, struct radeon_state_atom *atom)
{
	r200ContextPtr r200 = R200_CONTEXT(ctx);
	BATCH_LOCALS(&r200->radeon);
	struct radeon_renderbuffer *rrb;
	uint32_t cbpitch;
+	uint32_t zbpitch;
+	uint32_t dwords = atom->cmd_size;
	GLframebuffer *fb = r200->radeon.dri.drawable->driverPrivate;
-
+
+	/* emit the first 5 dwords of the context state table unmodified */
+	BEGIN_BATCH_NO_AUTOSTATE(dwords);
+	OUT_BATCH_TABLE(atom->cmd, 5);
+
+	/* depth buffer: reloc + computed pitch when a depth rrb is bound */
+	rrb = r200->radeon.state.depth.rrb;
+	if (!rrb) {
+		OUT_BATCH(atom->cmd[CTX_RB3D_DEPTHOFFSET]);
+		OUT_BATCH(atom->cmd[CTX_RB3D_DEPTHPITCH]);
+	} else {
+		zbpitch = (rrb->pitch / rrb->cpp);
+		OUT_BATCH_RELOC(0, rrb->bo, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);
+		OUT_BATCH(zbpitch);
+	}
+
+	OUT_BATCH(atom->cmd[CTX_RB3D_ZSTENCILCNTL]);
+	OUT_BATCH(atom->cmd[CTX_CMD_1]);
+	OUT_BATCH(atom->cmd[CTX_PP_CNTL]);
+	OUT_BATCH(atom->cmd[CTX_RB3D_CNTL]);
+
	rrb = r200->radeon.state.color.rrb;
	if (r200->radeon.radeonScreen->driScreen->dri2.enabled) {
		rrb = (struct radeon_renderbuffer *)fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer;
	}
	if (!rrb || !rrb->bo) {
-		fprintf(stderr, "no rrb\n");
-		return;
+		OUT_BATCH(atom->cmd[CTX_RB3D_COLOROFFSET]);
+	} else {
+		OUT_BATCH_RELOC(0, rrb->bo, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);
	}
-	cbpitch = (rrb->pitch / rrb->cpp);
-	if (rrb->cpp == 4)
-		;
-	else
-		;
+	OUT_BATCH(atom->cmd[CTX_CMD_2]);
+
+	if (!rrb || !rrb->bo) {
+		OUT_BATCH(atom->cmd[CTX_RB3D_COLORPITCH]);
+	} else {
+		cbpitch = (rrb->pitch / rrb->cpp);
+		/* NOTE(review): both cpp branches below are empty — presumably a
+		 * placeholder for format-dependent pitch flags; confirm intent. */
+		if (rrb->cpp == 4)
+			;
+		else
+			;
+		if (r200->radeon.sarea->tiling_enabled)
+			cbpitch |= R200_COLOR_TILE_ENABLE;
+		OUT_BATCH(cbpitch);
+	}
+
+	/* the newer-drm layout carries 4 extra trailing dwords */
+	if (atom->cmd_size == CTX_STATE_SIZE_NEWDRM)
+		OUT_BATCH_TABLE((atom->cmd + 14), 4);
+
+	END_BATCH();
}
-#endif
static int tex_emit(GLcontext *ctx, struct radeon_state_atom *atom)
{
else
ALLOC_STATE( ctx, always, CTX_STATE_SIZE_OLDDRM, "CTX/context", 0 );
- // rmesa->hw.ctx.emit = ctx_emit;
+ rmesa->hw.ctx.emit = ctx_emit;
ALLOC_STATE( set, always, SET_STATE_SIZE, "SET/setup", 0 );
ALLOC_STATE( lin, always, LIN_STATE_SIZE, "LIN/line", 0 );
ALLOC_STATE( msk, always, MSK_STATE_SIZE, "MSK/mask", 0 );
static GLushort *r200AllocElts( r200ContextPtr rmesa, GLuint nr )
{
+ fprintf(stderr,"alloc elts\n");
if (rmesa->dma.flush == r200FlushElts &&
rmesa->store.cmd_used + nr*2 < R200_CMD_BUF_SZ) {
r200ContextPtr rmesa = R200_CONTEXT( ctx );
r200TclPrimitive( ctx, prim, hwprim );
+ fprintf(stderr,"Emit prim %d\n", rmesa->tcl.nr_aos_components);
rcommonEnsureCmdBufSpace( rmesa, AOS_BUFSZ(rmesa->tcl.nr_aos_components) +
rmesa->hw.max_state_size + VBUF_BUFSZ );
static void r300ClearBuffer(r300ContextPtr r300, int flags,
struct radeon_renderbuffer *rrb,
- struct radeon_renderbuffer *rrbd)
+ struct radeon_renderbuffer *rrbd)
{
BATCH_LOCALS(&r300->radeon);
GLcontext *ctx = r300->radeon.glCtx;
rcommonEnsureCmdBufSpace(&r300->radeon, 421 * 3, __FUNCTION__);
if (flags || bits)
r300EmitClearState(ctx);
- rrbd = (void *)fb->Attachment[BUFFER_DEPTH].Renderbuffer;
+ rrbd = (void *)fb->Attachment[BUFFER_DEPTH].Renderbuffer;
if (flags & BUFFER_BIT_FRONT_LEFT) {
rrb = (void *)fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
r300->hw.rb3d_discard_src_pixel_lte_threshold.cmd[2] = 0xffffffff;
rrb = r300->radeon.state.depth.rrb;
- if (rrb && rrb->bo && (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE)) {
+ if (rrb && rrb->bo && (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE)) {
/* XXX: Turn off when clearing buffers ? */
r300->hw.zb.cmd[R300_ZB_PITCH] |= R300_DEPTHMACROTILE_ENABLE;
int prevLockLine = 0;
#endif
-#ifdef RADEON_COMMON_FOR_R300
/* Turn on/off page flipping according to the flags in the sarea:
*/
void radeonUpdatePageFlipping(radeonContextPtr rmesa)
rmesa->state.depth.rrb = (void *)fb->Attachment[BUFFER_DEPTH].Renderbuffer;
}
-#else
-/* Turn on/off page flipping according to the flags in the sarea:
- */
-void radeonUpdatePageFlipping(radeonContextPtr rmesa)
-{
- rmesa->doPageFlip = rmesa->sarea->pfState;
- if (rmesa->glCtx->WinSysDrawBuffer) {
- driFlipRenderbuffers(rmesa->glCtx->WinSysDrawBuffer,
- rmesa->sarea->pfCurrentPage);
- }
-}
-#endif
/* Update the hardware state. This is called if another context has
* grabbed the hardware lock, which includes the X server. This
}
radeon_bo_unmap(aos->bo);
}
+
+
+/* Debug helper (shared replacement for the per-driver print_state_atom):
+ * logs the atom name and dword count; additionally dumps every command
+ * dword when DEBUG_VERBOSE is set in RADEON_DEBUG. */
+void radeon_print_state_atom( struct radeon_state_atom *state )
+{
+	int i;
+
+	fprintf(stderr, "emit %s/%d\n", state->name, state->cmd_size);
+
+	if (RADEON_DEBUG & DEBUG_VERBOSE)
+		for (i = 0 ; i < state->cmd_size ; i++)
+			fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
+
+}
void rcommon_emit_vector(GLcontext * ctx, struct radeon_aos *aos,
GLvoid * data, int size, int stride, int count);
+void radeon_print_state_atom( struct radeon_state_atom *state );
#endif
}
cs->section = 0;
if (cs->section_ndw != cs->section_cdw) {
- fprintf(stderr, "CS section size missmatch start at (%s,%s,%d)\n",
- cs->section_file, cs->section_func, cs->section_line);
+ fprintf(stderr, "CS section size missmatch start at (%s,%s,%d) %d vs %d\n",
+ cs->section_file, cs->section_func, cs->section_line, cs->section_ndw, cs->section_cdw);
fprintf(stderr, "CS section end at (%s,%s,%d)\n",
file, func, line);
return -EPIPE;
struct cs_reloc_legacy *relocs;
int i, j, r;
- if (!IS_R300_CLASS(csm->ctx->radeonScreen)) {
- /* FIXME: r300 only right now */
- return -EINVAL;
- }
csm = (struct cs_manager_legacy*)cs->csm;
relocs = (struct cs_reloc_legacy *)cs->relocs;
for (i = 0; i < cs->crelocs; i++) {
relocs[i].base.bo, soffset, eoffset);
return r;
}
+ fprintf(stderr, "validated %p [0x%08X, 0x%08X]\n",
+ relocs[i].base.bo, soffset, eoffset);
cs->packets[relocs[i].indices[j]] += soffset;
if (cs->packets[relocs[i].indices[j]] >= eoffset) {
radeon_bo_debug(relocs[i].base.bo, 12);
return 0;
}
+/* Debug helper: print every dword of the command stream (index: value).
+ * NOTE(review): currently invoked unconditionally before submission —
+ * looks like debug leftover; consider gating on a debug flag. */
+static void dump_cmdbuf(struct radeon_cs *cs)
+{
+	int i;
+	for (i = 0; i < cs->cdw; i++){
+		fprintf(stderr,"%x: %08x\n", i, cs->packets[i]);
+	}
+
+}
static int cs_emit(struct radeon_cs *cs)
{
struct cs_manager_legacy *csm = (struct cs_manager_legacy*)cs->csm;
csm->ctx->vtbl.emit_cs_header(cs, csm->ctx);
+
/* append buffer age */
- age.scratch.cmd_type = R300_CMD_SCRATCH;
- /* Scratch register 2 corresponds to what radeonGetAge polls */
- csm->pending_age = 0;
- csm->pending_count = 1;
- ull = (uint64_t) (intptr_t) &csm->pending_age;
- age.scratch.reg = 2;
- age.scratch.n_bufs = 1;
- age.scratch.flags = 0;
- radeon_cs_write_dword(cs, age.u);
- radeon_cs_write_dword(cs, ull & 0xffffffff);
- radeon_cs_write_dword(cs, ull >> 32);
- radeon_cs_write_dword(cs, 0);
+ if (IS_R300_CLASS(csm->ctx->radeonScreen)) {
+ age.scratch.cmd_type = R300_CMD_SCRATCH;
+ /* Scratch register 2 corresponds to what radeonGetAge polls */
+ csm->pending_age = 0;
+ csm->pending_count = 1;
+ ull = (uint64_t) (intptr_t) &csm->pending_age;
+ age.scratch.reg = 2;
+ age.scratch.n_bufs = 1;
+ age.scratch.flags = 0;
+ radeon_cs_write_dword(cs, age.u);
+ radeon_cs_write_dword(cs, ull & 0xffffffff);
+ radeon_cs_write_dword(cs, ull >> 32);
+ radeon_cs_write_dword(cs, 0);
+ }
r = cs_process_relocs(cs);
if (r) {
cmd.boxes = (drm_clip_rect_t *) csm->ctx->pClipRects;
}
+ dump_cmdbuf(cs);
+
r = drmCommandWrite(cs->csm->fd, DRM_RADEON_CMDBUF, &cmd, sizeof(cmd));
if (r) {
return r;