void r600InitCmdBuf(context_t *r600) /* from rcommonInitCmdBuf */
{
- radeonContextPtr rmesa = &r600->radeon;
- GLuint size;
- rmesa->hw.max_state_size = 4000; /* rough estimate */
+ radeonContextPtr rmesa = &r600->radeon;
+ GLuint size;
- rmesa->hw.all_dirty = GL_TRUE;
- rmesa->hw.is_dirty = GL_TRUE;
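+ /* build the state atom list and mark everything dirty for the first emit */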
+ r600InitAtoms(r600);
/* Initialize command buffer */
size = 256 * driQueryOptioni(&rmesa->optionCache,
rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
assert(rmesa->cmdbuf.cs != NULL);
rmesa->cmdbuf.size = size;
-
+
if (!rmesa->radeonScreen->kernel_mm) {
radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
struct r600_context;
typedef struct r600_context context_t;
-GLboolean r700SendPSState(context_t *context);
-GLboolean r700SendVSState(context_t *context);
-GLboolean r700SendSQConfig(context_t *context);
+extern GLboolean r700SendPSState(context_t *context);
+extern GLboolean r700SendVSState(context_t *context);
+extern GLboolean r700SendFSState(context_t *context);
#include "main/mm.h"
RIGHT_SHIFT = 2,
};
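+/* state atoms, emitted through the common radeon atom list */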
+struct r600_hw_state {
+ struct radeon_state_atom sq;
+ struct radeon_state_atom db;
+ struct radeon_state_atom db_target;
+ struct radeon_state_atom sc;
+ struct radeon_state_atom cl;
+ struct radeon_state_atom ucp;
+ struct radeon_state_atom su;
+ struct radeon_state_atom cb;
+ struct radeon_state_atom cb_target;
+ struct radeon_state_atom sx;
+ struct radeon_state_atom vgt;
+ struct radeon_state_atom spi;
+ struct radeon_state_atom vpt;
+};
+
/**
* \brief R600 context structure.
*/
/* ------ */
R700_CHIP_CONTEXT hw;
+ struct r600_hw_state atoms;
+
/* Vertex buffers
*/
GLvector4f dummy_attrib[_TNL_ATTRIB_MAX];
rmesa->radeon.dma.flush( rmesa->radeon.glCtx ); \
} while (0)
-#define R600_STATECHANGE(r600, atom) \
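+/* ATOM names a field of struct r600_hw_state; the atom is flagged dirty for the next emit */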
+#define R600_STATECHANGE(r600, ATOM) \
do { \
R600_NEWPRIM(r600); \
- (atom) = GL_TRUE; \
+ r600->atoms.ATOM.dirty = GL_TRUE; \
r600->radeon.hw.is_dirty = GL_TRUE; \
} while(0)
-extern GLboolean r700SendSPIState(context_t *context);
-extern GLboolean r700SendVGTState(context_t *context);
-extern GLboolean r700SendSXState(context_t *context);
-extern GLboolean r700SendDBState(context_t *context);
-extern GLboolean r700SendCBState(context_t *context);
-extern GLboolean r700SendSUState(context_t *context);
-extern GLboolean r700SendCLState(context_t *context);
-extern GLboolean r700SendSCState(context_t *context);
-extern GLboolean r700SendViewportState(context_t *context, int id);
-extern GLboolean r700SendRenderTargetState(context_t *context, int id);
extern GLboolean r700SendTextureState(context_t *context);
-extern GLboolean r700SendDepthTargetState(context_t *context);
-extern GLboolean r700SendUCPState(context_t *context);
-extern GLboolean r700SendFSState(context_t *context);
-extern void r700EmitState(GLcontext * ctx);
extern GLboolean r700SyncSurf(context_t *context,
struct radeon_bo *pbo,
unsigned int stride,
unsigned int Count); /* number of vectors in stream */
+extern void r600InitAtoms(context_t *context);
+
#define RADEON_D_CAPTURE 0
#define RADEON_D_PLAYBACK 1
#define RADEON_D_PLAYBACK_RAW 2
void r600EmitCacheFlush(context_t *rmesa)
{
- BATCH_LOCALS(&rmesa->radeon);
}
GLboolean r600EmitShader(GLcontext * ctx,
#include "main/imports.h"
#include "main/glheader.h"
+#include "main/simple_list.h"
#include "r600_context.h"
#include "r600_cmdbuf.h"
return R600_FALLBACK_NONE;
}
-GLboolean r700SendDepthTargetState(context_t *context)
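+/* atom emit callback: called from radeonEmitState() via the atom list */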
+static void r700SendDepthTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
struct radeon_renderbuffer *rrb;
BATCH_LOCALS(&context->radeon);
rrb = radeon_get_depthbuffer(&context->radeon);
if (!rrb || !rrb->bo) {
fprintf(stderr, "no rrb\n");
- return GL_FALSE;
+ return;
}
BEGIN_BATCH_NO_AUTOSTATE(8 + 2);
COMMIT_BATCH();
- r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
- DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);
-
- r700->db_target_dirty = GL_FALSE;
-
- return GL_TRUE;
}
-GLboolean r700SendRenderTargetState(context_t *context, int id)
+static void r700SendRenderTargetState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
struct radeon_renderbuffer *rrb;
BATCH_LOCALS(&context->radeon);
+ int id = 0;
rrb = radeon_get_colorbuffer(&context->radeon);
if (!rrb || !rrb->bo) {
fprintf(stderr, "no rrb\n");
- return GL_FALSE;
+ return;
}
if (id > R700_MAX_RENDER_TARGETS)
- return GL_FALSE;
+ return;
if (!r700->render_target[id].enabled)
- return GL_FALSE;
+ return;
BEGIN_BATCH_NO_AUTOSTATE(3 + 2);
R600_OUT_BATCH_REGSEQ(CB_COLOR0_BASE + (4 * id), 1);
COMMIT_BATCH();
- r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
- CB_ACTION_ENA_bit | (1 << (id + 6)));
-
- r700->render_target[id].dirty = GL_FALSE;
-
- return GL_TRUE;
}
GLboolean r700SendPSState(context_t *context)
return GL_TRUE;
}
-GLboolean r700SendViewportState(context_t *context, int id)
+static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
BATCH_LOCALS(&context->radeon);
+ int id = 0;
if (id > R700_MAX_VIEWPORTS)
- return GL_FALSE;
+ return;
if (!r700->viewport[id].enabled)
- return GL_FALSE;
+ return;
BEGIN_BATCH_NO_AUTOSTATE(16);
R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
COMMIT_BATCH();
- r700->viewport[id].dirty = GL_FALSE;
-
- return GL_TRUE;
}
-GLboolean r700SendSQConfig(context_t *context)
+static void r700SendSQConfig(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
BATCH_LOCALS(&context->radeon);
END_BATCH();
COMMIT_BATCH();
-
- r700->sq_dirty = GL_FALSE;
-
- return GL_TRUE;
}
-GLboolean r700SendUCPState(context_t *context)
+static void r700SendUCPState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
BATCH_LOCALS(&context->radeon);
int i;
for (i = 0; i < R700_MAX_UCP; i++) {
- if (r700->ucp[i].enabled && r700->ucp[i].dirty) {
+ if (r700->ucp[i].enabled) {
BEGIN_BATCH_NO_AUTOSTATE(6);
R600_OUT_BATCH_REGSEQ(PA_CL_UCP_0_X + (16 * i), 4);
R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_X.u32All);
R600_OUT_BATCH(r700->ucp[i].PA_CL_UCP_0_W.u32All);
END_BATCH();
COMMIT_BATCH();
- r700->ucp[i].dirty = GL_FALSE;
}
}
-
- return GL_TRUE;
}
-GLboolean r700SendSPIState(context_t *context)
+static void r700SendSPIState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
BATCH_LOCALS(&context->radeon);
unsigned int ui;
END_BATCH();
COMMIT_BATCH();
-
- r700->spi_dirty = GL_FALSE;
-
- return GL_TRUE;
}
-GLboolean r700SendVGTState(context_t *context)
+static void r700SendVGTState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
BATCH_LOCALS(&context->radeon);
END_BATCH();
COMMIT_BATCH();
-
- r700->vgt_dirty = GL_FALSE;
-
- return GL_TRUE;
}
-GLboolean r700SendSXState(context_t *context)
+static void r700SendSXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
BATCH_LOCALS(&context->radeon);
R600_OUT_BATCH_REGVAL(SX_ALPHA_REF, r700->SX_ALPHA_REF.u32All);
END_BATCH();
COMMIT_BATCH();
-
- r700->sx_dirty = GL_FALSE;
-
- return GL_TRUE;
}
-GLboolean r700SendDBState(context_t *context)
+static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
BATCH_LOCALS(&context->radeon);
END_BATCH();
COMMIT_BATCH();
-
- r700->db_dirty = GL_FALSE;
-
- return GL_TRUE;
}
-GLboolean r700SendCBState(context_t *context)
+static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
BATCH_LOCALS(&context->radeon);
unsigned int ui;
COMMIT_BATCH();
- r700->cb_dirty = GL_FALSE;
-
- return GL_TRUE;
}
-GLboolean r700SendSUState(context_t *context)
+static void r700SendSUState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
BATCH_LOCALS(&context->radeon);
END_BATCH();
COMMIT_BATCH();
- r700->su_dirty = GL_FALSE;
-
- return GL_TRUE;
}
-GLboolean r700SendCLState(context_t *context)
+static void r700SendCLState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
BATCH_LOCALS(&context->radeon);
END_BATCH();
COMMIT_BATCH();
-
- r700->cl_dirty = GL_FALSE;
-
- return GL_TRUE;
}
// XXX need to split this up
-GLboolean r700SendSCState(context_t *context)
+static void r700SendSCState(GLcontext *ctx, struct radeon_state_atom *atom)
{
+ context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
BATCH_LOCALS(&context->radeon);
END_BATCH();
COMMIT_BATCH();
+}
- r700->sc_dirty = GL_FALSE;
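+/* check callback: the atom is always emitted at its full command size */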
+static int check_always(GLcontext *ctx, struct radeon_state_atom *atom)
+{
+ return atom->cmd_size;
+}
- return GL_TRUE;
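+/* register a state atom: record its command size and emit callback and
+ * append it to the common radeon atom list */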
+#define ALLOC_STATE( ATOM, SZ, EMIT ) \
+do { \
+ context->atoms.ATOM.cmd_size = (SZ); \
+ context->atoms.ATOM.cmd = NULL; \
+ context->atoms.ATOM.name = #ATOM; \
+ context->atoms.ATOM.idx = 0; \
+ context->atoms.ATOM.check = check_always; \
+ context->atoms.ATOM.dirty = GL_FALSE; \
+ context->atoms.ATOM.emit = (EMIT); \
+ context->radeon.hw.max_state_size += (SZ); \
+ insert_at_tail(&context->radeon.hw.atomlist, &context->atoms.ATOM); \
+} while (0)
+
+void r600InitAtoms(context_t *context)
+{
+
+ /* Setup the atom linked list */
+ make_empty_list(&context->radeon.hw.atomlist);
+ context->radeon.hw.atomlist.name = "atom-list";
+
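+ /* per-atom command buffer sizes are in dwords */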
+ ALLOC_STATE(sq, 34, r700SendSQConfig);
+ ALLOC_STATE(db, 27, r700SendDBState);
+ ALLOC_STATE(db_target, 19, r700SendDepthTargetState);
+ ALLOC_STATE(sc, 47, r700SendSCState);
+ ALLOC_STATE(cl, 18, r700SendCLState);
+ ALLOC_STATE(ucp, 36, r700SendUCPState);
+ ALLOC_STATE(su, 19, r700SendSUState);
+ ALLOC_STATE(cb, 39, r700SendCBState);
+ ALLOC_STATE(cb_target, 32, r700SendRenderTargetState);
+ ALLOC_STATE(sx, 9, r700SendSXState);
+ ALLOC_STATE(vgt, 41, r700SendVGTState);
+ ALLOC_STATE(spi, (59 + R700_MAX_SHADER_EXPORTS), r700SendSPIState);
+ ALLOC_STATE(vpt, 16, r700SendViewportState);
+
+ context->radeon.hw.is_dirty = GL_TRUE;
+ context->radeon.hw.all_dirty = GL_TRUE;
}
#include "r700_ioctl.h"
#include "r700_clear.h"
-static void r700Flush(GLcontext *ctx)
-{
- radeonContextPtr radeon = RADEON_CONTEXT(ctx);
-
- if (RADEON_DEBUG & DEBUG_IOCTL)
- fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
-
- /* okay if we have no cmds in the buffer &&
- we have no DMA flush &&
- we have no DMA buffer allocated.
- then no point flushing anything at all.
- */
- if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
- return;
-
- if (radeon->dma.flush)
- radeon->dma.flush( ctx );
-
- r700EmitState(ctx);
-
- if (radeon->cmdbuf.cs->cdw)
- rcommonFlushCmdBuf(radeon, __FUNCTION__);
-}
void r700InitIoctlFuncs(struct dd_function_table *functions)
{
functions->Clear = r700Clear;
functions->Finish = radeonFinish;
- functions->Flush = r700Flush;
+ functions->Flush = radeonFlush;
}
}
-static void r700EmitAtoms(GLcontext * ctx, GLboolean dirty)
-{
- context_t *context = R700_CONTEXT(ctx);
- radeonContextPtr radeon = &context->radeon;
- R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
-
- if ((r700->sq_dirty || radeon->hw.all_dirty) == dirty)
- r700SendSQConfig(context);
- r700SendUCPState(context);
- if ((r700->sc_dirty || radeon->hw.all_dirty) == dirty)
- r700SendSCState(context);
- if ((r700->su_dirty || radeon->hw.all_dirty) == dirty)
- r700SendSUState(context);
- if ((r700->cl_dirty || radeon->hw.all_dirty) == dirty)
- r700SendCLState(context);
- if ((r700->cb_dirty || radeon->hw.all_dirty) == dirty)
- r700SendCBState(context);
- if ((r700->db_dirty || radeon->hw.all_dirty) == dirty)
- r700SendDBState(context);
- if ((r700->sx_dirty || radeon->hw.all_dirty) == dirty)
- r700SendSXState(context);
- if ((r700->vgt_dirty || radeon->hw.all_dirty) == dirty)
- r700SendVGTState(context);
- if ((r700->spi_dirty || radeon->hw.all_dirty) == dirty)
- r700SendSPIState(context);
- if ((r700->viewport[0].dirty || radeon->hw.all_dirty) == dirty)
- r700SendViewportState(context, 0);
- if ((r700->render_target[0].dirty || radeon->hw.all_dirty) == dirty)
- r700SendRenderTargetState(context, 0);
- if ((r700->db_target_dirty || radeon->hw.all_dirty) == dirty)
- r700SendDepthTargetState(context);
-
-}
-
-void r700EmitState(GLcontext * ctx)
-{
- context_t *context = R700_CONTEXT(ctx);
- radeonContextPtr radeon = &context->radeon;
-
- if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
- fprintf(stderr, "%s\n", __FUNCTION__);
-
- if (radeon->vtbl.pre_emit_state)
- radeon->vtbl.pre_emit_state(radeon);
-
- if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
- return;
-
- rcommonEnsureCmdBufSpace(&context->radeon,
- 652, __FUNCTION__);
-
- if (!radeon->cmdbuf.cs->cdw) {
- if (RADEON_DEBUG & DEBUG_STATE)
- fprintf(stderr, "Begin reemit state\n");
-
- r700EmitAtoms(ctx, GL_FALSE);
- }
-
- if (RADEON_DEBUG & DEBUG_STATE)
- fprintf(stderr, "Begin dirty state\n");
-
- r700EmitAtoms(ctx, GL_TRUE);
- radeon->hw.is_dirty = GL_FALSE;
- radeon->hw.all_dirty = GL_FALSE;
-
-}
-
static GLboolean r700RunRender(GLcontext * ctx,
struct tnl_pipeline_stage *stage)
{
context_t *context = R700_CONTEXT(ctx);
radeonContextPtr radeon = &context->radeon;
- unsigned int i, ind_count = 0;
+ unsigned int i, ind_count = 0, id = 0;
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *vb = &tnl->vb;
+ struct radeon_renderbuffer *rrb;
for (i = 0; i < vb->PrimitiveCount; i++)
ind_count += vb->Primitive[i].count + 10;
/* just an estimate, need to properly calculate this */
rcommonEnsureCmdBufSpace(&context->radeon,
- radeon->hw.max_state_size + ind_count, __FUNCTION__);
+ radeon->hw.max_state_size + ind_count + 1000, __FUNCTION__);
r700Start3D(context);
-
r700UpdateShaders(ctx);
r700SetScissor(context);
r700SetupShaders(ctx);
-
- r700EmitState(ctx);
+ radeonEmitState(radeon);
/* richard test code */
for (i = 0; i < vb->PrimitiveCount; i++) {
/* Flush render op cached for last several quads. */
r700WaitForIdleClean(context);
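+ /* flush the CB/DB caches for the color and depth buffers used by this draw */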
+ rrb = radeon_get_colorbuffer(&context->radeon);
+ if (rrb && rrb->bo)
+ r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
+ CB_ACTION_ENA_bit | (1 << (id + 6)));
+
+ rrb = radeon_get_depthbuffer(&context->radeon);
+ if (rrb && rrb->bo)
+ r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
+ DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);
+
radeonReleaseArrays(ctx, ~0);
return GL_FALSE;
/* Note: this should also modify whatever data the context reset
* code uses...
*/
- R600_STATECHANGE(context, r700->viewport[id].dirty);
+ R600_STATECHANGE(context, vpt);
r700->viewport[id].PA_CL_VPORT_XOFFSET.f32All = tx;
r700->viewport[id].PA_CL_VPORT_YOFFSET.f32All = ty;
}
context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
- R600_STATECHANGE(context, r700->render_target[0].dirty);
- R600_STATECHANGE(context, r700->db_target_dirty);
+ R600_STATECHANGE(context, cb_target);
+ R600_STATECHANGE(context, db_target);
r700SetRenderTarget(context, 0);
r700SetDepthTarget(context);
r700UpdateStateParameters(ctx, new_state);
- R600_STATECHANGE(context, r700->cl_dirty);
- R600_STATECHANGE(context, r700->spi_dirty);
+ R600_STATECHANGE(context, cl);
+ R600_STATECHANGE(context, spi);
if(GL_TRUE == r700->bEnablePerspective)
{
context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
- R600_STATECHANGE(context, r700->db_dirty);
+ R600_STATECHANGE(context, db);
if (ctx->Depth.Test)
{
uint32_t alpha_func = REF_ALWAYS;
GLboolean really_enabled = ctx->Color.AlphaEnabled;
- R600_STATECHANGE(context, r700->sx_dirty);
+ R600_STATECHANGE(context, sx);
switch (ctx->Color.AlphaFunc) {
case GL_NEVER:
context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
- R600_STATECHANGE(context, r700->cb_dirty);
+ R600_STATECHANGE(context, cb);
r700->CB_BLEND_RED.f32All = cf[0];
r700->CB_BLEND_GREEN.f32All = cf[1];
int id = 0;
uint32_t blend_reg = 0, eqn, eqnA;
- R600_STATECHANGE(context, r700->cb_dirty);
+ R600_STATECHANGE(context, cb);
if (RGBA_LOGICOP_ENABLED(ctx) || !ctx->Color.BlendEnabled) {
SETfield(blend_reg,
context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&R700_CONTEXT(ctx)->hw);
- R600_STATECHANGE(context, r700->cb_dirty);
+ R600_STATECHANGE(context, cb);
if (RGBA_LOGICOP_ENABLED(ctx))
SETfield(r700->CB_COLOR_CONTROL.u32All,
context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&R700_CONTEXT(ctx)->hw);
- R600_STATECHANGE(context, r700->su_dirty);
+ R600_STATECHANGE(context, su);
CLEARbit(r700->PA_SU_SC_MODE_CNTL.u32All, FACE_bit);
CLEARbit(r700->PA_SU_SC_MODE_CNTL.u32All, CULL_FRONT_bit);
context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&R700_CONTEXT(ctx)->hw);
- R600_STATECHANGE(context, r700->sc_dirty);
+ R600_STATECHANGE(context, sc);
if (ctx->Line.StippleFlag)
{
(a ? 8 : 0));
if (mask != r700->CB_SHADER_MASK.u32All) {
- R600_STATECHANGE(context, r700->cb_dirty);
+ R600_STATECHANGE(context, cb);
SETfield(r700->CB_SHADER_MASK.u32All, mask, OUTPUT0_ENABLE_shift, OUTPUT0_ENABLE_mask);
}
}
context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
- R600_STATECHANGE(context, r700->spi_dirty);
+ R600_STATECHANGE(context, spi);
/* also need to set/clear FLAT_SHADE bit per param in SPI_PS_INPUT_CNTL_[0-31] */
switch (mode) {
context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
- R600_STATECHANGE(context, r700->su_dirty);
+ R600_STATECHANGE(context, su);
/* We need to clamp to user defined range here, because
* the HW clamping happens only for per vertex point size. */
context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
- R600_STATECHANGE(context, r700->su_dirty);
+ R600_STATECHANGE(context, su);
/* format is 12.4 fixed point */
switch (pname) {
}
if (hw_stencil) {
- R600_STATECHANGE(context, r700->db_dirty);
+ R600_STATECHANGE(context, db);
if (state)
SETbit(r700->DB_DEPTH_CONTROL.u32All, STENCIL_ENABLE_bit);
else
//fixme
//r300CatchStencilFallback(ctx);
- R600_STATECHANGE(context, r700->db_dirty);
+ R600_STATECHANGE(context, db);
//front
SETfield(r700->DB_STENCILREFMASK.u32All, ctx->Stencil.Ref[0],
//fixme
//r300CatchStencilFallback(ctx);
- R600_STATECHANGE(context, r700->db_dirty);
+ R600_STATECHANGE(context, db);
// front
SETfield(r700->DB_STENCILREFMASK.u32All, ctx->Stencil.WriteMask[0],
//fixme
//r300CatchStencilFallback(ctx);
- R600_STATECHANGE(context, r700->db_dirty);
+ R600_STATECHANGE(context, db);
SETfield(r700->DB_DEPTH_CONTROL.u32All, translate_stencil_op(ctx->Stencil.FailFunc[0]),
STENCILFAIL_shift, STENCILFAIL_mask);
GLfloat sz = v[MAT_SZ] * depthScale;
GLfloat tz = v[MAT_TZ] * depthScale;
- R600_STATECHANGE(context, r700->viewport[id].dirty);
+ R600_STATECHANGE(context, vpt);
r700->viewport[id].PA_CL_VPORT_XSCALE.f32All = sx;
r700->viewport[id].PA_CL_VPORT_XOFFSET.f32All = tx;
R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
uint32_t lineWidth = (uint32_t)((widthf * 0.5) * (1 << 4));
- R600_STATECHANGE(context, r700->su_dirty);
+ R600_STATECHANGE(context, su);
if (lineWidth > 0xFFFF)
lineWidth = 0xFFFF;
context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
- R600_STATECHANGE(context, r700->sc_dirty);
+ R600_STATECHANGE(context, sc);
SETfield(r700->PA_SC_LINE_STIPPLE.u32All, pattern, LINE_PATTERN_shift, LINE_PATTERN_mask);
SETfield(r700->PA_SC_LINE_STIPPLE.u32All, (factor-1), REPEAT_COUNT_shift, REPEAT_COUNT_mask);
context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
- R600_STATECHANGE(context, r700->su_dirty);
+ R600_STATECHANGE(context, su);
if (state) {
SETbit(r700->PA_SU_SC_MODE_CNTL.u32All, POLY_OFFSET_FRONT_ENABLE_bit);
factor *= 12.0;
- R600_STATECHANGE(context, r700->su_dirty);
+ R600_STATECHANGE(context, su);
r700->PA_SU_POLY_OFFSET_FRONT_SCALE.f32All = factor;
r700->PA_SU_POLY_OFFSET_FRONT_OFFSET.f32All = constant;
context_t *context = R700_CONTEXT(ctx);
R700_CHIP_CONTEXT *r700 = (R700_CHIP_CONTEXT*)(&context->hw);
- R600_STATECHANGE(context, r700->su_dirty);
+ R600_STATECHANGE(context, su);
SETfield(r700->PA_SU_SC_MODE_CNTL.u32All, X_DISABLE_POLY_MODE, POLY_MODE_shift, POLY_MODE_mask);
p = (GLint) plane - (GLint) GL_CLIP_PLANE0;
ip = (GLint *)ctx->Transform._ClipUserPlane[p];
- R600_STATECHANGE(context, r700->ucp[p].dirty);
+ R600_STATECHANGE(context, ucp);
r700->ucp[p].PA_CL_UCP_0_X.u32All = ip[0];
r700->ucp[p].PA_CL_UCP_0_Y.u32All = ip[1];
p = cap - GL_CLIP_PLANE0;
- R600_STATECHANGE(context, r700->cl_dirty);
+ R600_STATECHANGE(context, cl);
if (state) {
r700->PA_CL_CLIP_CNTL.u32All |= (UCP_ENA_0_bit << p);
y2 = rrb->dPriv->y + rrb->dPriv->h;
}
- R600_STATECHANGE(context, r700->sc_dirty);
+ R600_STATECHANGE(context, sc);
/* window */
SETbit(r700->PA_SC_WINDOW_SCISSOR_TL.u32All, WINDOW_OFFSET_DISABLE_bit);
return;
}
- R600_STATECHANGE(context, r700->render_target[id].dirty);
- R600_STATECHANGE(context, r700->cb_dirty);
+ R600_STATECHANGE(context, cb_target);
+ R600_STATECHANGE(context, cb);
/* screen/window/view */
SETfield(r700->CB_TARGET_MASK.u32All, 0xF, (4 * id), TARGET0_ENABLE_mask);
if (!rrb)
return;
- R600_STATECHANGE(context, r700->db_target_dirty);
- R600_STATECHANGE(context, r700->db_dirty);
+ R600_STATECHANGE(context, db_target);
/* depth buf */
r700->DB_DEPTH_SIZE.u32All = 0;
int num_gs_stack_entries;
int num_es_stack_entries;
- R600_STATECHANGE(context, r700->sq_dirty);
+ R600_STATECHANGE(context, sq);
// SQ
ps_prio = 0;
GET_CURRENT_CONTEXT(ctx);
radeonContextPtr radeon = (radeonContextPtr) driContextPriv->driverPrivate;
radeonContextPtr current = ctx ? RADEON_CONTEXT(ctx) : NULL;
-#if RADEON_COMMON && defined(RADEON_COMMON_FOR_R600) /* +r6/r7 */
- __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
- radeonScreenPtr screen = (radeonScreenPtr) (sPriv->private);
-#endif
if (radeon == current) {
radeon_firevertices(radeon);
rcommonDestroyCmdBuf(radeon);
-#if RADEON_COMMON && defined(RADEON_COMMON_FOR_R600) /* +r6/r7 */
- if (!IS_R600_CLASS(screen))
-#endif
radeon_destroy_atom_list(radeon);
if (radeon->state.scissor.pClipRects) {