drm_r300_cmd_header_t cmd;
uint32_t addr, ndw;
- if (!r300->radeon.radeonScreen->kernel_mm) {
- uint32_t dwords;
- dwords = (*atom->check) (ctx, atom);
- BEGIN_BATCH_NO_AUTOSTATE(dwords);
- OUT_BATCH_TABLE(atom->cmd, dwords);
- END_BATCH();
- return;
- }
-
cmd.u = atom->cmd[0];
addr = (cmd.vpu.adrhi << 8) | cmd.vpu.adrlo;
- ndw = cmd.vpu.count * 4;
- if (ndw) {
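+ /* the atom's check() callback returns the total batch size in dwords, packet headers included */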
+ ndw = atom->check(ctx, atom);
- if (r300->vap_flush_needed) {
- BEGIN_BATCH_NO_AUTOSTATE(15 + ndw);
+ BEGIN_BATCH_NO_AUTOSTATE(ndw);
- /* flush processing vertices */
- OUT_BATCH_REGVAL(R300_SC_SCREENDOOR, 0);
- OUT_BATCH_REGVAL(R300_RB3D_DSTCACHE_CTLSTAT, R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_FLUSH_DIRTY_3D);
- OUT_BATCH_REGVAL(RADEON_WAIT_UNTIL, RADEON_WAIT_3D_IDLECLEAN);
- OUT_BATCH_REGVAL(R300_SC_SCREENDOOR, 0xffffff);
- OUT_BATCH_REGVAL(R300_VAP_PVS_STATE_FLUSH_REG, 0);
- r300->vap_flush_needed = GL_FALSE;
- } else {
- BEGIN_BATCH_NO_AUTOSTATE(5 + ndw);
- }
- OUT_BATCH_REGVAL(R300_VAP_PVS_VECTOR_INDX_REG, addr);
- OUT_BATCH(CP_PACKET0(R300_VAP_PVS_UPLOAD_DATA, ndw-1) | RADEON_ONE_REG_WR);
- OUT_BATCH_TABLE(&atom->cmd[1], ndw);
- OUT_BATCH_REGVAL(R300_VAP_PVS_STATE_FLUSH_REG, 0);
- END_BATCH();
- }
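+ /* 5 of the ndw dwords are the register writes below (VECTOR_INDX, the upload packet header, the PVS flush); the remainder is the vector payload */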
+ ndw -= 5;
+ OUT_BATCH_REGVAL(R300_VAP_PVS_VECTOR_INDX_REG, addr);
+ OUT_BATCH(CP_PACKET0(R300_VAP_PVS_UPLOAD_DATA, ndw-1) | RADEON_ONE_REG_WR);
+ OUT_BATCH_TABLE(&atom->cmd[1], ndw);
+ OUT_BATCH_REGVAL(R300_VAP_PVS_STATE_FLUSH_REG, 0);
+ END_BATCH();
}
static void emit_r500fp(GLcontext *ctx, struct radeon_state_atom * atom)
addr |= (type << 16);
addr |= (clamp << 17);
- stride = type ? 4 : 6;
-
- ndw = sz * stride;
- if (ndw) {
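+ /* ndw holds the total dword count for this atom, headers included */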
+ BEGIN_BATCH_NO_AUTOSTATE(ndw);
+ OUT_BATCH(CP_PACKET0(R500_GA_US_VECTOR_INDEX, 0));
+ OUT_BATCH(addr);
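+ /* the INDEX packet header, the address dword and the DATA packet header take 3 dwords; the rest is constant data */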
+ ndw-=3;
+ OUT_BATCH(CP_PACKET0(R500_GA_US_VECTOR_DATA, ndw-1) | RADEON_ONE_REG_WR);
+ OUT_BATCH_TABLE(&atom->cmd[1], ndw);
+ END_BATCH();
+}
- BEGIN_BATCH_NO_AUTOSTATE(3 + ndw);
- OUT_BATCH(CP_PACKET0(R500_GA_US_VECTOR_INDEX, 0));
- OUT_BATCH(addr);
- OUT_BATCH(CP_PACKET0(R500_GA_US_VECTOR_DATA, ndw-1) | RADEON_ONE_REG_WR);
- OUT_BATCH_TABLE(&atom->cmd[1], ndw);
- END_BATCH();
- }
-}
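+/* Size callback for the texture offset atom: count the dwords the emit path will need for the current texture state. */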
+static int check_tex_offsets(GLcontext *ctx, struct radeon_state_atom * atom)
+{
+ r300ContextPtr r300 = R300_CONTEXT(ctx);
+ int numtmus = packet0_count(r300, r300->hw.tex.offset.cmd);
+ int dw = 0, i;
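+ /* a bare CP_PACKET2 (type-2 NOP) header means there is nothing to emit for this atom */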
+ if (atom->cmd[0] == CP_PACKET2) {
+ return dw;
+ }
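+ /* unbound units cost nothing on UMS, overridden images without a BO cost 2 dwords on UMS (none on KMS), everything else costs 4 */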
+ for(i = 0; i < numtmus; ++i) {
+ radeonTexObj *t = r300->hw.textures[i];
+ if (!t && !r300->radeon.radeonScreen->kernel_mm) {
+ dw += 0;
+ } else if (t && t->image_override && !t->bo) {
+ if (!r300->radeon.radeonScreen->kernel_mm)
+ dw += 2;
+ } else
+ dw += 4;
+ }
+ return dw;
+}
static void emit_tex_offsets(GLcontext *ctx, struct radeon_state_atom * atom)