R1xx/r2xx: Don't use an alpha texture format for GLX_TEXTURE_FORMAT_RGB_EXT
diff --git a/src/mesa/drivers/dri/r300/r300_cmdbuf.c b/src/mesa/drivers/dri/r300/r300_cmdbuf.c
index 4c6108cc63651340f3425b05f3c4ad290b7c9057..6ae724bff9d2cbfb2fdc3522a11d3cc375e6788e 100644
--- a/src/mesa/drivers/dri/r300/r300_cmdbuf.c
+++ b/src/mesa/drivers/dri/r300/r300_cmdbuf.c
@@ -55,9 +55,6 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "r300_state.h"
 #include "radeon_reg.h"
 
-#define R300_VAP_PVS_UPLOAD_ADDRESS 0x2200
-#   define RADEON_ONE_REG_WR        (1 << 15)
-
 /** # of dwords reserved for additional instructions that may need to be written
  * during flushing.
  */
@@ -71,7 +68,6 @@ static unsigned packet0_count(r300ContextPtr r300, uint32_t *pkt)
         drm_r300_cmd_header_t *t = (drm_r300_cmd_header_t*)pkt;
         return t->packet0.count;
     }
-    return 0;
 }
 
 #define vpu_count(ptr) (((drm_r300_cmd_header_t*)(ptr))->vpu.count)
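
Note on the hunk above: the dropped "return 0;" was dead code, since both arms of the if/else in packet0_count() already return. A minimal sketch of the function after this change, assuming the kernel_mm arm derives the count from the raw packet0 header dword (only the else arm appears verbatim in this diff):

static unsigned packet0_count(r300ContextPtr r300, uint32_t *pkt)
{
	if (r300->radeon.radeonScreen->kernel_mm) {
		/* Assumption: with the kernel memory manager the count is
		 * taken straight from the packet0 header dword. */
		return ((((*pkt) >> 16) & 0x3FFF) + 1);
	} else {
		drm_r300_cmd_header_t *t = (drm_r300_cmd_header_t *)pkt;
		return t->packet0.count;
	}
	/* no trailing return needed: this point is unreachable */
}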
@@ -83,7 +79,7 @@ void emit_vpu(GLcontext *ctx, struct radeon_state_atom * atom)
        BATCH_LOCALS(&r300->radeon);
        drm_r300_cmd_header_t cmd;
        uint32_t addr, ndw, i;
-       
+
        if (!r300->radeon.radeonScreen->kernel_mm) {
                uint32_t dwords;
                dwords = (*atom->check) (ctx, atom);
@@ -92,7 +88,7 @@ void emit_vpu(GLcontext *ctx, struct radeon_state_atom * atom)
                END_BATCH();
                return;
        }
-       
+
        cmd.u = atom->cmd[0];
        addr = (cmd.vpu.adrhi << 8) | cmd.vpu.adrlo;
        ndw = cmd.vpu.count * 4;
@@ -111,7 +107,7 @@ void emit_vpu(GLcontext *ctx, struct radeon_state_atom * atom)
                } else {
                        BEGIN_BATCH_NO_AUTOSTATE(5 + ndw);
                }
-               OUT_BATCH_REGVAL(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
+               OUT_BATCH_REGVAL(R300_VAP_PVS_VECTOR_INDX_REG, addr);
                OUT_BATCH(CP_PACKET0(R300_VAP_PVS_UPLOAD_DATA, ndw-1) | RADEON_ONE_REG_WR);
                for (i = 0; i < ndw; i++) {
                        OUT_BATCH(atom->cmd[i+1]);
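
The hunk above only swaps the locally defined R300_VAP_PVS_UPLOAD_ADDRESS alias (0x2200, removed in the first hunk) for the register's proper name; the upload sequence itself is unchanged. For orientation, a sketch of that sequence with the setup elided (the comments are an interpretation, not text from this commit):

/* Point the PVS vector index register at the first instruction slot,
 * then stream ndw dwords into the upload-data register.  The
 * RADEON_ONE_REG_WR bit makes the CP write every following dword to the
 * same register instead of auto-incrementing the register address. */
OUT_BATCH_REGVAL(R300_VAP_PVS_VECTOR_INDX_REG, addr);
OUT_BATCH(CP_PACKET0(R300_VAP_PVS_UPLOAD_DATA, ndw - 1) | RADEON_ONE_REG_WR);
for (i = 0; i < ndw; i++)
	OUT_BATCH(atom->cmd[i + 1]);
END_BATCH();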
@@ -175,7 +171,7 @@ static void emit_tex_offsets(GLcontext *ctx, struct radeon_state_atom * atom)
 
                for(i = 0; i < numtmus; ++i) {
                    radeonTexObj *t = r300->hw.textures[i];
-               
+
                    if (!t)
                        notexture = 1;
                }
@@ -304,7 +300,7 @@ static void emit_zb_offset(GLcontext *ctx, struct radeon_state_atom * atom)
        if (rrb->bo->flags & RADEON_BO_FLAGS_MICRO_TILE){
                zbpitch |= R300_DEPTHMICROTILE_TILED;
        }
-       
+
        BEGIN_BATCH_NO_AUTOSTATE(6);
        OUT_BATCH_REGSEQ(R300_ZB_DEPTHOFFSET, 1);
        OUT_BATCH_RELOC(0, rrb->bo, 0, 0, RADEON_GEM_DOMAIN_VRAM, 0);
@@ -606,8 +602,8 @@ void r300InitCmdBuf(r300ContextPtr r300)
        r300->hw.zb.emit = emit_zb_offset;
        ALLOC_STATE(zb_depthclearvalue, always, 2, 0);
        r300->hw.zb_depthclearvalue.cmd[0] = cmdpacket0(r300->radeon.radeonScreen, R300_ZB_DEPTHCLEARVALUE, 1);
-       ALLOC_STATE(unk4F30, always, 3, 0);
-       r300->hw.unk4F30.cmd[0] = cmdpacket0(r300->radeon.radeonScreen, 0x4F30, 2);
+       ALLOC_STATE(zb_zmask, always, 3, 0);
+       r300->hw.zb_zmask.cmd[0] = cmdpacket0(r300->radeon.radeonScreen, R300_ZB_ZMASK_OFFSET, 2);
        ALLOC_STATE(zb_hiz_offset, always, 2, 0);
        r300->hw.zb_hiz_offset.cmd[0] = cmdpacket0(r300->radeon.radeonScreen, R300_ZB_HIZ_OFFSET, 1);
        ALLOC_STATE(zb_hiz_pitch, always, 2, 0);