#include "r200_context.h"
#include "r200_state.h"
#include "r200_ioctl.h"
-#include "r200_tex.h"
#include "r200_tcl.h"
#include "r200_swtcl.h"
#include "r200_maos.h"
* discrete and there are no intervening state changes. (Somewhat
* duplicates changes to DrawArrays code)
*/
-static void r200EmitPrim( GLcontext *ctx,
+static void r200EmitPrim( struct gl_context *ctx,
GLenum prim,
GLuint hwprim,
GLuint start,
/* External entrypoints */
/**********************************************************************/
-void r200EmitPrimitive( GLcontext *ctx,
+void r200EmitPrimitive( struct gl_context *ctx,
GLuint first,
GLuint last,
GLuint flags )
tcl_render_tab_verts[flags&PRIM_MODE_MASK]( ctx, first, last, flags );
}
-void r200EmitEltPrimitive( GLcontext *ctx,
+void r200EmitEltPrimitive( struct gl_context *ctx,
GLuint first,
GLuint last,
GLuint flags )
tcl_render_tab_elts[flags&PRIM_MODE_MASK]( ctx, first, last, flags );
}
-void r200TclPrimitive( GLcontext *ctx,
+void r200TclPrimitive( struct gl_context *ctx,
GLenum prim,
int hw_prim )
{
r200ContextPtr rmesa = R200_CONTEXT(ctx);
GLuint newprim = hw_prim | R200_VF_TCL_OUTPUT_VTX_ENABLE;
+ radeon_prepare_render(&rmesa->radeon);
+ if (rmesa->radeon.NewGLState)
+ r200ValidateState( ctx );
+
if (newprim != rmesa->tcl.hw_primitive ||
!discrete_prim[hw_prim&0xf]) {
/* need to disable perspective-correct texturing for point sprites */
* Fog blend factors are in the range [0,1].
*/
float
-r200ComputeFogBlendFactor( GLcontext *ctx, GLfloat fogcoord )
+r200ComputeFogBlendFactor( struct gl_context *ctx, GLfloat fogcoord )
{
GLfloat end = ctx->Fog.End;
GLfloat d, temp;
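   /* For reference (illustrative, not code from this patch): the blend
    * factor this helper returns follows the GL spec fog equations,
    * clamped to [0,1]:
    *
    *    GL_LINEAR:  f = (end - |fogcoord|) / (end - start)
    *    GL_EXP:     f = exp(-density * |fogcoord|)
    *    GL_EXP2:    f = exp(-(density * |fogcoord|)^2)
    *
    * with start/end/density taken from ctx->Fog.
    */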
 * Predict the total emit size for the next rendering operation so there is no flush in the middle of rendering.
 * The prediction should be the smallest value that is still guaranteed to cover the worst-case scenario.
*/
-static GLuint r200EnsureEmitSize( GLcontext * ctx , GLubyte* vimap_rev )
+static GLuint r200EnsureEmitSize( struct gl_context * ctx , GLubyte* vimap_rev )
{
r200ContextPtr rmesa = R200_CONTEXT(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
rendering code may decide to convert to elts.
In that case we have to make a pessimistic prediction
and use the larger of the two paths. */
- const GLuint elts = ELTS_BUFSZ(nr_aos);
- const GLuint index = INDEX_BUFSZ;
+ const GLuint elt_count = (VB->Primitive[i].count/GET_MAX_HW_ELTS() + 1);
+ const GLuint elts = ELTS_BUFSZ(nr_aos) * elt_count;
+ const GLuint index = INDEX_BUFSZ * elt_count;
const GLuint vbuf = VBUF_BUFSZ;
if ( (!VB->Elts && VB->Primitive[i].count >= MAX_CONVERSION_SIZE)
|| vbuf > index + elts)
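      /* Worked example of the pessimistic budget above (the per-packet
       * limit is only illustrative; the real value comes from
       * GET_MAX_HW_ELTS()): with a hypothetical limit of 300 elts per
       * packet and a primitive of 1000 vertices, elt_count = 1000/300 + 1
       * = 4, so four ELTS_BUFSZ blocks plus four INDEX_BUFSZ headers are
       * budgeted for the elt path; the larger of that total and VBUF_BUFSZ
       * is then reserved (or just VBUF_BUFSZ when the primitive is too
       * large to ever be converted to elts).
       */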
/* TCL render.
*/
-static GLboolean r200_run_tcl_render( GLcontext *ctx,
+static GLboolean r200_run_tcl_render( struct gl_context *ctx,
struct tnl_pipeline_stage *stage )
{
r200ContextPtr rmesa = R200_CONTEXT(ctx);
prog to a not enabled output however, so just don't mess with it.
We only need to change compsel. */
GLuint out_compsel = 0;
- GLuint vp_out = rmesa->curr_vp_hw->mesa_program.Base.OutputsWritten;
+ const GLbitfield64 vp_out =
+ rmesa->curr_vp_hw->mesa_program.Base.OutputsWritten;
vimap_rev = &rmesa->curr_vp_hw->inputmap_rev[0];
- assert(vp_out & (1 << VERT_RESULT_HPOS));
+ assert(vp_out & BITFIELD64_BIT(VERT_RESULT_HPOS));
out_compsel = R200_OUTPUT_XYZW;
- if (vp_out & (1 << VERT_RESULT_COL0)) {
+ if (vp_out & BITFIELD64_BIT(VERT_RESULT_COL0)) {
out_compsel |= R200_OUTPUT_COLOR_0;
}
- if (vp_out & (1 << VERT_RESULT_COL1)) {
+ if (vp_out & BITFIELD64_BIT(VERT_RESULT_COL1)) {
out_compsel |= R200_OUTPUT_COLOR_1;
}
- if (vp_out & (1 << VERT_RESULT_FOGC)) {
+ if (vp_out & BITFIELD64_BIT(VERT_RESULT_FOGC)) {
out_compsel |= R200_OUTPUT_DISCRETE_FOG;
}
- if (vp_out & (1 << VERT_RESULT_PSIZ)) {
+ if (vp_out & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
out_compsel |= R200_OUTPUT_PT_SIZE;
}
for (i = VERT_RESULT_TEX0; i < VERT_RESULT_TEX6; i++) {
- if (vp_out & (1 << i)) {
+ if (vp_out & BITFIELD64_BIT(i)) {
out_compsel |= R200_OUTPUT_TEX_0 << (i - VERT_RESULT_TEX0);
}
}
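   /* Note on the switch to BITFIELD64_BIT: OutputsWritten is a
    * GLbitfield64, so a plain (1 << i) test only works for result indices
    * below 32 and would silently truncate for anything higher.
    * BITFIELD64_BIT(i) expands to roughly ((GLbitfield64)1 << (i)),
    * keeping the test correct for every VERT_RESULT_* value.
    */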
*/
-static void transition_to_swtnl( GLcontext *ctx )
+static void transition_to_swtnl( struct gl_context *ctx )
{
r200ContextPtr rmesa = R200_CONTEXT(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
rmesa->hw.vap.cmd[VAP_SE_VAP_CNTL] &= ~(R200_VAP_TCL_ENABLE|R200_VAP_PROG_VTX_SHADER_ENABLE);
}
-static void transition_to_hwtnl( GLcontext *ctx )
+static void transition_to_hwtnl( struct gl_context *ctx )
{
r200ContextPtr rmesa = R200_CONTEXT(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
-void r200TclFallback( GLcontext *ctx, GLuint bit, GLboolean mode )
+void r200TclFallback( struct gl_context *ctx, GLuint bit, GLboolean mode )
{
- r200ContextPtr rmesa = R200_CONTEXT(ctx);
- GLuint oldfallback = rmesa->radeon.TclFallback;
-
- if (mode) {
- rmesa->radeon.TclFallback |= bit;
- if (oldfallback == 0) {
- if (R200_DEBUG & RADEON_FALLBACKS)
- fprintf(stderr, "R200 begin tcl fallback %s\n",
- getFallbackString( bit ));
- transition_to_swtnl( ctx );
- }
- }
- else {
- rmesa->radeon.TclFallback &= ~bit;
- if (oldfallback == bit) {
- if (R200_DEBUG & RADEON_FALLBACKS)
- fprintf(stderr, "R200 end tcl fallback %s\n",
- getFallbackString( bit ));
- transition_to_hwtnl( ctx );
- }
- }
+ r200ContextPtr rmesa = R200_CONTEXT(ctx);
+ GLuint oldfallback = rmesa->radeon.TclFallback;
+
+ if (mode) {
+ if (oldfallback == 0) {
+ /* We have to flush before transition */
+ if ( rmesa->radeon.dma.flush )
+ rmesa->radeon.dma.flush( rmesa->radeon.glCtx );
+
+ if (R200_DEBUG & RADEON_FALLBACKS)
+ fprintf(stderr, "R200 begin tcl fallback %s\n",
+ getFallbackString( bit ));
+ rmesa->radeon.TclFallback |= bit;
+ transition_to_swtnl( ctx );
+ } else
+ rmesa->radeon.TclFallback |= bit;
+ } else {
+ if (oldfallback == bit) {
+ /* We have to flush before transition */
+ if ( rmesa->radeon.dma.flush )
+ rmesa->radeon.dma.flush( rmesa->radeon.glCtx );
+
+ if (R200_DEBUG & RADEON_FALLBACKS)
+ fprintf(stderr, "R200 end tcl fallback %s\n",
+ getFallbackString( bit ));
+ rmesa->radeon.TclFallback &= ~bit;
+ transition_to_hwtnl( ctx );
+ } else
+ rmesa->radeon.TclFallback &= ~bit;
+ }
}
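/* The functional change in r200TclFallback above is the dma.flush call:
 * any vertices still buffered for the current path are emitted before
 * switching between hardware and software TNL, so they are not flushed
 * later under the other path's vertex setup.  As before, only the first
 * fallback bit to be set triggers transition_to_swtnl(), and only clearing
 * the last remaining bit triggers transition_to_hwtnl().
 */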