/**************************************************************************
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 **************************************************************************/

/* Render unclipped vertex buffers by emitting vertices directly to
 * dma buffers.  Use strip/fan hardware acceleration where possible.
 */
20 #include "tnl/t_context.h"
21 #include "tnl/t_vertex.h"
23 #include "intel_screen.h"
24 #include "intel_context.h"
25 #include "intel_tris.h"
26 #include "intel_batchbuffer.h"
27 #include "intel_reg.h"
/* Render unclipped vertex buffers by emitting vertices directly to
 * dma buffers.  Use strip/fan hardware primitives where possible.
 * Try to simulate missing primitives with indexed vertices.
 */
/* Capability switches consumed by the tnl_dd/t_dd_dmatmp.h render
 * template included below: each HAVE_* flag tells the template which
 * hardware primitive types this chip can emit directly.
 * NOTE(review): extraction dropped several lines in this region (e.g.
 * the closing of the HAVE_POINTS comment) -- restore from upstream
 * before building.
 */
34 #define HAVE_POINTS 0 /* Has it, but can't use because subpixel has to
35 * be adjusted for points on the INTEL/I845G
38 #define HAVE_LINE_STRIPS 1
39 #define HAVE_TRIANGLES 1
40 #define HAVE_TRI_STRIPS 1
41 #define HAVE_TRI_STRIP_1 0 /* has it, template can't use it yet */
42 #define HAVE_TRI_FANS 1
43 #define HAVE_POLYGONS 1
45 #define HAVE_QUAD_STRIPS 0
/* Lookup tables indexed by GL primitive mode (GL_POINTS..GL_POLYGON).
 * NOTE(review): extraction dropped the initializer contents of all three
 * tables -- restore from upstream before building.
 */
/* hw_prim: GL primitive mode -> hardware primitive code used when
 * starting an inline primitive (see intelDmaPrimitive below). */
49 static GLuint hw_prim
[GL_POLYGON
+1] = {
/* reduced_prim: GL primitive mode -> reduced primitive class, passed to
 * intel->vtbl.reduced_primitive_state() before rendering. */
62 static const GLenum reduced_prim
[GL_POLYGON
+1] = {
/* scale_prim: per-input-vertex multiplier used by choose_render() to
 * estimate how many vertices the software fallback would emit; 0 marks
 * fallback-only cases. */
75 static const int scale_prim
[GL_POLYGON
+1] = {
76 0, /* fallback case */
83 0, /* fallback case */
84 0, /* fallback case */
89 static void intelDmaPrimitive( intelContextPtr intel
, GLenum prim
)
91 if (0) fprintf(stderr
, "%s %s\n", __FUNCTION__
, _mesa_lookup_enum_by_nr(prim
));
92 INTEL_FIREVERTICES(intel
);
93 intel
->vtbl
.reduced_primitive_state( intel
, reduced_prim
[prim
] );
94 intelStartInlinePrimitive( intel
, hw_prim
[prim
] );
/* Glue macros for the tnl_dd/t_dd_dmatmp.h template included below:
 * they supply the driver-specific vertex-buffer operations used by the
 * generated intel_* render functions.
 */
98 #define LOCAL_VARS intelContextPtr intel = INTEL_CONTEXT(ctx)
99 #define INIT( prim ) \
101 intelDmaPrimitive( intel, prim ); \
103 #define FLUSH() INTEL_FIREVERTICES( intel )
/* Max vertices per DMA buffer: half the allocation minus slack for
 * state packets, divided by the vertex size in bytes (vertex_size
 * appears to be in dwords -- note the *4; confirm against upstream). */
105 #define GET_SUBSEQUENT_VB_MAX_VERTS() \
106 (((intel->alloc.size / 2) - 1500) / (intel->vertex_size*4))
107 #define GET_CURRENT_VB_MAX_VERTS() GET_SUBSEQUENT_VB_MAX_VERTS()
109 #define ALLOC_VERTS( nr ) \
110 intelExtendInlinePrimitive( intel, (nr) * intel->vertex_size )
112 #define EMIT_VERTS( ctx, j, nr, buf ) \
113 _tnl_emit_vertices_to_buffer(ctx, j, (j)+(nr), buf )
/* TAG(x) -> intel_##x names the functions the template generates. */
115 #define TAG(x) intel_##x
116 #include "tnl_dd/t_dd_dmatmp.h"
119 /**********************************************************************/
120 /* Render pipeline stage */
121 /**********************************************************************/
123 /* Heuristic to choose between the two render paths:
/* Heuristic comparing the estimated cost of the direct DMA render path
 * against the software fallback for this vertex buffer.
 * NOTE(review): extraction dropped interior lines here (declarations of
 * the loop index and the nr_prims/nr_rprims/nr_rverts/cost_render
 * counters, the primitive-count increments inside the loop, and the
 * final return statements) -- restore from upstream before building.
 */
125 static GLboolean
choose_render( intelContextPtr intel
,
126 struct vertex_buffer
*VB
)
/* Size of one emitted vertex, used below to estimate dma volume. */
128 int vertsz
= intel
->vertex_size
;
130 int cost_fallback
= 0;
/* Current reduced-primitive class; changes to it cost the fallback
 * path an extra state change. */
134 int rprim
= intel
->reduced_primitive
;
/* Walk every primitive in the vertex buffer, accumulating costs. */
137 for (i
= 0 ; i
< VB
->PrimitiveCount
; i
++) {
138 GLuint prim
= VB
->Primitive
[i
].mode
;
139 GLuint length
= VB
->Primitive
[i
].count
;
/* Fallback would emit length * scale_prim[...] vertices here. */
145 nr_rverts
+= length
* scale_prim
[prim
& PRIM_MODE_MASK
];
147 if (reduced_prim
[prim
& PRIM_MODE_MASK
] != rprim
) {
149 rprim
= reduced_prim
[prim
& PRIM_MODE_MASK
];
153 /* One point for each generated primitive:
155 cost_render
= nr_prims
;
156 cost_fallback
= nr_rprims
;
158 /* One point for every 1024 dwords (4k) of dma:
160 cost_render
+= (vertsz
* i
) / 1024;
161 cost_fallback
+= (vertsz
* nr_rverts
) / 1024;
/* Debug dump of the two cost estimates. */
164 fprintf(stderr
, "cost render: %d fallback: %d\n",
165 cost_render
, cost_fallback
);
167 if (cost_render
> cost_fallback
)
/* TNL pipeline-stage "run" callback: render the vertex buffer directly
 * via the hardware DMA path when possible.  Returns GL_FALSE when this
 * stage finished the pipe (visible at the end of this block).
 * NOTE(review): extraction dropped interior lines here (opening braces,
 * the fallback return path taken when the if() below fires, the loop
 * index declaration, and the tail of the intel_render_tab_verts call)
 * -- restore from upstream before building.
 */
174 static GLboolean
intel_run_render( GLcontext
*ctx
,
175 struct tnl_pipeline_stage
*stage
)
177 intelContextPtr intel
= INTEL_CONTEXT(ctx
);
178 TNLcontext
*tnl
= TNL_CONTEXT(ctx
);
179 struct vertex_buffer
*VB
= &tnl
->vb
;
/* Bail to the software path for indexed rendering, primitives the
 * template can't validate, or when choose_render judges it cheaper. */
182 /* Don't handle clipping or indexed vertices.
184 if (intel
->RenderIndex
!= 0 ||
185 !intel_validate_render( ctx
, VB
) ||
186 !choose_render( intel
, VB
)) {
/* Hardware path: force position to be re-emitted into clipspace. */
190 tnl
->clipspace
.new_inputs
|= VERT_BIT_POS
;
192 tnl
->Driver
.Render
.Start( ctx
);
/* Emit each primitive through the template-generated dispatch table. */
194 for (i
= 0 ; i
< VB
->PrimitiveCount
; i
++)
196 GLuint prim
= VB
->Primitive
[i
].mode
;
197 GLuint start
= VB
->Primitive
[i
].start
;
198 GLuint length
= VB
->Primitive
[i
].count
;
203 intel_render_tab_verts
[prim
& PRIM_MODE_MASK
]( ctx
, start
, start
+ length
,
207 tnl
->Driver
.Render
.Finish( ctx
);
209 return GL_FALSE
; /* finished the pipe */
213 static void intel_check_render( GLcontext
*ctx
,
214 struct tnl_pipeline_stage
*stage
)
216 stage
->inputs
= TNL_CONTEXT(ctx
)->render_inputs
;
/* Pipeline-stage destructor, installed in _intel_render_stage below.
 * NOTE(review): the function body was lost in extraction -- restore
 * from upstream before building. */
219 static void dtr( struct tnl_pipeline_stage
*stage
)
/* Static descriptor for the intel hardware-render pipeline stage,
 * wiring dtr/intel_check_render/intel_run_render into the TNL pipeline.
 * Field-by-field comments below are from the original source.
 * NOTE(review): extraction dropped lines of this initializer (including
 * the stage name string and part of the check_state flag list) --
 * restore from upstream before building. */
225 const struct tnl_pipeline_stage _intel_render_stage
=
228 (_DD_NEW_SEPARATE_SPECULAR
|
231 _NEW_RENDERMODE
), /* re-check (new inputs) */
232 0, /* re-run (always runs) */
233 GL_TRUE
, /* active */
234 0, 0, /* inputs (set in check_render), outputs */
235 0, 0, /* changed_inputs, private */
236 dtr
, /* destructor */
237 intel_check_render
, /* check - initially set to alloc data */
238 intel_run_render
/* run */