/* $XFree86: xc/lib/GL/mesa/src/drv/radeon/radeon_swtcl.c,v 1.6 2003/05/06 23:52:08 daenzer Exp $ */
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
                     VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include "glheader.h"
#include "mtypes.h"
#include "colormac.h"
#include "enums.h"
#include "imports.h"
#include "macros.h"

#include "swrast_setup/swrast_setup.h"
#include "math/m_translate.h"
#include "tnl/tnl.h"
#include "tnl/t_context.h"
#include "tnl/t_pipeline.h"
#include "tnl/t_vtx_api.h"   /* for _tnl_FlushVertices */

#include "radeon_context.h"
#include "radeon_ioctl.h"
#include "radeon_state.h"
#include "radeon_swtcl.h"
#include "radeon_tcl.h"


static void flush_last_swtcl_prim( radeonContextPtr rmesa );

/* R100: xyzw, c0, c1/fog, stq[0..2]  = 4+1+1+3*3 = 15 floats */
/* R200: xyzw, c0, c1/fog, strq[0..5] = 4+1+1+4*6 = 30 floats */
#define RADEON_MAX_TNL_VERTEX_SIZE (15 * sizeof(GLfloat))   /* for mesa _tnl stage */

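/* Worked out from the comment above: the largest R100 swtcl vertex is
 * 15 floats * sizeof(GLfloat) = 60 bytes, which is the per-vertex space
 * RADEON_MAX_TNL_VERTEX_SIZE reserves for the _tnl stage.
 */
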
/***********************************************************************
 *                          Initialization                             *
 ***********************************************************************/

#define EMIT_ATTR( ATTR, STYLE, F0 )                                        \
do {                                                                        \
   rmesa->swtcl.vertex_attrs[rmesa->swtcl.vertex_attr_count].attrib = (ATTR); \
   rmesa->swtcl.vertex_attrs[rmesa->swtcl.vertex_attr_count].format = (STYLE); \
   rmesa->swtcl.vertex_attr_count++;                                        \
   fmt_0 |= F0;                                                             \
} while (0)

#define EMIT_PAD( N )                                                       \
do {                                                                        \
   rmesa->swtcl.vertex_attrs[rmesa->swtcl.vertex_attr_count].attrib = 0;    \
   rmesa->swtcl.vertex_attrs[rmesa->swtcl.vertex_attr_count].format = EMIT_PAD; \
   rmesa->swtcl.vertex_attrs[rmesa->swtcl.vertex_attr_count].offset = (N);  \
   rmesa->swtcl.vertex_attr_count++;                                        \
} while (0)

static GLuint radeon_cp_vc_frmts[3][2] =
{
   { RADEON_CP_VC_FRMT_ST0, RADEON_CP_VC_FRMT_ST0 | RADEON_CP_VC_FRMT_Q0 },
   { RADEON_CP_VC_FRMT_ST1, RADEON_CP_VC_FRMT_ST1 | RADEON_CP_VC_FRMT_Q1 },
   { RADEON_CP_VC_FRMT_ST2, RADEON_CP_VC_FRMT_ST2 | RADEON_CP_VC_FRMT_Q2 },
};

static void radeonSetVertexFormat( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vertex_buffer *VB = &tnl->vb;
   GLuint index = tnl->render_inputs;
   int fmt_0 = 0;
   int offset = 0;


   /* Important:
    */
   if ( VB->NdcPtr != NULL ) {
      VB->AttribPtr[VERT_ATTRIB_POS] = VB->NdcPtr;
   }
   else {
      VB->AttribPtr[VERT_ATTRIB_POS] = VB->ClipPtr;
   }

   assert( VB->AttribPtr[VERT_ATTRIB_POS] != NULL );
   rmesa->swtcl.vertex_attr_count = 0;

   /* EMIT_ATTR's must be in order as they tell t_vertex.c how to
    * build up a hardware vertex.
    */
   if ( !rmesa->swtcl.needproj ||
        (index & _TNL_BITS_TEX_ANY)) {   /* for projtex */
      EMIT_ATTR( _TNL_ATTRIB_POS, EMIT_4F,
                 RADEON_CP_VC_FRMT_XY | RADEON_CP_VC_FRMT_Z | RADEON_CP_VC_FRMT_W0 );
      offset = 4;
   }
   else {
      EMIT_ATTR( _TNL_ATTRIB_POS, EMIT_3F,
                 RADEON_CP_VC_FRMT_XY | RADEON_CP_VC_FRMT_Z );
      offset = 3;
   }

   rmesa->swtcl.coloroffset = offset;
#if MESA_LITTLE_ENDIAN
   EMIT_ATTR( _TNL_ATTRIB_COLOR0, EMIT_4UB_4F_RGBA,
              RADEON_CP_VC_FRMT_PKCOLOR );
#else
   EMIT_ATTR( _TNL_ATTRIB_COLOR0, EMIT_4UB_4F_ABGR,
              RADEON_CP_VC_FRMT_PKCOLOR );
#endif
   offset += 1;

   rmesa->swtcl.specoffset = 0;
   if (index & (_TNL_BIT_COLOR1|_TNL_BIT_FOG)) {

#if MESA_LITTLE_ENDIAN
      if (index & _TNL_BIT_COLOR1) {
         rmesa->swtcl.specoffset = offset;
         EMIT_ATTR( _TNL_ATTRIB_COLOR1, EMIT_3UB_3F_RGB,
                    RADEON_CP_VC_FRMT_PKSPEC );
      }
      else {
         EMIT_PAD( 3 );
      }

      if (index & _TNL_BIT_FOG) {
         EMIT_ATTR( _TNL_ATTRIB_FOG, EMIT_1UB_1F,
                    RADEON_CP_VC_FRMT_PKSPEC );
      }
      else {
         EMIT_PAD( 1 );
      }
#else
      if (index & _TNL_BIT_FOG) {
         EMIT_ATTR( _TNL_ATTRIB_FOG, EMIT_1UB_1F,
                    RADEON_CP_VC_FRMT_PKSPEC );
      }
      else {
         EMIT_PAD( 1 );
      }

      if (index & _TNL_BIT_COLOR1) {
         rmesa->swtcl.specoffset = offset;
         EMIT_ATTR( _TNL_ATTRIB_COLOR1, EMIT_3UB_3F_BGR,
                    RADEON_CP_VC_FRMT_PKSPEC );
      }
      else {
         EMIT_PAD( 3 );
      }
#endif
   }

   if (index & _TNL_BITS_TEX_ANY) {
      int i;

      for (i = 0; i < ctx->Const.MaxTextureUnits; i++) {
         if (index & _TNL_BIT_TEX(i)) {
            GLuint sz = VB->TexCoordPtr[i]->size;

            switch (sz) {
            case 1:
            case 2:
            case 3:
               EMIT_ATTR( _TNL_ATTRIB_TEX0+i, EMIT_2F,
                          radeon_cp_vc_frmts[i][0] );
               break;
            case 4:
               EMIT_ATTR( _TNL_ATTRIB_TEX0+i, EMIT_3F_XYW,
                          radeon_cp_vc_frmts[i][1] );
               break;
            default:
               continue;
            };
         }
      }
   }

   if ( rmesa->tnl_index != index ||
        fmt_0 != rmesa->swtcl.vertex_format) {
      RADEON_NEWPRIM(rmesa);
      rmesa->swtcl.vertex_format = fmt_0;
      rmesa->swtcl.vertex_size =
         _tnl_install_attrs( ctx,
                             rmesa->swtcl.vertex_attrs,
                             rmesa->swtcl.vertex_attr_count,
                             NULL, 0 );
      rmesa->swtcl.vertex_size /= 4;
      rmesa->tnl_index = index;
      if (RADEON_DEBUG & DEBUG_VERTS)
         fprintf( stderr, "%s: vertex_size= %d floats\n",
                  __FUNCTION__, rmesa->swtcl.vertex_size);
   }
}

static void radeonRenderStart( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );

   radeonSetVertexFormat( ctx );

   if (rmesa->dma.flush != 0 &&
       rmesa->dma.flush != flush_last_swtcl_prim)
      rmesa->dma.flush( rmesa );
}


/**
 * Set vertex state for SW TCL.  The primary purpose of this function is to
 * determine in advance whether or not the hardware can / should do the
 * projection divide or Mesa should do it.
 */
void radeonChooseVertexState( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );
   TNLcontext *tnl = TNL_CONTEXT(ctx);

   GLuint se_coord_fmt;

   /* We must ensure that we don't do _tnl_need_projected_coords while in a
    * rasterization fallback.  As this function will be called again when we
    * leave a rasterization fallback, we can just skip it for now.
    */
   if (rmesa->Fallback != 0)
      return;

   /* HW perspective divide is a win, but tiny vertex formats are a
    * bigger one.
    */

   if ( ((tnl->render_inputs & (_TNL_BITS_TEX_ANY|_TNL_BIT_COLOR1)) == 0)
        || (ctx->_TriangleCaps & (DD_TRI_LIGHT_TWOSIDE|DD_TRI_UNFILLED))) {
      rmesa->swtcl.needproj = GL_TRUE;
      se_coord_fmt = (RADEON_VTX_XY_PRE_MULT_1_OVER_W0 |
                      RADEON_VTX_Z_PRE_MULT_1_OVER_W0 |
                      RADEON_TEX1_W_ROUTING_USE_Q1);
   }
   else {
      rmesa->swtcl.needproj = GL_FALSE;
      se_coord_fmt = (RADEON_VTX_W0_IS_NOT_1_OVER_W0 |
                      RADEON_TEX1_W_ROUTING_USE_Q1);
   }

   _tnl_need_projected_coords( ctx, rmesa->swtcl.needproj );

   if ( se_coord_fmt != rmesa->hw.set.cmd[SET_SE_COORDFMT] ) {
      RADEON_STATECHANGE( rmesa, set );
      rmesa->hw.set.cmd[SET_SE_COORDFMT] = se_coord_fmt;
   }
}


/* Flush vertices in the current dma region.
 */
static void flush_last_swtcl_prim( radeonContextPtr rmesa )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   rmesa->dma.flush = NULL;

   if (rmesa->dma.current.buf) {
      struct radeon_dma_region *current = &rmesa->dma.current;
      GLuint current_offset = (rmesa->radeonScreen->gart_buffer_offset +
                               current->buf->buf->idx * RADEON_BUFFER_SIZE +
                               current->start);

      assert (!(rmesa->swtcl.hw_primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

      assert (current->start +
              rmesa->swtcl.numverts * rmesa->swtcl.vertex_size * 4 ==
              current->ptr);

      if (rmesa->dma.current.start != rmesa->dma.current.ptr) {
         radeonEnsureCmdBufSpace( rmesa, VERT_AOS_BUFSZ +
                                  rmesa->hw.max_state_size + VBUF_BUFSZ );

         radeonEmitVertexAOS( rmesa,
                              rmesa->swtcl.vertex_size,
                              current_offset);

         radeonEmitVbufPrim( rmesa,
                             rmesa->swtcl.vertex_format,
                             rmesa->swtcl.hw_primitive,
                             rmesa->swtcl.numverts);
      }

      rmesa->swtcl.numverts = 0;
      current->start = current->ptr;
   }
}


/* Alloc space in the current dma region.
 */
static __inline void *radeonAllocDmaLowVerts( radeonContextPtr rmesa,
                                              int nverts, int vsize )
{
   GLuint bytes = vsize * nverts;

   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      radeonRefillCurrentDmaRegion( rmesa );

   if (!rmesa->dma.flush) {
      rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
      rmesa->dma.flush = flush_last_swtcl_prim;
   }

   assert( vsize == rmesa->swtcl.vertex_size * 4 );
   assert( rmesa->dma.flush == flush_last_swtcl_prim );
   assert (rmesa->dma.current.start +
           rmesa->swtcl.numverts * rmesa->swtcl.vertex_size * 4 ==
           rmesa->dma.current.ptr);


   {
      GLubyte *head = (GLubyte *)(rmesa->dma.current.address + rmesa->dma.current.ptr);
      rmesa->dma.current.ptr += bytes;
      rmesa->swtcl.numverts += nverts;
      return head;
   }

}


/*
 * Render unclipped vertex buffers by emitting vertices directly to
 * dma buffers.  Use strip/fan hardware primitives where possible.
 * Try to simulate missing primitives with indexed vertices.
 */
#define HAVE_POINTS      1
#define HAVE_LINES       1
#define HAVE_LINE_STRIPS 1
#define HAVE_TRIANGLES   1
#define HAVE_TRI_STRIPS  1
#define HAVE_TRI_STRIP_1 0
#define HAVE_TRI_FANS    1
#define HAVE_QUADS       0
#define HAVE_QUAD_STRIPS 0
#define HAVE_POLYGONS    0
/* \todo: is it possible to make "ELTS" work with t_vertex code ? */
#define HAVE_ELTS        0

static const GLuint hw_prim[GL_POLYGON+1] = {
   RADEON_CP_VC_CNTL_PRIM_TYPE_POINT,
   RADEON_CP_VC_CNTL_PRIM_TYPE_LINE,
   0,
   RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP,
   RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST,
   RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP,
   RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN,
   0,
   0,
   0
};

static __inline void radeonDmaPrimitive( radeonContextPtr rmesa, GLenum prim )
{
   RADEON_NEWPRIM( rmesa );
   rmesa->swtcl.hw_primitive = hw_prim[prim];
   assert(rmesa->dma.current.ptr == rmesa->dma.current.start);
}

#define LOCAL_VARS radeonContextPtr rmesa = RADEON_CONTEXT(ctx); (void)rmesa
#define INIT( prim ) radeonDmaPrimitive( rmesa, prim )
#define FLUSH()  RADEON_NEWPRIM( rmesa )
#define GET_CURRENT_VB_MAX_VERTS() \
   (((int)rmesa->dma.current.end - (int)rmesa->dma.current.ptr) / (rmesa->swtcl.vertex_size*4))
#define GET_SUBSEQUENT_VB_MAX_VERTS() \
   ((RADEON_BUFFER_SIZE) / (rmesa->swtcl.vertex_size*4))
#define ALLOC_VERTS( nr ) \
   radeonAllocDmaLowVerts( rmesa, nr, rmesa->swtcl.vertex_size * 4 )
#define EMIT_VERTS( ctx, j, nr, buf ) \
   _tnl_emit_vertices_to_buffer(ctx, j, (j)+(nr), buf)

#define TAG(x) radeon_dma_##x
#include "tnl_dd/t_dd_dmatmp.h"

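/* The include above, with TAG(x) == radeon_dma_##x, instantiates the
 * radeon_dma_* helpers (e.g. radeon_dma_render_tab_verts[] and
 * radeon_dma_validate_render()) that radeon_run_render() below dispatches
 * through when it can emit primitives straight into the DMA stream.
 */
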

/**********************************************************************/
/*                       Render pipeline stage                        */
/**********************************************************************/


static GLboolean radeon_run_render( GLcontext *ctx,
                                    struct tnl_pipeline_stage *stage )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vertex_buffer *VB = &tnl->vb;
   tnl_render_func *tab = TAG(render_tab_verts);
   GLuint i;

   if (rmesa->swtcl.indexed_verts.buf)
      RELEASE_ELT_VERTS();

   if (rmesa->swtcl.RenderIndex != 0 ||
       !radeon_dma_validate_render( ctx, VB ))
      return GL_TRUE;

   tnl->Driver.Render.Start( ctx );

   for (i = 0 ; i < VB->PrimitiveCount ; i++)
   {
      GLuint prim = VB->Primitive[i].mode;
      GLuint start = VB->Primitive[i].start;
      GLuint length = VB->Primitive[i].count;

      if (!length)
         continue;

      if (RADEON_DEBUG & DEBUG_PRIMS)
         fprintf(stderr, "radeon_render.c: prim %s %d..%d\n",
                 _mesa_lookup_enum_by_nr(prim & PRIM_MODE_MASK),
                 start, start+length);

      if (length)
         tab[prim & PRIM_MODE_MASK]( ctx, start, start + length, prim );
   }

   tnl->Driver.Render.Finish( ctx );

   return GL_FALSE;   /* finished the pipe */
}


const struct tnl_pipeline_stage _radeon_render_stage =
{
   "radeon render",
   NULL,
   NULL,
   NULL,
   NULL,
   radeon_run_render   /* run */
};


/**************************************************************************/

/* Radeon texture rectangle expects coords in 0..1 range, not 0..dimension
 * as in the extension spec.  Need to translate here.
 *
 * Note that swrast expects 0..dimension, so if a fallback is active,
 * don't do anything.  (Maybe need to configure swrast to match hw)
 */
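/* For illustration: with a 256x128 rectangle texture, an incoming texcoord
 * of (128.0, 64.0) is rescaled by the stage below to (0.5, 0.5) before the
 * hardware sees it.
 */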
struct texrect_stage_data {
   GLvector4f texcoord[MAX_TEXTURE_UNITS];
};

#define TEXRECT_STAGE_DATA(stage) ((struct texrect_stage_data *)stage->privatePtr)


static GLboolean run_texrect_stage( GLcontext *ctx,
                                    struct tnl_pipeline_stage *stage )
{
   struct texrect_stage_data *store = TEXRECT_STAGE_DATA(stage);
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vertex_buffer *VB = &tnl->vb;
   GLuint i;

   if (rmesa->Fallback)
      return GL_TRUE;

   for (i = 0 ; i < ctx->Const.MaxTextureUnits ; i++) {
      if (ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_RECT_BIT) {
         struct gl_texture_object *texObj = ctx->Texture.Unit[i].CurrentRect;
         struct gl_texture_image *texImage = texObj->Image[0][texObj->BaseLevel];
         const GLfloat iw = 1.0/texImage->Width;
         const GLfloat ih = 1.0/texImage->Height;
         GLfloat *in = (GLfloat *)VB->TexCoordPtr[i]->data;
         GLint instride = VB->TexCoordPtr[i]->stride;
         GLfloat (*out)[4] = store->texcoord[i].data;
         GLint j;

         for (j = 0 ; j < VB->Count ; j++) {
            out[j][0] = in[0] * iw;
            out[j][1] = in[1] * ih;
            in = (GLfloat *)((GLubyte *)in + instride);
         }

         VB->AttribPtr[VERT_ATTRIB_TEX0+i] = VB->TexCoordPtr[i] = &store->texcoord[i];
      }
   }

   return GL_TRUE;
}


/* Called the first time stage->run() is invoked.
 */
static GLboolean alloc_texrect_data( GLcontext *ctx,
                                     struct tnl_pipeline_stage *stage )
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct texrect_stage_data *store;
   GLuint i;

   stage->privatePtr = CALLOC(sizeof(*store));
   store = TEXRECT_STAGE_DATA(stage);
   if (!store)
      return GL_FALSE;

   for (i = 0 ; i < ctx->Const.MaxTextureUnits ; i++)
      _mesa_vector4f_alloc( &store->texcoord[i], 0, VB->Size, 32 );

   return GL_TRUE;
}

static void free_texrect_data( struct tnl_pipeline_stage *stage )
{
   struct texrect_stage_data *store = TEXRECT_STAGE_DATA(stage);
   GLuint i;

   if (store) {
      for (i = 0 ; i < MAX_TEXTURE_UNITS ; i++)
         if (store->texcoord[i].data)
            _mesa_vector4f_free( &store->texcoord[i] );
      FREE( store );
      stage->privatePtr = NULL;
   }
}

const struct tnl_pipeline_stage _radeon_texrect_stage =
{
   "radeon texrect stage",   /* name */
   NULL,
   alloc_texrect_data,
   free_texrect_data,
   NULL,
   run_texrect_stage
};


/**************************************************************************/


static const GLuint reduced_hw_prim[GL_POLYGON+1] = {
   RADEON_CP_VC_CNTL_PRIM_TYPE_POINT,
   RADEON_CP_VC_CNTL_PRIM_TYPE_LINE,
   RADEON_CP_VC_CNTL_PRIM_TYPE_LINE,
   RADEON_CP_VC_CNTL_PRIM_TYPE_LINE,
   RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST,
   RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST,
   RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST,
   RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST,
   RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST,
   RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST
};

static void radeonRasterPrimitive( GLcontext *ctx, GLuint hwprim );
static void radeonRenderPrimitive( GLcontext *ctx, GLenum prim );
static void radeonResetLineStipple( GLcontext *ctx );


/***********************************************************************
 *               Emit primitives as inline vertices                    *
 ***********************************************************************/

#undef LOCAL_VARS
#undef ALLOC_VERTS
#define CTX_ARG radeonContextPtr rmesa
#define GET_VERTEX_DWORDS() rmesa->swtcl.vertex_size
#define ALLOC_VERTS( n, size ) radeonAllocDmaLowVerts( rmesa, n, (size) * 4 )
#undef LOCAL_VARS
#define LOCAL_VARS                                              \
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);                \
   const char *radeonverts = (char *)rmesa->swtcl.verts;
#define VERT(x) (radeonVertex *)(radeonverts + ((x) * (vertsize) * sizeof(int)))
#define VERTEX radeonVertex
#undef TAG
#define TAG(x) radeon_##x
#include "tnl_dd/t_dd_triemit.h"


/***********************************************************************
 *          Macros for t_dd_tritmp.h to draw basic primitives          *
 ***********************************************************************/

#define QUAD( a, b, c, d ) radeon_quad( rmesa, a, b, c, d )
#define TRI( a, b, c )     radeon_triangle( rmesa, a, b, c )
#define LINE( a, b )       radeon_line( rmesa, a, b )
#define POINT( a )         radeon_point( rmesa, a )

/***********************************************************************
 *              Build render functions from dd templates               *
 ***********************************************************************/

#define RADEON_TWOSIDE_BIT   0x01
#define RADEON_UNFILLED_BIT  0x02
#define RADEON_MAX_TRIFUNC   0x08


static struct {
   tnl_points_func   points;
   tnl_line_func     line;
   tnl_triangle_func triangle;
   tnl_quad_func     quad;
} rast_tab[RADEON_MAX_TRIFUNC];


#define DO_FALLBACK  0
#define DO_OFFSET    0
#define DO_UNFILLED (IND & RADEON_UNFILLED_BIT)
#define DO_TWOSIDE  (IND & RADEON_TWOSIDE_BIT)
#define DO_FLAT      0
#define DO_TRI       1
#define DO_QUAD      1
#define DO_LINE      1
#define DO_POINTS    1
#define DO_FULL_QUAD 1

#define HAVE_RGBA         1
#define HAVE_SPEC         1
#define HAVE_BACK_COLORS  0
#define HAVE_HW_FLATSHADE 1
#define TAB rast_tab

#define DEPTH_SCALE 1.0
#define UNFILLED_TRI unfilled_tri
#define UNFILLED_QUAD unfilled_quad
#define VERT_X(_v) _v->v.x
#define VERT_Y(_v) _v->v.y
#define VERT_Z(_v) _v->v.z
#define AREA_IS_CCW( a ) (a < 0)
#define GET_VERTEX(e) (rmesa->swtcl.verts + ((e) * rmesa->swtcl.vertex_size * sizeof(int)))

#define VERT_SET_RGBA( v, c )                                           \
do {                                                                    \
   radeon_color_t *color = (radeon_color_t *)&((v)->ui[coloroffset]);   \
   UNCLAMPED_FLOAT_TO_UBYTE(color->red, (c)[0]);                        \
   UNCLAMPED_FLOAT_TO_UBYTE(color->green, (c)[1]);                      \
   UNCLAMPED_FLOAT_TO_UBYTE(color->blue, (c)[2]);                       \
   UNCLAMPED_FLOAT_TO_UBYTE(color->alpha, (c)[3]);                      \
} while (0)

#define VERT_COPY_RGBA( v0, v1 ) v0->ui[coloroffset] = v1->ui[coloroffset]

#define VERT_SET_SPEC( v, c )                                           \
do {                                                                    \
   if (specoffset) {                                                    \
      radeon_color_t *spec = (radeon_color_t *)&((v)->ui[specoffset]);  \
      UNCLAMPED_FLOAT_TO_UBYTE(spec->red, (c)[0]);                      \
      UNCLAMPED_FLOAT_TO_UBYTE(spec->green, (c)[1]);                    \
      UNCLAMPED_FLOAT_TO_UBYTE(spec->blue, (c)[2]);                     \
   }                                                                    \
} while (0)
#define VERT_COPY_SPEC( v0, v1 )                                        \
do {                                                                    \
   if (specoffset) {                                                    \
      radeon_color_t *spec0 = (radeon_color_t *)&((v0)->ui[specoffset]); \
      radeon_color_t *spec1 = (radeon_color_t *)&((v1)->ui[specoffset]); \
      spec0->red   = spec1->red;                                        \
      spec0->green = spec1->green;                                      \
      spec0->blue  = spec1->blue;                                       \
   }                                                                    \
} while (0)

/* These don't need LE32_TO_CPU() as they are used to save and restore
 * colors which are already in the correct format.
 */
#define VERT_SAVE_RGBA( idx )    color[idx] = v[idx]->ui[coloroffset]
#define VERT_RESTORE_RGBA( idx ) v[idx]->ui[coloroffset] = color[idx]
#define VERT_SAVE_SPEC( idx )    if (specoffset) spec[idx] = v[idx]->ui[specoffset]
#define VERT_RESTORE_SPEC( idx ) if (specoffset) v[idx]->ui[specoffset] = spec[idx]

#undef LOCAL_VARS
#undef TAG
#undef INIT

#define LOCAL_VARS(n)                                                   \
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);                        \
   GLuint color[n], spec[n];                                            \
   GLuint coloroffset = rmesa->swtcl.coloroffset;                       \
   GLuint specoffset = rmesa->swtcl.specoffset;                         \
   (void) color; (void) spec; (void) coloroffset; (void) specoffset;

/***********************************************************************
 *             Helpers for rendering unfilled primitives               *
 ***********************************************************************/

#define RASTERIZE(x) radeonRasterPrimitive( ctx, reduced_hw_prim[x] )
#define RENDER_PRIMITIVE rmesa->swtcl.render_primitive
#undef TAG
#define TAG(x) x
#include "tnl_dd/t_dd_unfilled.h"
#undef IND


/***********************************************************************
 *                    Generate GL render functions                     *
 ***********************************************************************/


#define IND (0)
#define TAG(x) x
#include "tnl_dd/t_dd_tritmp.h"

#define IND (RADEON_TWOSIDE_BIT)
#define TAG(x) x##_twoside
#include "tnl_dd/t_dd_tritmp.h"

#define IND (RADEON_UNFILLED_BIT)
#define TAG(x) x##_unfilled
#include "tnl_dd/t_dd_tritmp.h"

#define IND (RADEON_TWOSIDE_BIT|RADEON_UNFILLED_BIT)
#define TAG(x) x##_twoside_unfilled
#include "tnl_dd/t_dd_tritmp.h"


static void init_rast_tab( void )
{
   init();
   init_twoside();
   init_unfilled();
   init_twoside_unfilled();
}

/**********************************************************************/
/*                Render unclipped begin/end objects                  */
/**********************************************************************/

#define RENDER_POINTS( start, count )          \
   for ( ; start < count ; start++)            \
      radeon_point( rmesa, VERT(start) )
#define RENDER_LINE( v0, v1 ) \
   radeon_line( rmesa, VERT(v0), VERT(v1) )
#define RENDER_TRI( v0, v1, v2 ) \
   radeon_triangle( rmesa, VERT(v0), VERT(v1), VERT(v2) )
#define RENDER_QUAD( v0, v1, v2, v3 ) \
   radeon_quad( rmesa, VERT(v0), VERT(v1), VERT(v2), VERT(v3) )
#undef INIT
#define INIT(x) do {                           \
   radeonRenderPrimitive( ctx, x );            \
} while (0)
#undef LOCAL_VARS
#define LOCAL_VARS                                              \
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);                \
   const GLuint vertsize = rmesa->swtcl.vertex_size;            \
   const char *radeonverts = (char *)rmesa->swtcl.verts;        \
   const GLuint * const elt = TNL_CONTEXT(ctx)->vb.Elts;        \
   const GLboolean stipple = ctx->Line.StippleFlag;             \
   (void) elt; (void) stipple;
#define RESET_STIPPLE   if ( stipple ) radeonResetLineStipple( ctx );
#define RESET_OCCLUSION
#define PRESERVE_VB_DEFS
#define ELT(x) (x)
#define TAG(x) radeon_##x##_verts
#include "tnl/t_vb_rendertmp.h"
#undef ELT
#undef TAG
#define TAG(x) radeon_##x##_elts
#define ELT(x) elt[x]
#include "tnl/t_vb_rendertmp.h"



/**********************************************************************/
/*                    Choose render functions                         */
/**********************************************************************/

void radeonChooseRenderState( GLcontext *ctx )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   GLuint index = 0;
   GLuint flags = ctx->_TriangleCaps;

   if (!rmesa->TclFallback || rmesa->Fallback)
      return;

   if (flags & DD_TRI_LIGHT_TWOSIDE) index |= RADEON_TWOSIDE_BIT;
   if (flags & DD_TRI_UNFILLED)      index |= RADEON_UNFILLED_BIT;

   if (index != rmesa->swtcl.RenderIndex) {
      tnl->Driver.Render.Points = rast_tab[index].points;
      tnl->Driver.Render.Line = rast_tab[index].line;
      tnl->Driver.Render.ClippedLine = rast_tab[index].line;
      tnl->Driver.Render.Triangle = rast_tab[index].triangle;
      tnl->Driver.Render.Quad = rast_tab[index].quad;

      if (index == 0) {
         tnl->Driver.Render.PrimTabVerts = radeon_render_tab_verts;
         tnl->Driver.Render.PrimTabElts = radeon_render_tab_elts;
         tnl->Driver.Render.ClippedPolygon = radeon_fast_clipped_poly;
      } else {
         tnl->Driver.Render.PrimTabVerts = _tnl_render_tab_verts;
         tnl->Driver.Render.PrimTabElts = _tnl_render_tab_elts;
         tnl->Driver.Render.ClippedPolygon = _tnl_RenderClippedPolygon;
      }

      rmesa->swtcl.RenderIndex = index;
   }
}


/**********************************************************************/
/*                 High level hooks for t_vb_render.c                 */
/**********************************************************************/


static void radeonRasterPrimitive( GLcontext *ctx, GLuint hwprim )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

   if (rmesa->swtcl.hw_primitive != hwprim) {
      RADEON_NEWPRIM( rmesa );
      rmesa->swtcl.hw_primitive = hwprim;
   }
}

static void radeonRenderPrimitive( GLcontext *ctx, GLenum prim )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   rmesa->swtcl.render_primitive = prim;
   if (prim < GL_TRIANGLES || !(ctx->_TriangleCaps & DD_TRI_UNFILLED))
      radeonRasterPrimitive( ctx, reduced_hw_prim[prim] );
}

static void radeonRenderFinish( GLcontext *ctx )
{
}

static void radeonResetLineStipple( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   RADEON_STATECHANGE( rmesa, lin );
}


/**********************************************************************/
/*           Transition to/from hardware rasterization.               */
/**********************************************************************/

static const char * const fallbackStrings[] = {
   "Texture mode",
   "glDrawBuffer(GL_FRONT_AND_BACK)",
   "glEnable(GL_STENCIL) without hw stencil buffer",
   "glRenderMode(selection or feedback)",
   "glBlendEquation",
   "glBlendFunc",
   "RADEON_NO_RAST",
   "Mixing GL_CLAMP_TO_BORDER and GL_CLAMP (or GL_MIRROR_CLAMP_ATI)"
};


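/* Maps a single fallback bit to its descriptive string; for example a bit
 * value of 0x4 (the third bit) yields fallbackStrings[2].  Assumes exactly
 * one bit is set.
 */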
static const char *getFallbackString(GLuint bit)
{
   int i = 0;
   while (bit > 1) {
      i++;
      bit >>= 1;
   }
   return fallbackStrings[i];
}


void radeonFallback( GLcontext *ctx, GLuint bit, GLboolean mode )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLuint oldfallback = rmesa->Fallback;

   if (mode) {
      rmesa->Fallback |= bit;
      if (oldfallback == 0) {
         RADEON_FIREVERTICES( rmesa );
         TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_RASTER, GL_TRUE );
         _swsetup_Wakeup( ctx );
         rmesa->swtcl.RenderIndex = ~0;
         if (RADEON_DEBUG & DEBUG_FALLBACKS) {
            fprintf(stderr, "Radeon begin rasterization fallback: 0x%x %s\n",
                    bit, getFallbackString(bit));
         }
      }
   }
   else {
      rmesa->Fallback &= ~bit;
      if (oldfallback == bit) {
         _swrast_flush( ctx );
         tnl->Driver.Render.Start = radeonRenderStart;
         tnl->Driver.Render.PrimitiveNotify = radeonRenderPrimitive;
         tnl->Driver.Render.Finish = radeonRenderFinish;

         tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
         tnl->Driver.Render.CopyPV = _tnl_copy_pv;
         tnl->Driver.Render.Interp = _tnl_interp;

         tnl->Driver.Render.ResetLineStipple = radeonResetLineStipple;
         TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_RASTER, GL_FALSE );
         if (rmesa->TclFallback) {
            /* These are already done if rmesa->TclFallback goes to
             * zero above.  But not if it doesn't (RADEON_NO_TCL for
             * example?)
             */
            radeonChooseVertexState( ctx );
            radeonChooseRenderState( ctx );
         }
         if (RADEON_DEBUG & DEBUG_FALLBACKS) {
            fprintf(stderr, "Radeon end rasterization fallback: 0x%x %s\n",
                    bit, getFallbackString(bit));
         }
      }
   }
}


void radeonFlushVertices( GLcontext *ctx, GLuint flags )
{
   _tnl_FlushVertices( ctx, flags );

   if (flags & FLUSH_STORED_VERTICES)
      RADEON_NEWPRIM( RADEON_CONTEXT( ctx ) );
}

/**********************************************************************/
/*                            Initialization.                         */
/**********************************************************************/

void radeonInitSwtcl( GLcontext *ctx )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   static int firsttime = 1;

   if (firsttime) {
      init_rast_tab();
      firsttime = 0;
   }

   tnl->Driver.Render.Start = radeonRenderStart;
   tnl->Driver.Render.Finish = radeonRenderFinish;
   tnl->Driver.Render.PrimitiveNotify = radeonRenderPrimitive;
   tnl->Driver.Render.ResetLineStipple = radeonResetLineStipple;
   tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
   tnl->Driver.Render.CopyPV = _tnl_copy_pv;
   tnl->Driver.Render.Interp = _tnl_interp;

   _tnl_init_vertices( ctx, ctx->Const.MaxArrayLockSize + 12,
                       RADEON_MAX_TNL_VERTEX_SIZE);

   rmesa->swtcl.verts = (GLubyte *)tnl->clipspace.vertex_buf;
   rmesa->swtcl.RenderIndex = ~0;
   rmesa->swtcl.render_primitive = GL_TRIANGLES;
   rmesa->swtcl.hw_primitive = 0;
}


void radeonDestroySwtcl( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

   if (rmesa->swtcl.indexed_verts.buf)
      radeonReleaseDmaRegion( rmesa, &rmesa->swtcl.indexed_verts,
                              __FUNCTION__ );
}