src/mesa/drivers/dri/mach64/mach64_tris.c
1 /* $XFree86$ */ /* -*- mode: c; c-basic-offset: 3 -*- */
2 /*
3 * Copyright 2000 Gareth Hughes
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * GARETH HUGHES BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
21 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 /*
26 * Authors:
27 * Gareth Hughes <gareth@valinux.com>
28 * Leif Delgass <ldelgass@retinalburn.net>
29 * José Fonseca <j_r_fonseca@yahoo.co.uk>
30 */
31
32 #include "glheader.h"
33 #include "mtypes.h"
34 #include "colormac.h"
35 #include "macros.h"
36
37 #include "swrast/swrast.h"
38 #include "swrast_setup/swrast_setup.h"
39 #include "tnl/tnl.h"
40 #include "tnl/t_context.h"
41 #include "tnl/t_pipeline.h"
42
43 #include "mach64_tris.h"
44 #include "mach64_state.h"
45 #include "mach64_context.h"
46 #include "mach64_vb.h"
47 #include "mach64_ioctl.h"
48
49 static const GLuint hw_prim[GL_POLYGON+1] = {
50 MACH64_PRIM_POINTS,
51 MACH64_PRIM_LINES,
52 MACH64_PRIM_LINE_LOOP,
53 MACH64_PRIM_LINE_STRIP,
54 MACH64_PRIM_TRIANGLES,
55 MACH64_PRIM_TRIANGLE_STRIP,
56 MACH64_PRIM_TRIANGLE_FAN,
57 MACH64_PRIM_QUADS,
58 MACH64_PRIM_QUAD_STRIP,
59 MACH64_PRIM_POLYGON,
60 };
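/* Indexed directly by the GL primitive enum: GL_POINTS (0) through
 * GL_POLYGON (9) map onto the matching hardware primitive types.
 */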
61
62 static void mach64RasterPrimitive( GLcontext *ctx, GLuint hwprim );
63 static void mach64RenderPrimitive( GLcontext *ctx, GLenum prim );
64
65
66 /* FIXME: Remove this when native template is finished. */
67 #define MACH64_PRINT_BUFFER 0
68
69 /***********************************************************************
70 * Emit primitives as inline vertices *
71 ***********************************************************************/
72
73 #if defined(USE_X86_ASM)
74 #define DO_COPY_VERTEX( vb, vertsize, v, n, m ) \
75 do { \
76 register const CARD32 *__p __asm__( "esi" ) = (CARD32 *)v + 10 - vertsize; \
77 register int __s __asm__( "ecx" ) = vertsize; \
78 if ( vertsize > 7 ) { \
79 *vb++ = (2 << 16) | ADRINDEX( MACH64_VERTEX_##n##_SECONDARY_S ); \
80 __asm__ __volatile__( "movsl ; movsl ; movsl" \
81 : "=D" (vb), "=S" (__p) \
82 : "0" (vb), "1" (__p) ); \
83 __s -= 3; \
84 } \
85 *vb++ = ((__s - 1 + m) << 16) | \
86 (ADRINDEX( MACH64_VERTEX_##n##_X_Y ) - (__s - 1) ); \
87 __asm__ __volatile__( "rep ; movsl" \
88 : "=%c" (__s), "=D" (vb), "=S" (__p) \
89 : "0" (__s), "1" (vb), "2" (__p) ); \
90 } while (0)
91 #else
92 #define DO_COPY_VERTEX( vb, vertsize, v, n, m ) \
93 do { \
94 CARD32 *__p = (CARD32 *)v + 10 - vertsize; \
95 int __s = vertsize; \
96 if ( vertsize > 7 ) { \
97 LE32_OUT( vb++, (2 << 16) | \
98 ADRINDEX( MACH64_VERTEX_##n##_SECONDARY_S ) ); \
99 *vb++ = *__p++; \
100 *vb++ = *__p++; \
101 *vb++ = *__p++; \
102 __s -= 3; \
103 } \
104 LE32_OUT( vb++, ((__s - 1 + m) << 16) | \
105 (ADRINDEX( MACH64_VERTEX_##n##_X_Y ) - (__s - 1)) ); \
106 while ( __s-- ) { \
107 *vb++ = *__p++; \
108 } \
109 } while (0)
110 #endif
111
112 #define COPY_VERTEX( vb, vertsize, v, n ) DO_COPY_VERTEX( vb, vertsize, v, n, 0 )
113 #define COPY_VERTEX_OOA( vb, vertsize, v, n ) DO_COPY_VERTEX( vb, vertsize, v, n, 1 )
114
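/*
 * Encoding note (inferred from the macros above, not from hardware
 * documentation): each emission is a header dword of the form
 * ((data_dword_count - 1) << 16) | first_register_index, followed by the
 * data dwords, which land in consecutive registers ending at
 * MACH64_VERTEX_n_X_Y.  Vertices wider than 7 dwords first emit a separate
 * 3-dword write starting at MACH64_VERTEX_n_SECONDARY_S.  The `m' argument
 * of DO_COPY_VERTEX stretches the final write by one dword so that the
 * one-over-area value stored right after the macro rides in the same packet.
 *
 * A minimal sketch of the assumed header packing, for illustration only
 * (this hypothetical helper is not part of the driver):
 */
#if 0
static CARD32 pack_reg_write_header( unsigned num_data_dwords,
                                     unsigned first_reg_index )
{
   /* high half-word: data dword count minus one;
    * low half-word: index of the first register to receive data */
   return ((num_data_dwords - 1) << 16) | first_reg_index;
}
#endif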
115
116 static __inline void mach64_draw_quad( mach64ContextPtr mmesa,
117 mach64VertexPtr v0,
118 mach64VertexPtr v1,
119 mach64VertexPtr v2,
120 mach64VertexPtr v3 )
121 {
122 #if MACH64_NATIVE_VTXFMT
123 GLcontext *ctx = mmesa->glCtx;
124 const GLuint vertsize = mmesa->vertex_size;
125 GLint a;
126 GLfloat ooa;
127 GLuint xy;
128 const GLuint xyoffset = 9;
129 GLint xx[3], yy[3]; /* 2 fractional bits for hardware */
130 unsigned vbsiz = (vertsize + (vertsize > 7 ? 2 : 1)) * 4 + 2;
131 CARD32 *vb, *vbchk;
132
133 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS ) {
134 fprintf(stderr, "%s:\n", __FUNCTION__);
135 fprintf(stderr,"Vertex 1:\n");
136 mach64_print_vertex( ctx, v0 );
137 fprintf(stderr,"Vertex 2:\n");
138 mach64_print_vertex( ctx, v1 );
139 fprintf(stderr,"Vertex 3:\n");
140 mach64_print_vertex( ctx, v2 );
141 fprintf(stderr,"Vertex 4:\n");
142 mach64_print_vertex( ctx, v3 );
143 }
144
145 xy = LE32_IN( &v0->ui[xyoffset] );
146 xx[0] = (GLshort)( xy >> 16 );
147 yy[0] = (GLshort)( xy & 0xffff );
148
149 xy = LE32_IN( &v1->ui[xyoffset] );
150 xx[1] = (GLshort)( xy >> 16 );
151 yy[1] = (GLshort)( xy & 0xffff );
152
153 xy = LE32_IN( &v3->ui[xyoffset] );
154 xx[2] = (GLshort)( xy >> 16 );
155 yy[2] = (GLshort)( xy & 0xffff );
156
157 a = (xx[0] - xx[2]) * (yy[1] - yy[2]) -
158 (yy[0] - yy[2]) * (xx[1] - xx[2]);
159
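   /* Cull test (reading of the check below): backface_sign is presumably
    * zero when culling is disabled; otherwise the quad is dropped when the
    * sign of the screen-space cross product disagrees with the sign of
    * backface_sign. */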
160 if ( (mmesa->backface_sign &&
161 ((a < 0 && !signbit( mmesa->backface_sign )) ||
162 (a > 0 && signbit( mmesa->backface_sign )))) ) {
163 /* cull quad */
164 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS )
165 fprintf(stderr,"Quad culled\n");
166 return;
167 }
168
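   /* The coordinates carry 2 fractional bits, so `a' is 16x the cross
    * product in whole-pixel units; 16.0/a therefore matches the
    * 0.25 * 0.25 scaling used by the non-native path below. */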
169 ooa = 16.0 / a;
170
171 vb = (CARD32 *)mach64AllocDmaLow( mmesa, vbsiz * sizeof(CARD32) );
172 vbchk = vb + vbsiz;
173
174 COPY_VERTEX( vb, vertsize, v0, 1 );
175 COPY_VERTEX( vb, vertsize, v1, 2 );
176 COPY_VERTEX_OOA( vb, vertsize, v3, 3 );
177 LE32_OUT( vb++, *(CARD32 *)&ooa );
178
179 xy = LE32_IN( &v2->ui[xyoffset] );
180 xx[0] = (GLshort)( xy >> 16 );
181 yy[0] = (GLshort)( xy & 0xffff );
182
183 a = (xx[0] - xx[2]) * (yy[1] - yy[2]) -
184 (yy[0] - yy[2]) * (xx[1] - xx[2]);
185
186 ooa = 16.0 / a;
187
188 COPY_VERTEX_OOA( vb, vertsize, v2, 1 );
189 LE32_OUT( vb++, *(CARD32 *)&ooa );
190
191 assert( vb == vbchk );
192
193 #if MACH64_PRINT_BUFFER
194 {
195 int i;
196 fprintf(stderr, "quad:\n");
197 for (i = 0; i < vbsiz; i++)
198 fprintf(stderr, " %08lx\n", *(vb - vbsiz + i));
199 fprintf(stderr, "\n");
200 }
201 #endif
202 #else
203 GLuint vertsize = mmesa->vertex_size;
204 GLint coloridx;
205 GLfloat ooa;
206 GLint xx[3], yy[3]; /* 2 fractional bits for hardware */
207 unsigned vbsiz =
208 ((
209 1 +
210 (vertsize > 6 ? 2 : 0) +
211 (vertsize > 4 ? 2 : 0) +
212 3 +
213 (mmesa->multitex ? 4 : 0)
214 ) * 4 + 4);
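   /* vbsiz: per vertex slot, 1 setup dword, optional S/T, optional W/spec,
    * then Z, ARGB and X_Y (3), plus 4 dwords for the secondary texcoords
    * when multitexturing; four vertex slots are emitted, plus two 2-dword
    * ONE_OVER_AREA_UC writes. */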
215 CARD32 *vb;
216 unsigned vbidx = 0;
217
218 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS ) {
219 fprintf(stderr, "%s:\n", __FUNCTION__);
220 fprintf(stderr,"Vertex 1: x: %.2f, y: %.2f, z: %.2f, w: %f\n\ts0: %f, t0: %f\n\ts1: %f, t1: %f\n",
221 v0->v.x, v0->v.y, v0->v.z, v0->v.w, v0->v.u0, v0->v.v0, v0->v.u1, v0->v.v1);
222 fprintf(stderr,"Vertex 2: x: %.2f, y: %.2f, z: %.2f, w: %f\n\ts0: %f, t0: %f\n\ts1: %f, t1: %f\n",
223 v1->v.x, v1->v.y, v1->v.z, v1->v.w, v1->v.u0, v1->v.v0, v1->v.u1, v1->v.v1);
224 fprintf(stderr,"Vertex 3: x: %.2f, y: %.2f, z: %.2f, w: %f\n\ts0: %f, t0: %f\n\ts1: %f, t1: %f\n",
225 v2->v.x, v2->v.y, v2->v.z, v2->v.w, v2->v.u0, v2->v.v0, v2->v.u1, v2->v.v1);
226 fprintf(stderr,"Vertex 4: x: %.2f, y: %.2f, z: %.2f, w: %f\n\ts0: %f, t0: %f\n\ts1: %f, t1: %f\n",
227 v3->v.x, v3->v.y, v3->v.z, v3->v.w, v3->v.u0, v3->v.v0, v3->v.u1, v3->v.v1);
228 }
229
230 #if MACH64_CLIENT_STATE_EMITS
231 /* Enable for interleaved client-side state emits */
232 LOCK_HARDWARE( mmesa );
233 if ( mmesa->dirty ) {
234 mach64EmitHwStateLocked( mmesa );
235 }
236 if ( mmesa->sarea->dirty ) {
237 mach64UploadHwStateLocked( mmesa );
238 }
239 UNLOCK_HARDWARE( mmesa );
240 #endif
241
242 xx[0] = (GLint)(v0->v.x * 4);
243 yy[0] = (GLint)(v0->v.y * 4);
244
245 xx[1] = (GLint)(v1->v.x * 4);
246 yy[1] = (GLint)(v1->v.y * 4);
247
248 xx[2] = (GLint)(v3->v.x * 4);
249 yy[2] = (GLint)(v3->v.y * 4);
250
251 ooa = 0.25 * 0.25 * ((xx[0] - xx[2]) * (yy[1] - yy[2]) -
252 (yy[0] - yy[2]) * (xx[1] - xx[2]));
253
254 if ( ooa * mmesa->backface_sign < 0 ) {
255 /* cull quad */
256 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS )
257 fprintf(stderr,"Quad culled\n");
258 return;
259 }
260
261 vb = (CARD32 *)mach64AllocDmaLow( mmesa, vbsiz * 4 );
262
263 ooa = 1.0 / ooa;
264
265 coloridx = (vertsize > 4) ? 4: 3;
266
267 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
268 switch (vertsize) {
269 case 6:
270 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_1_W) );
271 break;
272 case 4:
273 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_1_Z) );
274 break;
275 default: /* vertsize >= 8 */
276 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_1_S) );
277 break;
278 }
279 if (vertsize > 6) {
280 LE32_OUT( &vb[vbidx++], v0->ui[6] ); /* MACH64_VERTEX_1_S */
281 LE32_OUT( &vb[vbidx++], v0->ui[7] ); /* MACH64_VERTEX_1_T */
282 }
283 if (vertsize > 4) {
284 LE32_OUT( &vb[vbidx++], v0->ui[3] ); /* MACH64_VERTEX_1_W */
285 LE32_OUT( &vb[vbidx++], v0->ui[5] ); /* MACH64_VERTEX_1_SPEC_ARGB */
286 }
287 LE32_OUT( &vb[vbidx++], ((GLint)(v0->v.z) << 15) ); /* MACH64_VERTEX_1_Z */
288 vb[vbidx++] = v0->ui[coloridx]; /* MACH64_VERTEX_1_ARGB */
289 LE32_OUT( &vb[vbidx++], (xx[0] << 16) | (yy[0] & 0xffff) ); /* MACH64_VERTEX_1_X_Y */
290
291 if (mmesa->multitex) {
292 /* setup for 3 sequential reg writes */
293 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_1_SECONDARY_S) );
294 LE32_OUT( &vb[vbidx++], v0->ui[8] ); /* MACH64_VERTEX_1_SECONDARY_S */
295 LE32_OUT( &vb[vbidx++], v0->ui[9] ); /* MACH64_VERTEX_1_SECONDARY_T */
296 LE32_OUT( &vb[vbidx++], v0->ui[3] ); /* MACH64_VERTEX_1_SECONDARY_W */
297 }
298
299 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
300 switch (vertsize) {
301 case 6:
302 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_2_W) );
303 break;
304 case 4:
305 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_2_Z) );
306 break;
307 default: /* vertsize >= 8 */
308 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_2_S) );
309 break;
310 }
311 if (vertsize > 6) {
312 LE32_OUT( &vb[vbidx++], v1->ui[6] ); /* MACH64_VERTEX_2_S */
313 LE32_OUT( &vb[vbidx++], v1->ui[7] ); /* MACH64_VERTEX_2_T */
314 }
315 if (vertsize > 4) {
316 LE32_OUT( &vb[vbidx++], v1->ui[3] ); /* MACH64_VERTEX_2_W */
317 LE32_OUT( &vb[vbidx++], v1->ui[5] ); /* MACH64_VERTEX_2_SPEC_ARGB */
318 }
319 LE32_OUT( &vb[vbidx++], ((GLint)(v1->v.z) << 15) ); /* MACH64_VERTEX_2_Z */
320 vb[vbidx++] = v1->ui[coloridx]; /* MACH64_VERTEX_2_ARGB */
321 LE32_OUT( &vb[vbidx++], (xx[1] << 16) | (yy[1] & 0xffff) ); /* MACH64_VERTEX_2_X_Y */
322
323 if (mmesa->multitex) {
324 /* setup for 3 sequential reg writes */
325 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_2_SECONDARY_S) );
326 LE32_OUT( &vb[vbidx++], v1->ui[8] ); /* MACH64_VERTEX_2_SECONDARY_S */
327 LE32_OUT( &vb[vbidx++], v1->ui[9] ); /* MACH64_VERTEX_2_SECONDARY_T */
328 LE32_OUT( &vb[vbidx++], v1->ui[3] ); /* MACH64_VERTEX_2_SECONDARY_W */
329 }
330
331 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
332 switch (vertsize) {
333 case 6:
334 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_3_W) );
335 break;
336 case 4:
337 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_3_Z) );
338 break;
339 default: /* vertsize >= 8 */
340 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_3_S) );
341 break;
342 }
343 if (vertsize > 6) {
344 LE32_OUT( &vb[vbidx++], v3->ui[6] ); /* MACH64_VERTEX_3_S */
345 LE32_OUT( &vb[vbidx++], v3->ui[7] ); /* MACH64_VERTEX_3_T */
346 }
347 if (vertsize > 4) {
348 LE32_OUT( &vb[vbidx++], v3->ui[3] ); /* MACH64_VERTEX_3_W */
349 LE32_OUT( &vb[vbidx++], v3->ui[5] ); /* MACH64_VERTEX_3_SPEC_ARGB */
350 }
351 LE32_OUT( &vb[vbidx++], ((GLint)(v3->v.z) << 15) ); /* MACH64_VERTEX_3_Z */
352 vb[vbidx++] = v3->ui[coloridx]; /* MACH64_VERTEX_3_ARGB */
353 LE32_OUT( &vb[vbidx++], (xx[2] << 16) | (yy[2] & 0xffff) ); /* MACH64_VERTEX_3_X_Y */
354
355 if (mmesa->multitex) {
356 /* setup for 3 sequential reg writes */
357 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_3_SECONDARY_S) );
358 LE32_OUT( &vb[vbidx++], v3->ui[8] ); /* MACH64_VERTEX_3_SECONDARY_S */
359 LE32_OUT( &vb[vbidx++], v3->ui[9] ); /* MACH64_VERTEX_3_SECONDARY_T */
360 LE32_OUT( &vb[vbidx++], v3->ui[3] ); /* MACH64_VERTEX_3_SECONDARY_W */
361 }
362
363 LE32_OUT( &vb[vbidx++], ADRINDEX(MACH64_ONE_OVER_AREA_UC) );
364 LE32_OUT( &vb[vbidx++], *(GLuint *)&ooa );
365
366 xx[0] = (GLint)(v2->v.x * 4);
367 yy[0] = (GLint)(v2->v.y * 4);
368
369 ooa = 0.25 * 0.25 * ((xx[0] - xx[2]) * (yy[1] - yy[2]) -
370 (yy[0] - yy[2]) * (xx[1] - xx[2]));
371 ooa = 1.0 / ooa;
372
373 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
374 switch (vertsize) {
375 case 6:
376 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_1_W) );
377 break;
378 case 4:
379 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_1_Z) );
380 break;
381 default: /* vertsize >= 8 */
382 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_1_S) );
383 break;
384 }
385 if (vertsize > 6) {
386 LE32_OUT( &vb[vbidx++], v2->ui[6] ); /* MACH64_VERTEX_1_S */
387 LE32_OUT( &vb[vbidx++], v2->ui[7] ); /* MACH64_VERTEX_1_T */
388 }
389 if (vertsize > 4) {
390 LE32_OUT( &vb[vbidx++], v2->ui[3] ); /* MACH64_VERTEX_1_W */
391 LE32_OUT( &vb[vbidx++], v2->ui[5] ); /* MACH64_VERTEX_1_SPEC_ARGB */
392 }
393 LE32_OUT( &vb[vbidx++], ((GLint)(v2->v.z) << 15) ); /* MACH64_VERTEX_1_Z */
394 vb[vbidx++] = v2->ui[coloridx]; /* MACH64_VERTEX_1_ARGB */
395 LE32_OUT( &vb[vbidx++], (xx[0] << 16) | (yy[0] & 0xffff) ); /* MACH64_VERTEX_1_X_Y */
396
397 if (mmesa->multitex) {
398 /* setup for 3 sequential reg writes */
399 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_1_SECONDARY_S) );
400 LE32_OUT( &vb[vbidx++], v2->ui[8] ); /* MACH64_VERTEX_1_SECONDARY_S */
401 LE32_OUT( &vb[vbidx++], v2->ui[9] ); /* MACH64_VERTEX_1_SECONDARY_T */
402 LE32_OUT( &vb[vbidx++], v2->ui[3] ); /* MACH64_VERTEX_1_SECONDARY_W */
403 }
404
405 LE32_OUT( &vb[vbidx++], ADRINDEX(MACH64_ONE_OVER_AREA_UC) );
406 LE32_OUT( &vb[vbidx++], *(GLuint *)&ooa );
407
408 assert(vbsiz == vbidx);
409
410 #if MACH64_PRINT_BUFFER
411 {
412 int i;
413 fprintf(stderr, "quad:\n");
414 for (i = 0; i < vbsiz; i++)
415 fprintf(stderr, " %08lx\n", *(vb + i));
416 fprintf(stderr, "\n");
417 }
418 #endif
419 #endif
420 }
421
422 static __inline void mach64_draw_triangle( mach64ContextPtr mmesa,
423 mach64VertexPtr v0,
424 mach64VertexPtr v1,
425 mach64VertexPtr v2 )
426 {
427 #if MACH64_NATIVE_VTXFMT
428 GLcontext *ctx = mmesa->glCtx;
429 GLuint vertsize = mmesa->vertex_size;
430 GLint a;
431 GLfloat ooa;
432 GLuint xy;
433 const GLuint xyoffset = 9;
434 GLint xx[3], yy[3]; /* 2 fractional bits for hardware */
435 unsigned vbsiz = (vertsize + (vertsize > 7 ? 2 : 1)) * 3 + 1;
436 CARD32 *vb, *vbchk;
437
438 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS ) {
439 fprintf(stderr, "%s:\n", __FUNCTION__);
440 fprintf(stderr,"Vertex 1:\n");
441 mach64_print_vertex( ctx, v0 );
442 fprintf(stderr,"Vertex 2:\n");
443 mach64_print_vertex( ctx, v1 );
444 fprintf(stderr,"Vertex 3:\n");
445 mach64_print_vertex( ctx, v2 );
446 }
447
448 xy = LE32_IN( &v0->ui[xyoffset] );
449 xx[0] = (GLshort)( xy >> 16 );
450 yy[0] = (GLshort)( xy & 0xffff );
451
452 xy = LE32_IN( &v1->ui[xyoffset] );
453 xx[1] = (GLshort)( xy >> 16 );
454 yy[1] = (GLshort)( xy & 0xffff );
455
456 xy = LE32_IN( &v2->ui[xyoffset] );
457 xx[2] = (GLshort)( xy >> 16 );
458 yy[2] = (GLshort)( xy & 0xffff );
459
460 a = (xx[0] - xx[2]) * (yy[1] - yy[2]) -
461 (yy[0] - yy[2]) * (xx[1] - xx[2]);
462
463 if ( mmesa->backface_sign &&
464 ((a < 0 && !signbit( mmesa->backface_sign )) ||
465 (a > 0 && signbit( mmesa->backface_sign ))) ) {
466 /* cull triangle */
467 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS )
468 fprintf(stderr,"Triangle culled\n");
469 return;
470 }
471
472 ooa = 16.0 / a;
473
474 vb = (CARD32 *)mach64AllocDmaLow( mmesa, vbsiz * sizeof(CARD32) );
475 vbchk = vb + vbsiz;
476
477 COPY_VERTEX( vb, vertsize, v0, 1 );
478 COPY_VERTEX( vb, vertsize, v1, 2 );
479 COPY_VERTEX_OOA( vb, vertsize, v2, 3 );
480 LE32_OUT( vb++, *(CARD32 *)&ooa );
481
482 assert( vb == vbchk );
483
484 #if MACH64_PRINT_BUFFER
485 {
486 int i;
487 fprintf(stderr, "tri:\n");
488 for (i = 0; i < vbsiz; i++)
489 fprintf(stderr, " %08lx\n", *(vb - vbsiz + i));
490 fprintf(stderr, "\n");
491 }
492 #endif
493 #else
494 GLuint vertsize = mmesa->vertex_size;
495 GLint coloridx;
496 GLfloat ooa;
497 GLint xx[3], yy[3]; /* 2 fractional bits for hardware */
498 unsigned vbsiz =
499 ((
500 1 +
501 (vertsize > 6 ? 2 : 0) +
502 (vertsize > 4 ? 2 : 0) +
503 3 +
504 (mmesa->multitex ? 4 : 0)
505 ) * 3 + 2);
506 CARD32 *vb;
507 unsigned vbidx = 0;
508
509 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS ) {
510 fprintf(stderr, "%s:\n", __FUNCTION__);
511 fprintf(stderr,"Vertex 1: x: %.2f, y: %.2f, z: %.2f, w: %f\n\ts0: %f, t0: %f\n\ts1: %f, t1: %f\n",
512 v0->v.x, v0->v.y, v0->v.z, v0->v.w, v0->v.u0, v0->v.v0, v0->v.u1, v0->v.v1);
513 fprintf(stderr,"Vertex 2: x: %.2f, y: %.2f, z: %.2f, w: %f\n\ts0: %f, t0: %f\n\ts1: %f, t1: %f\n",
514 v1->v.x, v1->v.y, v1->v.z, v1->v.w, v1->v.u0, v1->v.v0, v1->v.u1, v1->v.v1);
515 fprintf(stderr,"Vertex 3: x: %.2f, y: %.2f, z: %.2f, w: %f\n\ts0: %f, t0: %f\n\ts1: %f, t1: %f\n",
516 v2->v.x, v2->v.y, v2->v.z, v2->v.w, v2->v.u0, v2->v.v0, v2->v.u1, v2->v.v1);
517 }
518
519 #if MACH64_CLIENT_STATE_EMITS
520 /* Enable for interleaved client-side state emits */
521 LOCK_HARDWARE( mmesa );
522 if ( mmesa->dirty ) {
523 mach64EmitHwStateLocked( mmesa );
524 }
525 if ( mmesa->sarea->dirty ) {
526 mach64UploadHwStateLocked( mmesa );
527 }
528 UNLOCK_HARDWARE( mmesa );
529 #endif
530
531 xx[0] = (GLint)(v0->v.x * 4);
532 yy[0] = (GLint)(v0->v.y * 4);
533
534 xx[1] = (GLint)(v1->v.x * 4);
535 yy[1] = (GLint)(v1->v.y * 4);
536
537 xx[2] = (GLint)(v2->v.x * 4);
538 yy[2] = (GLint)(v2->v.y * 4);
539
540 ooa = 0.25 * 0.25 * ((xx[0] - xx[2]) * (yy[1] - yy[2]) -
541 (yy[0] - yy[2]) * (xx[1] - xx[2]));
542
543 if ( ooa * mmesa->backface_sign < 0 ) {
544 /* cull triangle */
545 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS )
546 fprintf(stderr,"Triangle culled\n");
547 return;
548 }
549
550 vb = (CARD32 *)mach64AllocDmaLow( mmesa, vbsiz * 4 );
551
552 ooa = 1.0 / ooa;
553
554 coloridx = (vertsize > 4) ? 4: 3;
555
556 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
557 switch (vertsize) {
558 case 6:
559 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_1_W) );
560 break;
561 case 4:
562 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_1_Z) );
563 break;
564 default: /* vertsize >= 8 */
565 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_1_S) );
566 break;
567 }
568 if (vertsize > 6) {
569 LE32_OUT( &vb[vbidx++], v0->ui[6] ); /* MACH64_VERTEX_1_S */
570 LE32_OUT( &vb[vbidx++], v0->ui[7] ); /* MACH64_VERTEX_1_T */
571 }
572 if (vertsize > 4) {
573 LE32_OUT( &vb[vbidx++], v0->ui[3] ); /* MACH64_VERTEX_1_W */
574 LE32_OUT( &vb[vbidx++], v0->ui[5] ); /* MACH64_VERTEX_1_SPEC_ARGB */
575 }
576 LE32_OUT( &vb[vbidx++], ((GLint)(v0->v.z) << 15) ); /* MACH64_VERTEX_1_Z */
577 vb[vbidx++] = v0->ui[coloridx]; /* MACH64_VERTEX_1_ARGB */
578 LE32_OUT( &vb[vbidx++], (xx[0] << 16) | (yy[0] & 0xffff) ); /* MACH64_VERTEX_1_X_Y */
579
580 if (mmesa->multitex) {
581 /* setup for 3 sequential reg writes */
582 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_1_SECONDARY_S) );
583 LE32_OUT( &vb[vbidx++], v0->ui[8] ); /* MACH64_VERTEX_1_SECONDARY_S */
584 LE32_OUT( &vb[vbidx++], v0->ui[9] ); /* MACH64_VERTEX_1_SECONDARY_T */
585 LE32_OUT( &vb[vbidx++], v0->ui[3] ); /* MACH64_VERTEX_1_SECONDARY_W */
586 }
587
588 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
589 switch (vertsize) {
590 case 6:
591 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_2_W) );
592 break;
593 case 4:
594 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_2_Z) );
595 break;
596 default: /* vertsize >= 8 */
597 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_2_S) );
598 break;
599 }
600 if (vertsize > 6) {
601 LE32_OUT( &vb[vbidx++], v1->ui[6] ); /* MACH64_VERTEX_2_S */
602 LE32_OUT( &vb[vbidx++], v1->ui[7] ); /* MACH64_VERTEX_2_T */
603 }
604 if (vertsize > 4) {
605 LE32_OUT( &vb[vbidx++], v1->ui[3] ); /* MACH64_VERTEX_2_W */
606 LE32_OUT( &vb[vbidx++], v1->ui[5] ); /* MACH64_VERTEX_2_SPEC_ARGB */
607 }
608 LE32_OUT( &vb[vbidx++], ((GLint)(v1->v.z) << 15) ); /* MACH64_VERTEX_2_Z */
609 vb[vbidx++] = v1->ui[coloridx]; /* MACH64_VERTEX_2_ARGB */
610 LE32_OUT( &vb[vbidx++], (xx[1] << 16) | (yy[1] & 0xffff) ); /* MACH64_VERTEX_2_X_Y */
611
612 if (mmesa->multitex) {
613 /* setup for 3 sequential reg writes */
614 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_2_SECONDARY_S) );
615 LE32_OUT( &vb[vbidx++], v1->ui[8] ); /* MACH64_VERTEX_2_SECONDARY_S */
616 LE32_OUT( &vb[vbidx++], v1->ui[9] ); /* MACH64_VERTEX_2_SECONDARY_T */
617 LE32_OUT( &vb[vbidx++], v1->ui[3] ); /* MACH64_VERTEX_2_SECONDARY_W */
618 }
619
620 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
621 switch (vertsize) {
622 case 6:
623 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_3_W) );
624 break;
625 case 4:
626 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_3_Z) );
627 break;
628 default: /* vertsize >= 8 */
629 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_3_S) );
630 break;
631 }
632 if (vertsize > 6) {
633 LE32_OUT( &vb[vbidx++], v2->ui[6] ); /* MACH64_VERTEX_3_S */
634 LE32_OUT( &vb[vbidx++], v2->ui[7] ); /* MACH64_VERTEX_3_T */
635 }
636 if (vertsize > 4) {
637 LE32_OUT( &vb[vbidx++], v2->ui[3] ); /* MACH64_VERTEX_3_W */
638 LE32_OUT( &vb[vbidx++], v2->ui[5] ); /* MACH64_VERTEX_3_SPEC_ARGB */
639 }
640 LE32_OUT( &vb[vbidx++], ((GLint)(v2->v.z) << 15) ); /* MACH64_VERTEX_3_Z */
641 vb[vbidx++] = v2->ui[coloridx]; /* MACH64_VERTEX_3_ARGB */
642 LE32_OUT( &vb[vbidx++], (xx[2] << 16) | (yy[2] & 0xffff) ); /* MACH64_VERTEX_3_X_Y */
643
644 if (mmesa->multitex) {
645 /* setup for 3 sequential reg writes */
646 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_3_SECONDARY_S) );
647 LE32_OUT( &vb[vbidx++], v2->ui[8] ); /* MACH64_VERTEX_3_SECONDARY_S */
648 LE32_OUT( &vb[vbidx++], v2->ui[9] ); /* MACH64_VERTEX_3_SECONDARY_T */
649 LE32_OUT( &vb[vbidx++], v2->ui[3] ); /* MACH64_VERTEX_3_SECONDARY_W */
650 }
651
652 LE32_OUT( &vb[vbidx++], ADRINDEX(MACH64_ONE_OVER_AREA_UC) );
653 LE32_OUT( &vb[vbidx++], *(GLuint *)&ooa );
654
655 assert(vbsiz == vbidx);
656
657 #if MACH64_PRINT_BUFFER
658 {
659 int i;
660 fprintf(stderr, "tri:\n");
661 for (i = 0; i < vbsiz; ++i)
662 fprintf(stderr, " %08lx\n", *(vb + i));
663 fprintf(stderr, "\n");
664 }
665 #endif
666 #endif
667 }
668
669 static __inline void mach64_draw_line( mach64ContextPtr mmesa,
670 mach64VertexPtr v0,
671 mach64VertexPtr v1 )
672 {
673 #if MACH64_NATIVE_VTXFMT
674 GLcontext *ctx = mmesa->glCtx;
675 const GLuint vertsize = mmesa->vertex_size;
676 /* 2 fractional bits for hardware: */
677 const int width = (int) (2.0 * CLAMP(mmesa->glCtx->Line.Width,
678 mmesa->glCtx->Const.MinLineWidth,
679 mmesa->glCtx->Const.MaxLineWidth));
680 GLfloat ooa;
681 GLuint *pxy0, *pxy1;
682 GLuint xy0old, xy0, xy1old, xy1;
683 const GLuint xyoffset = 9;
684 GLint x0, y0, x1, y1;
685 GLint dx, dy, ix, iy;
686 unsigned vbsiz = (vertsize + (vertsize > 7 ? 2 : 1)) * 4 + 2;
687 CARD32 *vb, *vbchk;
688
689 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS ) {
690 fprintf(stderr, "%s:\n", __FUNCTION__);
691 fprintf(stderr,"Vertex 1:\n");
692 mach64_print_vertex( ctx, v0 );
693 fprintf(stderr,"Vertex 2:\n");
694 mach64_print_vertex( ctx, v1 );
695 }
696
697 pxy0 = &v0->ui[xyoffset];
698 xy0old = *pxy0;
699 xy0 = LE32_IN( &xy0old );
700 x0 = (GLshort)( xy0 >> 16 );
701 y0 = (GLshort)( xy0 & 0xffff );
702
703 pxy1 = &v1->ui[xyoffset];
704 xy1old = *pxy1;
705 xy1 = LE32_IN( &xy1old );
706 x1 = (GLshort)( xy1 >> 16 );
707 y1 = (GLshort)( xy1 & 0xffff );
708
709 if ( (dx = x1 - x0) < 0 ) {
710 dx = -dx;
711 }
712 if ( (dy = y1 - y0) < 0 ) {
713 dy = -dy;
714 }
715
716 /* adjust vertices depending on line direction */
717 if ( dx > dy ) {
718 ix = 0;
719 iy = width;
720 ooa = 8.0 / ((x1 - x0) * width);
721 } else {
722 ix = width;
723 iy = 0;
724 ooa = 8.0 / ((y0 - y1) * width);
725 }
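   /* The line is rendered as a quad: each endpoint is shifted by +/- the
    * half-width (in quarter-pixel units) perpendicular to the major axis.
    * The second one-over-area below is negated, consistent with the second
    * triangle of the quad winding the opposite way. */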
726
727 vb = (CARD32 *)mach64AllocDmaLow( mmesa, vbsiz * sizeof(CARD32) );
728 vbchk = vb + vbsiz;
729
730 LE32_OUT( pxy0, (( x0 - ix ) << 16) | (( y0 - iy ) & 0xffff) );
731 COPY_VERTEX( vb, vertsize, v0, 1 );
732 LE32_OUT( pxy1, (( x1 - ix ) << 16) | (( y1 - iy ) & 0xffff) );
733 COPY_VERTEX( vb, vertsize, v1, 2 );
734 LE32_OUT( pxy0, (( x0 + ix ) << 16) | (( y0 + iy ) & 0xffff) );
735 COPY_VERTEX_OOA( vb, vertsize, v0, 3 );
736 LE32_OUT( vb++, *(CARD32 *)&ooa );
737
738 ooa = -ooa;
739
740 LE32_OUT( pxy1, (( x1 + ix ) << 16) | (( y1 + iy ) & 0xffff) );
741 COPY_VERTEX_OOA( vb, vertsize, v1, 1 );
742 LE32_OUT( vb++, *(CARD32 *)&ooa );
743
744 *pxy0 = xy0old;
745 *pxy1 = xy1old;
746 #else /* !MACH64_NATIVE_VTXFMT */
747 GLuint vertsize = mmesa->vertex_size;
748 GLint coloridx;
749 float width = 1.0; /* Only support 1 pix lines now */
750 GLfloat ooa;
751 GLint xx[3], yy[3]; /* 2 fractional bits for hardware */
752 unsigned vbsiz =
753 ((
754 1 +
755 (vertsize > 6 ? 2 : 0) +
756 (vertsize > 4 ? 2 : 0) +
757 3 +
758 (mmesa->multitex ? 4 : 0)
759 ) * 4 + 4);
760 CARD32 *vb;
761 unsigned vbidx = 0;
762
763 GLfloat hw, dx, dy, ix, iy;
764 GLfloat x0 = v0->v.x;
765 GLfloat y0 = v0->v.y;
766 GLfloat x1 = v1->v.x;
767 GLfloat y1 = v1->v.y;
768
769 #if MACH64_CLIENT_STATE_EMITS
770 /* Enable for interleaved client-side state emits */
771 LOCK_HARDWARE( mmesa );
772 if ( mmesa->dirty ) {
773 mach64EmitHwStateLocked( mmesa );
774 }
775 if ( mmesa->sarea->dirty ) {
776 mach64UploadHwStateLocked( mmesa );
777 }
778 UNLOCK_HARDWARE( mmesa );
779 #endif
780
781 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS ) {
782 fprintf(stderr, "%s:\n", __FUNCTION__);
783 fprintf(stderr,"Vertex 1: x: %.2f, y: %.2f, z: %.2f, w: %f\n",
784 v0->v.x, v0->v.y, v0->v.z, v0->v.w);
785 fprintf(stderr,"Vertex 2: x: %.2f, y: %.2f, z: %.2f, w: %f\n",
786 v1->v.x, v1->v.y, v1->v.z, v1->v.w);
787 }
788
789 hw = 0.5F * width;
790 if (hw > 0.1F && hw < 0.5F) {
791 hw = 0.5F;
792 }
793
794 /* adjust vertices depending on line direction */
795 dx = v0->v.x - v1->v.x;
796 dy = v0->v.y - v1->v.y;
797 if (dx * dx > dy * dy) {
798 /* X-major line */
799 ix = 0.0F;
800 iy = hw;
801 if (x1 < x0) {
802 x0 += 0.5F;
803 x1 += 0.5F;
804 }
805 y0 -= 0.5F;
806 y1 -= 0.5F;
807 }
808 else {
809 /* Y-major line */
810 ix = hw;
811 iy = 0.0F;
812 if (y1 > y0) {
813 y0 -= 0.5F;
814 y1 -= 0.5F;
815 }
816 x0 += 0.5F;
817 x1 += 0.5F;
818 }
819
820 xx[0] = (GLint)((x0 - ix) * 4);
821 yy[0] = (GLint)((y0 - iy) * 4);
822
823 xx[1] = (GLint)((x1 - ix) * 4);
824 yy[1] = (GLint)((y1 - iy) * 4);
825
826 xx[2] = (GLint)((x0 + ix) * 4);
827 yy[2] = (GLint)((y0 + iy) * 4);
828
829 ooa = 0.25 * 0.25 * ((xx[0] - xx[2]) * (yy[1] - yy[2]) -
830 (yy[0] - yy[2]) * (xx[1] - xx[2]));
831
832 if ( ooa * mmesa->backface_sign < 0 ) {
833 /* cull line */
834 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS )
835 fprintf(stderr,"Line culled\n");
836 return;
837 }
838
839 vb = (CARD32 *)mach64AllocDmaLow( mmesa, vbsiz * 4 );
840
841 ooa = 1.0 / ooa;
842
843 coloridx = (vertsize > 4) ? 4: 3;
844
845 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
846 switch (vertsize) {
847 case 6:
848 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_1_W) );
849 break;
850 case 4:
851 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_1_Z) );
852 break;
853 default: /* vertsize >= 8 */
854 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_1_S) );
855 break;
856 }
857 if (vertsize > 6) {
858 LE32_OUT( &vb[vbidx++], v0->ui[6] ); /* MACH64_VERTEX_1_S */
859 LE32_OUT( &vb[vbidx++], v0->ui[7] ); /* MACH64_VERTEX_1_T */
860 }
861 if (vertsize > 4) {
862 LE32_OUT( &vb[vbidx++], v0->ui[3] ); /* MACH64_VERTEX_1_W */
863 LE32_OUT( &vb[vbidx++], v0->ui[5] ); /* MACH64_VERTEX_1_SPEC_ARGB */
864 }
865 LE32_OUT( &vb[vbidx++], ((GLint)(v0->v.z) << 15) ); /* MACH64_VERTEX_1_Z */
866 vb[vbidx++] = v0->ui[coloridx]; /* MACH64_VERTEX_1_ARGB */
867 LE32_OUT( &vb[vbidx++], (xx[0] << 16) | (yy[0] & 0xffff) ); /* MACH64_VERTEX_1_X_Y */
868
869 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
870 switch (vertsize) {
871 case 6:
872 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_2_W) );
873 break;
874 case 4:
875 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_2_Z) );
876 break;
877 default: /* vertsize >= 8 */
878 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_2_S) );
879 break;
880 }
881 if (vertsize > 6) {
882 LE32_OUT( &vb[vbidx++], v1->ui[6] ); /* MACH64_VERTEX_2_S */
883 LE32_OUT( &vb[vbidx++], v1->ui[7] ); /* MACH64_VERTEX_2_T */
884 }
885 if (vertsize > 4) {
886 LE32_OUT( &vb[vbidx++], v1->ui[3] ); /* MACH64_VERTEX_2_W */
887 LE32_OUT( &vb[vbidx++], v1->ui[5] ); /* MACH64_VERTEX_2_SPEC_ARGB */
888 }
889 LE32_OUT( &vb[vbidx++], ((GLint)(v1->v.z) << 15) ); /* MACH64_VERTEX_2_Z */
890 vb[vbidx++] = v1->ui[coloridx]; /* MACH64_VERTEX_2_ARGB */
891 LE32_OUT( &vb[vbidx++], (xx[1] << 16) | (yy[1] & 0xffff) ); /* MACH64_VERTEX_2_X_Y */
892
893 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
894 switch (vertsize) {
895 case 6:
896 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_3_W) );
897 break;
898 case 4:
899 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_3_Z) );
900 break;
901 default: /* vertsize >= 8 */
902 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_3_S) );
903 break;
904 }
905 if (vertsize > 6) {
906 LE32_OUT( &vb[vbidx++], v0->ui[6] ); /* MACH64_VERTEX_3_S */
907 LE32_OUT( &vb[vbidx++], v0->ui[7] ); /* MACH64_VERTEX_3_T */
908 }
909 if (vertsize > 4) {
910 LE32_OUT( &vb[vbidx++], v0->ui[3] ); /* MACH64_VERTEX_3_W */
911 LE32_OUT( &vb[vbidx++], v0->ui[5] ); /* MACH64_VERTEX_3_SPEC_ARGB */
912 }
913 LE32_OUT( &vb[vbidx++], ((GLint)(v0->v.z) << 15) ); /* MACH64_VERTEX_3_Z */
914 vb[vbidx++] = v0->ui[coloridx]; /* MACH64_VERTEX_3_ARGB */
915 LE32_OUT( &vb[vbidx++], (xx[2] << 16) | (yy[2] & 0xffff) ); /* MACH64_VERTEX_3_X_Y */
916
917 LE32_OUT( &vb[vbidx++], ADRINDEX(MACH64_ONE_OVER_AREA_UC) );
918 LE32_OUT( &vb[vbidx++], *(GLuint *)&ooa );
919
920 xx[0] = (GLint)((x1 + ix) * 4);
921 yy[0] = (GLint)((y1 + iy) * 4);
922
923 ooa = 0.25 * 0.25 * ((xx[0] - xx[2]) * (yy[1] - yy[2]) -
924 (yy[0] - yy[2]) * (xx[1] - xx[2]));
925 ooa = 1.0 / ooa;
926
927 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
928 switch (vertsize) {
929 case 6:
930 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_1_W) );
931 break;
932 case 4:
933 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_1_Z) );
934 break;
935 default: /* vertsize >= 8 */
936 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_1_S) );
937 break;
938 }
939 if (vertsize > 6) {
940 LE32_OUT( &vb[vbidx++], v1->ui[6] ); /* MACH64_VERTEX_1_S */
941 LE32_OUT( &vb[vbidx++], v1->ui[7] ); /* MACH64_VERTEX_1_T */
942 }
943 if (vertsize > 4) {
944 LE32_OUT( &vb[vbidx++], v1->ui[3] ); /* MACH64_VERTEX_1_W */
945 LE32_OUT( &vb[vbidx++], v1->ui[5] ); /* MACH64_VERTEX_1_SPEC_ARGB */
946 }
947 LE32_OUT( &vb[vbidx++], ((GLint)(v1->v.z) << 15) ); /* MACH64_VERTEX_1_Z */
948 vb[vbidx++] = v1->ui[coloridx]; /* MACH64_VERTEX_1_ARGB */
949 LE32_OUT( &vb[vbidx++], (xx[0] << 16) | (yy[0] & 0xffff) ); /* MACH64_VERTEX_1_X_Y */
950
951 LE32_OUT( &vb[vbidx++], ADRINDEX(MACH64_ONE_OVER_AREA_UC) );
952 LE32_OUT( &vb[vbidx++], *(GLuint *)&ooa );
953
954 assert(vbsiz == vbidx);
955 #endif
956 }
957
958 static __inline void mach64_draw_point( mach64ContextPtr mmesa,
959 mach64VertexPtr v0 )
960 {
961 #if MACH64_NATIVE_VTXFMT
962 GLcontext *ctx = mmesa->glCtx;
963 const GLuint vertsize = mmesa->vertex_size;
964 /* 2 fractional bits for hardware: */
965 GLint sz = (GLint) (2.0 * CLAMP(mmesa->glCtx->Point.Size,
966 ctx->Const.MinPointSize,
967 ctx->Const.MaxPointSize));
968 GLfloat ooa;
969 GLuint *pxy;
970 GLuint xyold, xy;
971 const GLuint xyoffset = 9;
972 GLint x, y;
973 unsigned vbsiz = (vertsize + (vertsize > 7 ? 2 : 1)) * 4 + 2;
974 CARD32 *vb, *vbchk;
975
976 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS ) {
977 fprintf(stderr, "%s:\n", __FUNCTION__);
978 fprintf(stderr,"Vertex 1:\n");
979 mach64_print_vertex( ctx, v0 );
980 }
981
982 if( !sz )
983 sz = 1; /* round to the nearest supported size */
984
985 pxy = &v0->ui[xyoffset];
986 xyold = *pxy;
987 xy = LE32_IN( &xyold );
988 x = (GLshort)( xy >> 16 );
989 y = (GLshort)( xy & 0xffff );
990
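   /* The point becomes a screen-aligned square of side 2*sz quarter-pixels
    * (i.e. Point.Size pixels); 4.0/(sz*sz) equals 16.0 divided by the edge
    * cross product of the first triangle, matching the quad path above. */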
991 ooa = 4.0 / (sz * sz);
992
993 vb = (CARD32 *)mach64AllocDmaLow( mmesa, vbsiz * sizeof(CARD32) );
994 vbchk = vb + vbsiz;
995
996 LE32_OUT( pxy, (( x - sz ) << 16) | (( y - sz ) & 0xffff) );
997 COPY_VERTEX( vb, vertsize, v0, 1 );
998 LE32_OUT( pxy, (( x + sz ) << 16) | (( y - sz ) & 0xffff) );
999 COPY_VERTEX( vb, vertsize, v0, 2 );
1000 LE32_OUT( pxy, (( x - sz ) << 16) | (( y + sz ) & 0xffff) );
1001 COPY_VERTEX_OOA( vb, vertsize, v0, 3 );
1002 LE32_OUT( vb++, *(CARD32 *)&ooa );
1003
1004 ooa = -ooa;
1005
1006 LE32_OUT( pxy, (( x + sz ) << 16) | (( y + sz ) & 0xffff) );
1007 COPY_VERTEX_OOA( vb, vertsize, v0, 1 );
1008 LE32_OUT( vb++, *(CARD32 *)&ooa );
1009
1010 *pxy = xyold;
1011 #else /* !MACH64_NATIVE_VTXFMT */
1012 GLuint vertsize = mmesa->vertex_size;
1013 GLint coloridx;
1014 float sz = 1.0; /* Only support 1 pix points now */
1015 GLfloat ooa;
1016 GLint xx[3], yy[3]; /* 2 fractional bits for hardware */
1017 unsigned vbsiz =
1018 ((
1019 1 +
1020 (vertsize > 6 ? 2 : 0) +
1021 (vertsize > 4 ? 2 : 0) +
1022 3 +
1023 (mmesa->multitex ? 4 : 0)
1024 ) * 4 + 4);
1025 CARD32 *vb;
1026 unsigned vbidx = 0;
1027
1028 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS ) {
1029 fprintf(stderr, "%s:\n", __FUNCTION__);
1030 fprintf(stderr,"Vertex 1: x: %.2f, y: %.2f, z: %.2f, w: %f\n",
1031 v0->v.x, v0->v.y, v0->v.z, v0->v.w);
1032 }
1033
1034 #if MACH64_CLIENT_STATE_EMITS
1035 /* Enable for interleaved client-side state emits */
1036 LOCK_HARDWARE( mmesa );
1037 if ( mmesa->dirty ) {
1038 mach64EmitHwStateLocked( mmesa );
1039 }
1040 if ( mmesa->sarea->dirty ) {
1041 mach64UploadHwStateLocked( mmesa );
1042 }
1043 UNLOCK_HARDWARE( mmesa );
1044 #endif
1045
1046 xx[0] = (GLint)((v0->v.x - sz) * 4);
1047 yy[0] = (GLint)((v0->v.y - sz) * 4);
1048
1049 xx[1] = (GLint)((v0->v.x + sz) * 4);
1050 yy[1] = (GLint)((v0->v.y - sz) * 4);
1051
1052 xx[2] = (GLint)((v0->v.x - sz) * 4);
1053 yy[2] = (GLint)((v0->v.y + sz) * 4);
1054
1055 ooa = 0.25 * 0.25 * ((xx[0] - xx[2]) * (yy[1] - yy[2]) -
1056 (yy[0] - yy[2]) * (xx[1] - xx[2]));
1057
1058 if ( ooa * mmesa->backface_sign < 0 ) {
 1059       /* cull point (rendered as a quad) */
1060 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS )
1061 fprintf(stderr,"Point culled\n");
1062 return;
1063 }
1064
1065 vb = (CARD32 *)mach64AllocDmaLow( mmesa, vbsiz * 4 );
1066
1067 ooa = 1.0 / ooa;
1068
1069 coloridx = (vertsize > 4) ? 4: 3;
1070
1071 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
1072 switch (vertsize) {
1073 case 6:
1074 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_1_W) );
1075 break;
1076 case 4:
1077 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_1_Z) );
1078 break;
1079 default: /* vertsize >= 8 */
1080 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_1_S) );
1081 break;
1082 }
1083 if (vertsize > 6) {
1084 LE32_OUT( &vb[vbidx++], v0->ui[6] ); /* MACH64_VERTEX_1_S */
1085 LE32_OUT( &vb[vbidx++], v0->ui[7] ); /* MACH64_VERTEX_1_T */
1086 }
1087 if (vertsize > 4) {
1088 LE32_OUT( &vb[vbidx++], v0->ui[3] ); /* MACH64_VERTEX_1_W */
1089 LE32_OUT( &vb[vbidx++], v0->ui[5] ); /* MACH64_VERTEX_1_SPEC_ARGB */
1090 }
1091 LE32_OUT( &vb[vbidx++], ((GLint)(v0->v.z) << 15) ); /* MACH64_VERTEX_1_Z */
1092 vb[vbidx++] = v0->ui[coloridx]; /* MACH64_VERTEX_1_ARGB */
1093 LE32_OUT( &vb[vbidx++], (xx[0] << 16) | (yy[0] & 0xffff) ); /* MACH64_VERTEX_1_X_Y */
1094
1095 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
1096 switch (vertsize) {
1097 case 6:
1098 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_2_W) );
1099 break;
1100 case 4:
1101 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_2_Z) );
1102 break;
1103 default: /* vertsize >= 8 */
1104 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_2_S) );
1105 break;
1106 }
1107 if (vertsize > 6) {
1108 LE32_OUT( &vb[vbidx++], v0->ui[6] ); /* MACH64_VERTEX_2_S */
1109 LE32_OUT( &vb[vbidx++], v0->ui[7] ); /* MACH64_VERTEX_2_T */
1110 }
1111 if (vertsize > 4) {
1112 LE32_OUT( &vb[vbidx++], v0->ui[3] ); /* MACH64_VERTEX_2_W */
1113 LE32_OUT( &vb[vbidx++], v0->ui[5] ); /* MACH64_VERTEX_2_SPEC_ARGB */
1114 }
1115 LE32_OUT( &vb[vbidx++], ((GLint)(v0->v.z) << 15) ); /* MACH64_VERTEX_2_Z */
1116 vb[vbidx++] = v0->ui[coloridx]; /* MACH64_VERTEX_2_ARGB */
1117 LE32_OUT( &vb[vbidx++], (xx[1] << 16) | (yy[1] & 0xffff) ); /* MACH64_VERTEX_2_X_Y */
1118
1119 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
1120 switch (vertsize) {
1121 case 6:
1122 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_3_W) );
1123 break;
1124 case 4:
1125 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_3_Z) );
1126 break;
1127 default: /* vertsize >= 8 */
1128 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_3_S) );
1129 break;
1130 }
1131 if (vertsize > 6) {
1132 LE32_OUT( &vb[vbidx++], v0->ui[6] ); /* MACH64_VERTEX_3_S */
1133 LE32_OUT( &vb[vbidx++], v0->ui[7] ); /* MACH64_VERTEX_3_T */
1134 }
1135 if (vertsize > 4) {
1136 LE32_OUT( &vb[vbidx++], v0->ui[3] ); /* MACH64_VERTEX_3_W */
1137 LE32_OUT( &vb[vbidx++], v0->ui[5] ); /* MACH64_VERTEX_3_SPEC_ARGB */
1138 }
1139 LE32_OUT( &vb[vbidx++], ((GLint)(v0->v.z) << 15) ); /* MACH64_VERTEX_3_Z */
1140 vb[vbidx++] = v0->ui[coloridx]; /* MACH64_VERTEX_3_ARGB */
1141 LE32_OUT( &vb[vbidx++], (xx[2] << 16) | (yy[2] & 0xffff) ); /* MACH64_VERTEX_3_X_Y */
1142
1143 LE32_OUT( &vb[vbidx++], ADRINDEX(MACH64_ONE_OVER_AREA_UC) );
1144 LE32_OUT( &vb[vbidx++], *(GLuint *)&ooa );
1145
1146 xx[0] = (GLint)((v0->v.x + sz) * 4);
1147 yy[0] = (GLint)((v0->v.y + sz) * 4);
1148
1149 ooa = 0.25 * 0.25 * ((xx[0] - xx[2]) * (yy[1] - yy[2]) -
1150 (yy[0] - yy[2]) * (xx[1] - xx[2]));
1151 ooa = 1.0 / ooa;
1152
1153 /* setup for 3,5, or 7 sequential reg writes based on vertex format */
1154 switch (vertsize) {
1155 case 6:
1156 LE32_OUT( &vb[vbidx++], (4 << 16) | ADRINDEX(MACH64_VERTEX_1_W) );
1157 break;
1158 case 4:
1159 LE32_OUT( &vb[vbidx++], (2 << 16) | ADRINDEX(MACH64_VERTEX_1_Z) );
1160 break;
1161 default: /* vertsize >= 8 */
1162 LE32_OUT( &vb[vbidx++], (6 << 16) | ADRINDEX(MACH64_VERTEX_1_S) );
1163 break;
1164 }
1165 if (vertsize > 6) {
1166 LE32_OUT( &vb[vbidx++], v0->ui[6] ); /* MACH64_VERTEX_1_S */
1167 LE32_OUT( &vb[vbidx++], v0->ui[7] ); /* MACH64_VERTEX_1_T */
1168 }
1169 if (vertsize > 4) {
1170 LE32_OUT( &vb[vbidx++], v0->ui[3] ); /* MACH64_VERTEX_1_W */
1171 LE32_OUT( &vb[vbidx++], v0->ui[5] ); /* MACH64_VERTEX_1_SPEC_ARGB */
1172 }
1173 LE32_OUT( &vb[vbidx++], ((GLint)(v0->v.z) << 15) ); /* MACH64_VERTEX_1_Z */
1174 vb[vbidx++] = v0->ui[coloridx]; /* MACH64_VERTEX_1_ARGB */
1175 LE32_OUT( &vb[vbidx++], (xx[0] << 16) | (yy[0] & 0xffff) ); /* MACH64_VERTEX_1_X_Y */
1176
1177 LE32_OUT( &vb[vbidx++], ADRINDEX(MACH64_ONE_OVER_AREA_UC) );
1178 LE32_OUT( &vb[vbidx++], *(GLuint *)&ooa );
1179
1180 assert(vbsiz == vbidx);
1181 #endif
1182 }
1183
1184 /***********************************************************************
1185 * Macros for t_dd_tritmp.h to draw basic primitives *
1186 ***********************************************************************/
1187
1188 #define TRI( a, b, c ) \
1189 do { \
1190 if (DO_FALLBACK) \
1191 mmesa->draw_tri( mmesa, a, b, c ); \
1192 else \
1193 mach64_draw_triangle( mmesa, a, b, c ); \
1194 } while (0)
1195
1196 #define QUAD( a, b, c, d ) \
1197 do { \
1198 if (DO_FALLBACK) { \
1199 mmesa->draw_tri( mmesa, a, b, d ); \
1200 mmesa->draw_tri( mmesa, b, c, d ); \
1201 } else \
1202 mach64_draw_quad( mmesa, a, b, c, d ); \
1203 } while (0)
1204
1205 #define LINE( v0, v1 ) \
1206 do { \
1207 if (DO_FALLBACK) \
1208 mmesa->draw_line( mmesa, v0, v1 ); \
1209 else \
1210 mach64_draw_line( mmesa, v0, v1 ); \
1211 } while (0)
1212
1213 #define POINT( v0 ) \
1214 do { \
1215 if (DO_FALLBACK) \
1216 mmesa->draw_point( mmesa, v0 ); \
1217 else \
1218 mach64_draw_point( mmesa, v0 ); \
1219 } while (0)
1220
1221
1222 /***********************************************************************
1223 * Build render functions from dd templates *
1224 ***********************************************************************/
1225
1226 #define MACH64_OFFSET_BIT 0x01
1227 #define MACH64_TWOSIDE_BIT 0x02
1228 #define MACH64_UNFILLED_BIT 0x04
1229 #define MACH64_FALLBACK_BIT 0x08
1230 #define MACH64_MAX_TRIFUNC 0x10
1231
1232 static struct {
1233 tnl_points_func points;
1234 tnl_line_func line;
1235 tnl_triangle_func triangle;
1236 tnl_quad_func quad;
1237 } rast_tab[MACH64_MAX_TRIFUNC];
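/* The four MACH64_*_BIT flags above index the 16 specialized function sets
 * generated below from tnl_dd/t_dd_tritmp.h; mach64ChooseRenderState()
 * builds the same bit pattern to pick an entry from this table.
 */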
1238
1239
1240 #define DO_FALLBACK (IND & MACH64_FALLBACK_BIT)
1241 #define DO_OFFSET (IND & MACH64_OFFSET_BIT)
1242 #define DO_UNFILLED (IND & MACH64_UNFILLED_BIT)
1243 #define DO_TWOSIDE (IND & MACH64_TWOSIDE_BIT)
1244 #define DO_FLAT 0
1245 #define DO_TRI 1
1246 #define DO_QUAD 1
1247 #define DO_LINE 1
1248 #define DO_POINTS 1
1249 #define DO_FULL_QUAD 1
1250
1251 #define HAVE_RGBA 1
1252 #define HAVE_SPEC 1
1253 #define HAVE_BACK_COLORS 0
1254 #define HAVE_HW_FLATSHADE 1
1255 #define VERTEX mach64Vertex
1256 #define TAB rast_tab
1257
1258 #if MACH64_NATIVE_VTXFMT
1259
1260 /* #define DEPTH_SCALE 65536.0 */
1261 #define DEPTH_SCALE 1
1262 #define UNFILLED_TRI unfilled_tri
1263 #define UNFILLED_QUAD unfilled_quad
1264 #define VERT_X(_v) ((GLfloat)(GLshort)(LE32_IN( &(_v)->ui[xyoffset] ) & 0xffff) / 4.0)
1265 #define VERT_Y(_v) ((GLfloat)(GLshort)(LE32_IN( &(_v)->ui[xyoffset] ) >> 16) / 4.0)
1266 #define VERT_Z(_v) ((GLfloat) LE32_IN( &(_v)->ui[zoffset] ))
1267 #define INSANE_VERTICES
1268 #define VERT_SET_Z(_v,val) LE32_OUT( &(_v)->ui[zoffset], (GLuint)(val) )
1269 #define VERT_Z_ADD(_v,val) LE32_OUT( &(_v)->ui[zoffset], LE32_IN( &(_v)->ui[zoffset] ) + (GLuint)(val) )
1270 #define AREA_IS_CCW( a ) ((a) < 0)
1271 #define GET_VERTEX(e) (mmesa->verts + ((e) * mmesa->vertex_size * sizeof(int)))
1272
1273 #define MACH64_COLOR( dst, src ) \
1274 do { \
1275 UNCLAMPED_FLOAT_TO_UBYTE(dst[0], src[2]); \
1276 UNCLAMPED_FLOAT_TO_UBYTE(dst[1], src[1]); \
1277 UNCLAMPED_FLOAT_TO_UBYTE(dst[2], src[0]); \
1278 UNCLAMPED_FLOAT_TO_UBYTE(dst[3], src[3]); \
1279 } while (0)
1280
1281 #define MACH64_SPEC( dst, src ) \
1282 do { \
1283 UNCLAMPED_FLOAT_TO_UBYTE(dst[0], src[2]); \
1284 UNCLAMPED_FLOAT_TO_UBYTE(dst[1], src[1]); \
1285 UNCLAMPED_FLOAT_TO_UBYTE(dst[2], src[0]); \
1286 } while (0)
1287
1288 #define VERT_SET_RGBA( v, c ) MACH64_COLOR( v->ub4[coloroffset], c )
1289 #define VERT_COPY_RGBA( v0, v1 ) v0->ui[coloroffset] = v1->ui[coloroffset]
1290 #define VERT_SAVE_RGBA( idx ) color[idx] = v[idx]->ui[coloroffset]
1291 #define VERT_RESTORE_RGBA( idx ) v[idx]->ui[coloroffset] = color[idx]
1292
1293 #define VERT_SET_SPEC( v, c ) if (havespec) MACH64_SPEC( v->ub4[specoffset], c )
1294 #define VERT_COPY_SPEC( v0, v1 ) if (havespec) COPY_3V( v0->ub4[specoffset], v1->ub4[specoffset] )
1295 #define VERT_SAVE_SPEC( idx ) if (havespec) spec[idx] = v[idx]->ui[specoffset]
1296 #define VERT_RESTORE_SPEC( idx ) if (havespec) v[idx]->ui[specoffset] = spec[idx]
1297
1298 #define LOCAL_VARS(n) \
1299 mach64ContextPtr mmesa = MACH64_CONTEXT(ctx); \
1300 GLuint color[n], spec[n]; \
1301 GLuint vertex_size = mmesa->vertex_size; \
1302 const GLuint xyoffset = 9; \
1303 const GLuint coloroffset = 8; \
1304 const GLuint zoffset = 7; \
1305 const GLuint specoffset = 6; \
1306 GLboolean havespec = vertex_size >= 4 ? 1 : 0; \
1307 (void) color; (void) spec; (void) vertex_size; \
1308 (void) xyoffset; (void) coloroffset; (void) zoffset; \
1309 (void) specoffset; (void) havespec;
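/* Note on the offsets above: the native vertex occupies the tail of a
 * 10-dword buffer ending with X_Y at index 9 (hence the `+ 10 - vertsize'
 * in DO_COPY_VERTEX), with color, Z and specular stored just below it.
 */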
1310
1311 #else
1312
1313 #define DEPTH_SCALE 1.0
1314 #define UNFILLED_TRI unfilled_tri
1315 #define UNFILLED_QUAD unfilled_quad
1316 #define VERT_X(_v) _v->v.x
1317 #define VERT_Y(_v) _v->v.y
1318 #define VERT_Z(_v) _v->v.z
1319 #define AREA_IS_CCW( a ) (a > 0)
1320 #define GET_VERTEX(e) (mmesa->verts + ((e) * mmesa->vertex_size * sizeof(int)))
1321
1322 #define MACH64_COLOR( dst, src ) \
1323 do { \
1324 UNCLAMPED_FLOAT_TO_UBYTE(dst[0], src[2]); \
1325 UNCLAMPED_FLOAT_TO_UBYTE(dst[1], src[1]); \
1326 UNCLAMPED_FLOAT_TO_UBYTE(dst[2], src[0]); \
1327 UNCLAMPED_FLOAT_TO_UBYTE(dst[3], src[3]); \
1328 } while (0)
1329
1330 #define MACH64_SPEC( dst, src ) \
1331 do { \
1332 UNCLAMPED_FLOAT_TO_UBYTE(dst[0], src[2]); \
1333 UNCLAMPED_FLOAT_TO_UBYTE(dst[1], src[1]); \
1334 UNCLAMPED_FLOAT_TO_UBYTE(dst[2], src[0]); \
1335 } while (0)
1336
1337 #define VERT_SET_RGBA( v, c ) MACH64_COLOR( v->ub4[coloroffset], c )
1338 #define VERT_COPY_RGBA( v0, v1 ) v0->ui[coloroffset] = v1->ui[coloroffset]
1339 #define VERT_SAVE_RGBA( idx ) color[idx] = v[idx]->ui[coloroffset]
1340 #define VERT_RESTORE_RGBA( idx ) v[idx]->ui[coloroffset] = color[idx]
1341
1342 #define VERT_SET_SPEC( v, c ) if (havespec) MACH64_SPEC( v->ub4[5], c )
1343 #define VERT_COPY_SPEC( v0, v1 ) if (havespec) COPY_3V(v0->ub4[5], v1->ub4[5])
1344 #define VERT_SAVE_SPEC( idx ) if (havespec) spec[idx] = v[idx]->ui[5]
1345 #define VERT_RESTORE_SPEC( idx ) if (havespec) v[idx]->ui[5] = spec[idx]
1346
1347 #define LOCAL_VARS(n) \
1348 mach64ContextPtr mmesa = MACH64_CONTEXT(ctx); \
1349 GLuint color[n], spec[n]; \
1350 GLuint coloroffset = (mmesa->vertex_size == 4 ? 3 : 4); \
1351 GLboolean havespec = (mmesa->vertex_size == 4 ? 0 : 1); \
1352 (void) color; (void) spec; (void) coloroffset; (void) havespec;
1353
1354 #endif
1355
1356 /***********************************************************************
1357 * Helpers for rendering unfilled primitives *
1358 ***********************************************************************/
1359
1360 #define RASTERIZE(x) if (mmesa->hw_primitive != hw_prim[x]) \
1361 mach64RasterPrimitive( ctx, hw_prim[x] )
1362 #define RENDER_PRIMITIVE mmesa->render_primitive
1363 #define IND MACH64_FALLBACK_BIT
1364 #define TAG(x) x
1365 #include "tnl_dd/t_dd_unfilled.h"
1366 #undef IND
1367
1368
1369 /***********************************************************************
1370 * Generate GL render functions *
1371 ***********************************************************************/
1372
1373
1374 #define IND (0)
1375 #define TAG(x) x
1376 #include "tnl_dd/t_dd_tritmp.h"
1377
1378 #define IND (MACH64_OFFSET_BIT)
1379 #define TAG(x) x##_offset
1380 #include "tnl_dd/t_dd_tritmp.h"
1381
1382 #define IND (MACH64_TWOSIDE_BIT)
1383 #define TAG(x) x##_twoside
1384 #include "tnl_dd/t_dd_tritmp.h"
1385
1386 #define IND (MACH64_TWOSIDE_BIT|MACH64_OFFSET_BIT)
1387 #define TAG(x) x##_twoside_offset
1388 #include "tnl_dd/t_dd_tritmp.h"
1389
1390 #define IND (MACH64_UNFILLED_BIT)
1391 #define TAG(x) x##_unfilled
1392 #include "tnl_dd/t_dd_tritmp.h"
1393
1394 #define IND (MACH64_OFFSET_BIT|MACH64_UNFILLED_BIT)
1395 #define TAG(x) x##_offset_unfilled
1396 #include "tnl_dd/t_dd_tritmp.h"
1397
1398 #define IND (MACH64_TWOSIDE_BIT|MACH64_UNFILLED_BIT)
1399 #define TAG(x) x##_twoside_unfilled
1400 #include "tnl_dd/t_dd_tritmp.h"
1401
1402 #define IND (MACH64_TWOSIDE_BIT|MACH64_OFFSET_BIT|MACH64_UNFILLED_BIT)
1403 #define TAG(x) x##_twoside_offset_unfilled
1404 #include "tnl_dd/t_dd_tritmp.h"
1405
1406 #define IND (MACH64_FALLBACK_BIT)
1407 #define TAG(x) x##_fallback
1408 #include "tnl_dd/t_dd_tritmp.h"
1409
1410 #define IND (MACH64_OFFSET_BIT|MACH64_FALLBACK_BIT)
1411 #define TAG(x) x##_offset_fallback
1412 #include "tnl_dd/t_dd_tritmp.h"
1413
1414 #define IND (MACH64_TWOSIDE_BIT|MACH64_FALLBACK_BIT)
1415 #define TAG(x) x##_twoside_fallback
1416 #include "tnl_dd/t_dd_tritmp.h"
1417
1418 #define IND (MACH64_TWOSIDE_BIT|MACH64_OFFSET_BIT|MACH64_FALLBACK_BIT)
1419 #define TAG(x) x##_twoside_offset_fallback
1420 #include "tnl_dd/t_dd_tritmp.h"
1421
1422 #define IND (MACH64_UNFILLED_BIT|MACH64_FALLBACK_BIT)
1423 #define TAG(x) x##_unfilled_fallback
1424 #include "tnl_dd/t_dd_tritmp.h"
1425
1426 #define IND (MACH64_OFFSET_BIT|MACH64_UNFILLED_BIT|MACH64_FALLBACK_BIT)
1427 #define TAG(x) x##_offset_unfilled_fallback
1428 #include "tnl_dd/t_dd_tritmp.h"
1429
1430 #define IND (MACH64_TWOSIDE_BIT|MACH64_UNFILLED_BIT|MACH64_FALLBACK_BIT)
1431 #define TAG(x) x##_twoside_unfilled_fallback
1432 #include "tnl_dd/t_dd_tritmp.h"
1433
1434 #define IND (MACH64_TWOSIDE_BIT|MACH64_OFFSET_BIT|MACH64_UNFILLED_BIT| \
1435 MACH64_FALLBACK_BIT)
1436 #define TAG(x) x##_twoside_offset_unfilled_fallback
1437 #include "tnl_dd/t_dd_tritmp.h"
1438
1439
1440 static void init_rast_tab( void )
1441 {
1442 init();
1443 init_offset();
1444 init_twoside();
1445 init_twoside_offset();
1446 init_unfilled();
1447 init_offset_unfilled();
1448 init_twoside_unfilled();
1449 init_twoside_offset_unfilled();
1450 init_fallback();
1451 init_offset_fallback();
1452 init_twoside_fallback();
1453 init_twoside_offset_fallback();
1454 init_unfilled_fallback();
1455 init_offset_unfilled_fallback();
1456 init_twoside_unfilled_fallback();
1457 init_twoside_offset_unfilled_fallback();
1458 }
1459
1460
1461 /***********************************************************************
1462 * Rasterization fallback helpers *
1463 ***********************************************************************/
1464
1465
1466 /* This code is hit only when a mix of accelerated and unaccelerated
1467 * primitives are being drawn, and only for the unaccelerated
1468 * primitives.
1469 */
1470 static void
1471 mach64_fallback_tri( mach64ContextPtr mmesa,
1472 mach64Vertex *v0,
1473 mach64Vertex *v1,
1474 mach64Vertex *v2 )
1475 {
1476 GLcontext *ctx = mmesa->glCtx;
1477 SWvertex v[3];
1478 mach64_translate_vertex( ctx, v0, &v[0] );
1479 mach64_translate_vertex( ctx, v1, &v[1] );
1480 mach64_translate_vertex( ctx, v2, &v[2] );
1481 _swrast_Triangle( ctx, &v[0], &v[1], &v[2] );
1482 }
1483
1484
1485 static void
1486 mach64_fallback_line( mach64ContextPtr mmesa,
1487 mach64Vertex *v0,
1488 mach64Vertex *v1 )
1489 {
1490 GLcontext *ctx = mmesa->glCtx;
1491 SWvertex v[2];
1492 mach64_translate_vertex( ctx, v0, &v[0] );
1493 mach64_translate_vertex( ctx, v1, &v[1] );
1494 _swrast_Line( ctx, &v[0], &v[1] );
1495 }
1496
1497
1498 static void
1499 mach64_fallback_point( mach64ContextPtr mmesa,
1500 mach64Vertex *v0 )
1501 {
1502 GLcontext *ctx = mmesa->glCtx;
1503 SWvertex v[1];
1504 mach64_translate_vertex( ctx, v0, &v[0] );
1505 _swrast_Point( ctx, &v[0] );
1506 }
1507
1508
1509
1510 /**********************************************************************/
1511 /* Render unclipped begin/end objects */
1512 /**********************************************************************/
1513
1514 #define VERT(x) (mach64Vertex *)(mach64verts + ((x) * vertsize * sizeof(int)))
1515 #define RENDER_POINTS( start, count ) \
1516 for ( ; start < count ; start++) \
1517 mach64_draw_point( mmesa, VERT(start) )
1518 #define RENDER_LINE( v0, v1 ) \
1519 mach64_draw_line( mmesa, VERT(v0), VERT(v1) )
1520 #define RENDER_TRI( v0, v1, v2 ) \
1521 mach64_draw_triangle( mmesa, VERT(v0), VERT(v1), VERT(v2) )
1522 #define RENDER_QUAD( v0, v1, v2, v3 ) \
1523 mach64_draw_quad( mmesa, VERT(v0), VERT(v1), VERT(v2), VERT(v3) )
1524 #define INIT(x) do { \
1525 if (0) fprintf(stderr, "%s\n", __FUNCTION__); \
1526 mach64RenderPrimitive( ctx, x ); \
1527 } while (0)
1528 #undef LOCAL_VARS
1529 #define LOCAL_VARS \
1530 mach64ContextPtr mmesa = MACH64_CONTEXT(ctx); \
1531 const GLuint vertsize = mmesa->vertex_size; \
1532 const char *mach64verts = (char *)mmesa->verts; \
1533 const GLuint * const elt = TNL_CONTEXT(ctx)->vb.Elts; \
1534 (void) elt;
1535 #define RESET_STIPPLE
1536 #define RESET_OCCLUSION
1537 #define PRESERVE_VB_DEFS
1538 #define ELT(x) (x)
1539 #define TAG(x) mach64_##x##_verts
1540 #include "tnl/t_vb_rendertmp.h"
1541 #undef ELT
1542 #undef TAG
1543 #define TAG(x) mach64_##x##_elts
1544 #define ELT(x) elt[x]
1545 #include "tnl/t_vb_rendertmp.h"
1546
1547
1548 /**********************************************************************/
1549 /* Render clipped primitives */
1550 /**********************************************************************/
1551
1552 static void mach64RenderClippedPoly( GLcontext *ctx, const GLuint *elts,
1553 GLuint n )
1554 {
1555 mach64ContextPtr mmesa = MACH64_CONTEXT( ctx );
1556 TNLcontext *tnl = TNL_CONTEXT(ctx);
1557 struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
1558 GLuint prim = mmesa->render_primitive;
1559
1560 /* Render the new vertices as an unclipped polygon.
1561 */
1562 {
1563 GLuint *tmp = VB->Elts;
1564 VB->Elts = (GLuint *)elts;
1565 tnl->Driver.Render.PrimTabElts[GL_POLYGON]( ctx, 0, n, PRIM_BEGIN|PRIM_END );
1566 VB->Elts = tmp;
1567 }
1568
1569 /* Restore the render primitive
1570 */
1571 if (prim != GL_POLYGON)
1572 tnl->Driver.Render.PrimitiveNotify( ctx, prim );
1573
1574 }
1575
1576 static void mach64RenderClippedLine( GLcontext *ctx, GLuint ii, GLuint jj )
1577 {
1578 TNLcontext *tnl = TNL_CONTEXT(ctx);
1579 tnl->Driver.Render.Line( ctx, ii, jj );
1580 }
1581
1582 #if MACH64_NATIVE_VTXFMT
1583 static void mach64FastRenderClippedPoly( GLcontext *ctx, const GLuint *elts,
1584 GLuint n )
1585 {
1586 mach64ContextPtr mmesa = MACH64_CONTEXT( ctx );
1587 const GLuint vertsize = mmesa->vertex_size;
1588 GLint a;
1589 union {
1590 GLfloat f;
1591 CARD32 u;
1592 } ooa;
1593 GLuint xy;
1594 const GLuint xyoffset = 9;
1595 GLint xx[3], yy[3]; /* 2 fractional bits for hardware */
1596 unsigned vbsiz = (vertsize + (vertsize > 7 ? 2 : 1)) * n + (n-2);
1597 CARD32 *vb, *vbchk;
1598 GLubyte *mach64verts = (GLubyte *)mmesa->verts;
1599 mach64VertexPtr v0, v1, v2;
1600 int i;
1601
1602 v0 = (mach64VertexPtr)VERT(elts[1]);
1603 v1 = (mach64VertexPtr)VERT(elts[2]);
1604 v2 = (mach64VertexPtr)VERT(elts[0]);
1605
1606 xy = LE32_IN( &v0->ui[xyoffset] );
1607 xx[0] = (GLshort)( xy >> 16 );
1608 yy[0] = (GLshort)( xy & 0xffff );
1609
1610 xy = LE32_IN( &v1->ui[xyoffset] );
1611 xx[1] = (GLshort)( xy >> 16 );
1612 yy[1] = (GLshort)( xy & 0xffff );
1613
1614 xy = LE32_IN( &v2->ui[xyoffset] );
1615 xx[2] = (GLshort)( xy >> 16 );
1616 yy[2] = (GLshort)( xy & 0xffff );
1617
1618 a = (xx[0] - xx[2]) * (yy[1] - yy[2]) -
1619 (yy[0] - yy[2]) * (xx[1] - xx[2]);
1620
1621 if ( (mmesa->backface_sign &&
1622 ((a < 0 && !signbit( mmesa->backface_sign )) ||
1623 (a > 0 && signbit( mmesa->backface_sign )))) ) {
1624 /* cull polygon */
1625 if ( MACH64_DEBUG & DEBUG_VERBOSE_PRIMS )
1626 fprintf(stderr,"Polygon culled\n");
1627 return;
1628 }
1629
1630 ooa.f = 16.0 / a;
1631
1632 vb = (CARD32 *)mach64AllocDmaLow( mmesa, vbsiz * sizeof(CARD32) );
1633 vbchk = vb + vbsiz;
1634
1635 COPY_VERTEX( vb, vertsize, v0, 1 );
1636 COPY_VERTEX( vb, vertsize, v1, 2 );
1637 COPY_VERTEX_OOA( vb, vertsize, v2, 3 );
1638 LE32_OUT( vb++, ooa.u );
1639
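      /* Fan decomposition: vertex slot 3 stays pinned to elts[0]; each
       * further element alternately refreshes slot 1 or slot 2, and a
       * recomputed one-over-area value follows every refreshed vertex. */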
1640 i = 3;
1641 while (1) {
1642 if (i >= n)
1643 break;
1644 v0 = (mach64VertexPtr)VERT(elts[i]);
1645 i++;
1646
1647 xy = LE32_IN( &v0->ui[xyoffset] );
1648 xx[0] = (GLshort)( xy >> 16 );
1649 yy[0] = (GLshort)( xy & 0xffff );
1650
1651 a = (xx[0] - xx[2]) * (yy[1] - yy[2]) -
1652 (yy[0] - yy[2]) * (xx[1] - xx[2]);
1653 ooa.f = 16.0 / a;
1654
1655 COPY_VERTEX_OOA( vb, vertsize, v0, 1 );
1656 LE32_OUT( vb++, ooa.u );
1657
1658 if (i >= n)
1659 break;
1660 v1 = (mach64VertexPtr)VERT(elts[i]);
1661 i++;
1662
1663 xy = LE32_IN( &v1->ui[xyoffset] );
1664 xx[1] = (GLshort)( xy >> 16 );
1665 yy[1] = (GLshort)( xy & 0xffff );
1666
1667 a = (xx[0] - xx[2]) * (yy[1] - yy[2]) -
1668 (yy[0] - yy[2]) * (xx[1] - xx[2]);
1669 ooa.f = 16.0 / a;
1670
1671 COPY_VERTEX_OOA( vb, vertsize, v1, 2 );
1672 LE32_OUT( vb++, ooa.u );
1673 }
1674
1675 assert( vb == vbchk );
1676 }
1677 #else
1678 static void mach64FastRenderClippedPoly( GLcontext *ctx, const GLuint *elts,
1679 GLuint n )
1680 {
1681 mach64ContextPtr mmesa = MACH64_CONTEXT( ctx );
1682 const GLuint vertsize = mmesa->vertex_size;
1683 GLubyte *mach64verts = (GLubyte *)mmesa->verts;
1684 const GLuint *start = (const GLuint *)VERT(elts[0]);
1685 int i;
1686
1687 for (i = 2 ; i < n ; i++) {
1688 mach64_draw_triangle( mmesa,
1689 VERT(elts[i-1]),
1690 VERT(elts[i]),
1691 (mach64VertexPtr) start
1692 );
1693 }
1694 }
1695 #endif /* MACH64_NATIVE_VTXFMT */
1696
1697 /**********************************************************************/
1698 /* Choose render functions */
1699 /**********************************************************************/
1700
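/* GL state changes that require mach64ChooseRenderState() to be re-run. */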
1701 #define _MACH64_NEW_RENDER_STATE (_DD_NEW_POINT_SMOOTH | \
1702 _DD_NEW_LINE_SMOOTH | \
1703 _DD_NEW_LINE_STIPPLE | \
1704 _DD_NEW_TRI_SMOOTH | \
1705 _DD_NEW_TRI_STIPPLE | \
1706 _NEW_POLYGONSTIPPLE | \
1707 _DD_NEW_TRI_UNFILLED | \
1708 _DD_NEW_TRI_LIGHT_TWOSIDE | \
1709 				     _DD_NEW_TRI_OFFSET)
1710
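/* Per-primitive state the hardware cannot handle; these bits route the
 * affected primitive type through swrast while the rest stay native.
 */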
1711 #define POINT_FALLBACK (DD_POINT_SMOOTH)
1712 #define LINE_FALLBACK (DD_LINE_SMOOTH|DD_LINE_STIPPLE)
1713 #define TRI_FALLBACK (DD_TRI_SMOOTH|DD_TRI_STIPPLE)
1714 #define ANY_FALLBACK_FLAGS (POINT_FALLBACK|LINE_FALLBACK|TRI_FALLBACK)
1715 #define ANY_RASTER_FLAGS (DD_TRI_LIGHT_TWOSIDE|DD_TRI_OFFSET|DD_TRI_UNFILLED)
1716
1717
1718 static void mach64ChooseRenderState(GLcontext *ctx)
1719 {
1720 mach64ContextPtr mmesa = MACH64_CONTEXT(ctx);
1721 GLuint flags = ctx->_TriangleCaps;
1722 GLuint index = 0;
1723
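   /* `index' selects an entry in rast_tab (filled in by init_rast_tab());
    * index 0 means every primitive can take the all-hardware fast path.
    */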
1724 if (flags & (ANY_RASTER_FLAGS|ANY_FALLBACK_FLAGS)) {
1725 mmesa->draw_point = mach64_draw_point;
1726 mmesa->draw_line = mach64_draw_line;
1727 mmesa->draw_tri = mach64_draw_triangle;
1728
1729 if (flags & ANY_RASTER_FLAGS) {
1730 if (flags & DD_TRI_LIGHT_TWOSIDE) index |= MACH64_TWOSIDE_BIT;
1731 if (flags & DD_TRI_OFFSET) index |= MACH64_OFFSET_BIT;
1732 if (flags & DD_TRI_UNFILLED) index |= MACH64_UNFILLED_BIT;
1733 }
1734
1735 /* Hook in fallbacks for specific primitives.
1736 */
1737 if (flags & (POINT_FALLBACK|LINE_FALLBACK|TRI_FALLBACK)) {
1738 if (flags & POINT_FALLBACK) mmesa->draw_point = mach64_fallback_point;
1739 if (flags & LINE_FALLBACK) mmesa->draw_line = mach64_fallback_line;
1740 if (flags & TRI_FALLBACK) mmesa->draw_tri = mach64_fallback_tri;
1741 index |= MACH64_FALLBACK_BIT;
1742 }
1743 }
1744
1745 if (index != mmesa->RenderIndex) {
1746 TNLcontext *tnl = TNL_CONTEXT(ctx);
1747 tnl->Driver.Render.Points = rast_tab[index].points;
1748 tnl->Driver.Render.Line = rast_tab[index].line;
1749 tnl->Driver.Render.Triangle = rast_tab[index].triangle;
1750 tnl->Driver.Render.Quad = rast_tab[index].quad;
1751
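      /* With no raster or fallback bits set, whole primitive runs can be
       * emitted natively; otherwise use the generic t&l loops, which call
       * the per-primitive functions hooked above.
       */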
1752 if (index == 0) {
1753 tnl->Driver.Render.PrimTabVerts = mach64_render_tab_verts;
1754 tnl->Driver.Render.PrimTabElts = mach64_render_tab_elts;
1755 tnl->Driver.Render.ClippedLine = rast_tab[index].line;
1756 tnl->Driver.Render.ClippedPolygon = mach64FastRenderClippedPoly;
1757 } else {
1758 tnl->Driver.Render.PrimTabVerts = _tnl_render_tab_verts;
1759 tnl->Driver.Render.PrimTabElts = _tnl_render_tab_elts;
1760 tnl->Driver.Render.ClippedLine = mach64RenderClippedLine;
1761 tnl->Driver.Render.ClippedPolygon = mach64RenderClippedPoly;
1762 }
1763
1764 mmesa->RenderIndex = index;
1765 }
1766 }
1767
1768 /**********************************************************************/
1769 /* Validate state at pipeline start */
1770 /**********************************************************************/
1771
1772 static void mach64RunPipeline( GLcontext *ctx )
1773 {
1774 mach64ContextPtr mmesa = MACH64_CONTEXT(ctx);
1775
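   /* Resolve driver dirty state (new_state) and any accumulated Mesa
    * state changes (NewGLState) before the t&l pipeline runs.
    */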
1776 if (mmesa->new_state)
1777 mach64DDUpdateHWState( ctx );
1778
1779 if (!mmesa->Fallback && mmesa->NewGLState) {
1780 if (mmesa->NewGLState & _MACH64_NEW_VERTEX_STATE)
1781 mach64ChooseVertexState( ctx );
1782
1783 if (mmesa->NewGLState & _MACH64_NEW_RENDER_STATE)
1784 mach64ChooseRenderState( ctx );
1785
1786 mmesa->NewGLState = 0;
1787 }
1788
1789 _tnl_run_pipeline( ctx );
1790 }
1791
1792 /**********************************************************************/
1793 /* High level hooks for t_vb_render.c */
1794 /**********************************************************************/
1795
1796 /* This is called when Mesa switches between rendering triangle
1797  * primitives (such as GL_POLYGON, GL_QUADS, GL_TRIANGLE_STRIP, etc.),
1798 * and lines, points and bitmaps.
1799 */
1800
1801 static void mach64RasterPrimitive( GLcontext *ctx, GLuint hwprim )
1802 {
1803 mach64ContextPtr mmesa = MACH64_CONTEXT(ctx);
1804
1805 mmesa->new_state |= MACH64_NEW_CONTEXT;
1806 mmesa->dirty |= MACH64_UPLOAD_CONTEXT;
1807
1808 if (mmesa->hw_primitive != hwprim) {
1809 FLUSH_BATCH( mmesa );
1810 mmesa->hw_primitive = hwprim;
1811 }
1812 }
1813
1814 static void mach64RenderPrimitive( GLcontext *ctx, GLenum prim )
1815 {
1816 mach64ContextPtr mmesa = MACH64_CONTEXT(ctx);
1817 GLuint hw = hw_prim[prim];
1818
1819 mmesa->render_primitive = prim;
1820
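   /* Leave the hardware primitive untouched for unfilled triangles; the
    * unfilled fallback decides per triangle how they are rasterized.
    */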
1821 if (prim >= GL_TRIANGLES && (ctx->_TriangleCaps & DD_TRI_UNFILLED))
1822 return;
1823
1824 mach64RasterPrimitive( ctx, hw );
1825 }
1826
1827
1828 static void mach64RenderStart( GLcontext *ctx )
1829 {
1830 /* Check for projective texturing. Make sure all texcoord
1831 * pointers point to something. (fix in mesa?)
1832 */
1833 mach64CheckTexSizes( ctx );
1834 }
1835
1836 static void mach64RenderFinish( GLcontext *ctx )
1837 {
1838 if (MACH64_CONTEXT(ctx)->RenderIndex & MACH64_FALLBACK_BIT)
1839 _swrast_flush( ctx );
1840 }
1841
1842
1843 /**********************************************************************/
1844 /* Transition to/from hardware rasterization. */
1845 /**********************************************************************/
1846
1847 static const char * const fallbackStrings[] = {
1848 "Texture mode",
1849 "glDrawBuffer(GL_FRONT_AND_BACK)",
1850 "glReadBuffer",
1851 "glEnable(GL_STENCIL) without hw stencil buffer",
1852 "glRenderMode(selection or feedback)",
1853 "glLogicOp (mode != GL_COPY)",
1854 "GL_SEPARATE_SPECULAR_COLOR",
1855 "glBlendEquation (mode != ADD)",
1856 "glBlendFunc",
1857 "Rasterization disable",
1858 };
1859
1860
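/* Map a single fallback bit to its entry (by bit position) in the table
 * above.
 */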
1861 static const char *getFallbackString(GLuint bit)
1862 {
1863 int i = 0;
1864 while (bit > 1) {
1865 i++;
1866 bit >>= 1;
1867 }
1868 return fallbackStrings[i];
1869 }
1870
1871 void mach64Fallback( GLcontext *ctx, GLuint bit, GLboolean mode )
1872 {
1873 TNLcontext *tnl = TNL_CONTEXT(ctx);
1874 mach64ContextPtr mmesa = MACH64_CONTEXT(ctx);
1875 GLuint oldfallback = mmesa->Fallback;
1876
1877 if (mode) {
1878 mmesa->Fallback |= bit;
1879 if (oldfallback == 0) {
1880 FLUSH_BATCH( mmesa );
1881 _swsetup_Wakeup( ctx );
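	 /* Poison the render index so mach64ChooseRenderState() re-hooks
	  * the driver render functions once the fallback ends.
	  */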
1882 mmesa->RenderIndex = ~0;
1883 if (MACH64_DEBUG & DEBUG_VERBOSE_FALLBACK) {
1884 fprintf(stderr, "Mach64 begin rasterization fallback: 0x%x %s\n",
1885 bit, getFallbackString(bit));
1886 }
1887 }
1888 }
1889 else {
1890 mmesa->Fallback &= ~bit;
1891 if (oldfallback == bit) {
1892 _swrast_flush( ctx );
1893 tnl->Driver.Render.Start = mach64RenderStart;
1894 tnl->Driver.Render.PrimitiveNotify = mach64RenderPrimitive;
1895 tnl->Driver.Render.Finish = mach64RenderFinish;
1896 tnl->Driver.Render.BuildVertices = mach64BuildVertices;
1897 mmesa->NewGLState |= (_MACH64_NEW_RENDER_STATE|
1898 _MACH64_NEW_VERTEX_STATE);
1899 if (MACH64_DEBUG & DEBUG_VERBOSE_FALLBACK) {
1900 fprintf(stderr, "Mach64 end rasterization fallback: 0x%x %s\n",
1901 bit, getFallbackString(bit));
1902 }
1903 }
1904 }
1905 }
1906
1907 /**********************************************************************/
1908 /* Initialization. */
1909 /**********************************************************************/
1910
1911 void mach64InitTriFuncs( GLcontext *ctx )
1912 {
1913 TNLcontext *tnl = TNL_CONTEXT(ctx);
1914 static int firsttime = 1;
1915
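   /* The raster function table is shared by all contexts; build it once. */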
1916 if (firsttime) {
1917 init_rast_tab();
1918 firsttime = 0;
1919 }
1920
1921 tnl->Driver.RunPipeline = mach64RunPipeline;
1922 tnl->Driver.Render.Start = mach64RenderStart;
1923 tnl->Driver.Render.Finish = mach64RenderFinish;
1924 tnl->Driver.Render.PrimitiveNotify = mach64RenderPrimitive;
1925 tnl->Driver.Render.ResetLineStipple = _swrast_ResetLineStipple;
1926 tnl->Driver.Render.BuildVertices = mach64BuildVertices;
1927 }