r300: rework index buffer setup
[mesa.git] / src / mesa / drivers / dri / r300 / r300_draw.c
1 /**************************************************************************
2 *
3 * Copyright 2009 Maciej Cencora
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL THE AUTHOR(S) AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 **************************************************************************/
26
27 #include <stdlib.h>
28
29 #include "main/glheader.h"
30 #include "main/context.h"
31 #include "main/state.h"
32 #include "main/api_validate.h"
33 #include "main/enums.h"
34
35 #include "r300_reg.h"
36 #include "r300_context.h"
37 #include "r300_emit.h"
38 #include "r300_render.h"
39 #include "r300_state.h"
40 #include "r300_tex.h"
41
42 #include "radeon_buffer_objects.h"
43
44 #include "tnl/tnl.h"
45 #include "tnl/t_vp_build.h"
46 #include "vbo/vbo_context.h"
47 #include "swrast/swrast.h"
48 #include "swrast_setup/swrast_setup.h"
49
50
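/**
 * Return the size in bytes of a single component of the given GL data type
 */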
51 static int getTypeSize(GLenum type)
52 {
53 switch (type) {
54 case GL_DOUBLE:
55 return sizeof(GLdouble);
56 case GL_FLOAT:
57 return sizeof(GLfloat);
58 case GL_INT:
59 return sizeof(GLint);
60 case GL_UNSIGNED_INT:
61 return sizeof(GLuint);
62 case GL_SHORT:
63 return sizeof(GLshort);
64 case GL_UNSIGNED_SHORT:
65 return sizeof(GLushort);
66 case GL_BYTE:
67 return sizeof(GLbyte);
68 case GL_UNSIGNED_BYTE:
69 return sizeof(GLubyte);
70 default:
71 assert(0);
72 return 0;
73 }
74 }
75
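/**
 * Repack indices the hardware cannot fetch directly into a freshly allocated
 * DMA region: unsigned byte indices are widened to 16 bits and stored two per
 * dword; on big-endian hosts unsigned short indices are rewritten through the
 * same dword path
 */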
76 static void r300FixupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
77 {
78 r300ContextPtr r300 = R300_CONTEXT(ctx);
79 GLvoid *src_ptr;
80 GLuint *out;
81 int i;
GLboolean mapped_named_bo = GL_FALSE;
82 
83 if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer) {
84 ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
85 assert(mesa_ind_buf->obj->Pointer != NULL);
mapped_named_bo = GL_TRUE;
86 }
87 src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);
88
89 if (mesa_ind_buf->type == GL_UNSIGNED_BYTE) {
90 GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
91 GLubyte *in = (GLubyte *)src_ptr;
92
93 radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);
94
95 assert(r300->ind_buf.bo->ptr != NULL);
96 out = (GLuint *)ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);
97
98 for (i = 0; i + 1 < mesa_ind_buf->count; i += 2) {
99 *out++ = in[i] | in[i + 1] << 16;
100 }
101
102 if (i < mesa_ind_buf->count) {
103 *out++ = in[i];
104 }
105
106 #if MESA_BIG_ENDIAN
107 } else { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
108 GLushort *in = (GLushort *)src_ptr;
109 GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
110 
111 radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);
112 
113 assert(r300->ind_buf.bo->ptr != NULL);
114 out = (GLuint *)ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);
115
116 for (i = 0; i + 1 < mesa_ind_buf->count; i += 2) {
117 *out++ = in[i] | in[i + 1] << 16;
118 }
119
120 if (i < mesa_ind_buf->count) {
121 *out++ = in[i];
122 }
123 #endif
124 }
125
126 r300->ind_buf.is_32bit = GL_FALSE;
127 r300->ind_buf.count = mesa_ind_buf->count;

if (mapped_named_bo) {
ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
}
128 }
129
130
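/**
 * Prepare the index buffer for hardware use: index types the hardware can
 * fetch directly are copied verbatim into a DMA region, while unsigned byte
 * indices (and, on big-endian hosts, unsigned short indices) are routed
 * through r300FixupIndexBuffer; a named element array buffer object is
 * mapped for reading while the copy is made and unmapped afterwards
 */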
131 static void r300SetupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
132 {
133 r300ContextPtr r300 = R300_CONTEXT(ctx);
134 GLboolean mapped_named_bo = GL_FALSE;
135
136 if (!mesa_ind_buf) {
137 r300->ind_buf.bo = NULL;
138 return;
139 }
140
141 #if MESA_BIG_ENDIAN
142 if (mesa_ind_buf->type == GL_UNSIGNED_INT) {
143 #else
144 if (mesa_ind_buf->type != GL_UNSIGNED_BYTE) {
145 #endif
146 const GLvoid *src_ptr;
147 GLvoid *dst_ptr;
148
149 if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer) {
150 ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
151 assert(mesa_ind_buf->obj->Pointer != NULL);
152 mapped_named_bo = GL_TRUE;
153 }
154
155 src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);
156
157 const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);
158
159 radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);
160
161 assert(r300->ind_buf.bo->ptr != NULL);
162 dst_ptr = ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);
163 _mesa_memcpy(dst_ptr, src_ptr, size);
164
165 r300->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
166 r300->ind_buf.count = mesa_ind_buf->count;

if (mapped_named_bo) {
ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
}
167 } else {
168 r300FixupIndexBuffer(ctx, mesa_ind_buf);
169 }
170 }
171
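/* Helper for r300ConvertAttrib: expects src_ptr, dst_ptr, stride, count and
 * input in the enclosing scope; converts count elements of input->Size
 * components each to GLfloat, applying MACRO when the array is normalized
 * and a plain cast otherwise
 */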
172 #define CONVERT( TYPE, MACRO ) do { \
173 GLuint i, j, sz; \
174 sz = input->Size; \
175 if (input->Normalized) { \
176 for (i = 0; i < count; i++) { \
177 const TYPE *in = (TYPE *)src_ptr; \
178 for (j = 0; j < sz; j++) { \
179 *dst_ptr++ = MACRO(*in); \
180 in++; \
181 } \
182 src_ptr += stride; \
183 } \
184 } else { \
185 for (i = 0; i < count; i++) { \
186 const TYPE *in = (TYPE *)src_ptr; \
187 for (j = 0; j < sz; j++) { \
188 *dst_ptr++ = (GLfloat)(*in); \
189 in++; \
190 } \
191 src_ptr += stride; \
192 } \
193 } \
194 } while (0)
195
196 /**
197 * Convert attribute data type to float
198 * If the attribute uses named buffer object replace the bo with newly allocated bo
199 */
200 static void r300ConvertAttrib(GLcontext *ctx, int count, const struct gl_client_array *input, struct vertex_attribute *attr)
201 {
202 r300ContextPtr r300 = R300_CONTEXT(ctx);
203 const GLvoid *src_ptr;
204 GLboolean mapped_named_bo = GL_FALSE;
205 GLfloat *dst_ptr;
206 GLuint stride;
207
208 stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;
209
210 /* Convert value for first element only */
211 if (input->StrideB == 0)
212 count = 1;
213
214 if (input->BufferObj->Name) {
215 if (!input->BufferObj->Pointer) {
216 ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
217 mapped_named_bo = GL_TRUE;
218 }
219
220 src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
221 } else {
222 src_ptr = input->Ptr;
223 }
224
225 radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, sizeof(GLfloat) * input->Size * count, 32);
226 dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
227
228 if (RADEON_DEBUG & DEBUG_FALLBACKS) {
229 fprintf(stderr, "%s: Converting vertex attributes, attribute data format %x,", __FUNCTION__, input->Type);
230 fprintf(stderr, "stride %d, components %d\n", stride, input->Size);
231 }
232
233 assert(src_ptr != NULL);
234
235 switch (input->Type) {
236 case GL_DOUBLE:
237 CONVERT(GLdouble, (GLfloat));
238 break;
239 case GL_UNSIGNED_INT:
240 CONVERT(GLuint, UINT_TO_FLOAT);
241 break;
242 case GL_INT:
243 CONVERT(GLint, INT_TO_FLOAT);
244 break;
245 case GL_UNSIGNED_SHORT:
246 CONVERT(GLushort, USHORT_TO_FLOAT);
247 break;
248 case GL_SHORT:
249 CONVERT(GLshort, SHORT_TO_FLOAT);
250 break;
251 case GL_UNSIGNED_BYTE:
252 assert(input->Format != GL_BGRA);
253 CONVERT(GLubyte, UBYTE_TO_FLOAT);
254 break;
255 case GL_BYTE:
256 CONVERT(GLbyte, BYTE_TO_FLOAT);
257 break;
258 default:
259 assert(0);
260 break;
261 }
262
263 if (mapped_named_bo) {
264 ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
265 }
266 }
267
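/**
 * Copy vertex data out of a buffer object into a DMA region, rounding the
 * per-vertex stride up to the next multiple of 4 bytes
 */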
268 static void r300AlignDataToDword(GLcontext *ctx, const struct gl_client_array *input, int count, struct vertex_attribute *attr)
269 {
270 r300ContextPtr r300 = R300_CONTEXT(ctx);
271 const int dst_stride = (input->StrideB + 3) & ~3;
272 const int size = getTypeSize(input->Type) * input->Size * count;
273 GLboolean mapped_named_bo = GL_FALSE;
274
275 radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, size, 32);
276
277 if (!input->BufferObj->Pointer) {
278 ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
279 mapped_named_bo = GL_TRUE;
280 }
281
282 {
283 GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
284 GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
285 int i;
286
287 for (i = 0; i < count; ++i) {
288 _mesa_memcpy(dst_ptr, src_ptr, input->StrideB);
289 src_ptr += input->StrideB;
290 dst_ptr += dst_stride;
291 }
292 }
293
294 if (mapped_named_bo) {
295 ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
296 }
297
298 attr->stride = dst_stride;
299 }
300
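/**
 * Build the hardware attribute descriptor for one input array: data the
 * hardware cannot fetch directly (doubles, 32-bit integers, under-aligned
 * strides and, on big-endian hosts, non-dword-sized types) is converted to
 * float first; dword-aligned named buffer objects are referenced in place,
 * everything else is copied into a DMA region
 */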
301 static void r300TranslateAttrib(GLcontext *ctx, GLuint attr, int count, const struct gl_client_array *input)
302 {
303 r300ContextPtr r300 = R300_CONTEXT(ctx);
304 struct r300_vertex_buffer *vbuf = &r300->vbuf;
305 struct vertex_attribute r300_attr;
306 GLenum type;
307 GLuint stride;
308
309 stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;
310
311 if (input->Type == GL_DOUBLE || input->Type == GL_UNSIGNED_INT || input->Type == GL_INT ||
312 #if MESA_BIG_ENDIAN
313 getTypeSize(input->Type) != 4 ||
314 #endif
315 stride < 4) {
316
317 type = GL_FLOAT;
318
319 r300ConvertAttrib(ctx, count, input, &r300_attr);
320 if (input->StrideB == 0) {
321 r300_attr.stride = 0;
322 } else {
323 r300_attr.stride = sizeof(GLfloat) * input->Size;
324 }
325 r300_attr.dwords = input->Size;
326 r300_attr.is_named_bo = GL_FALSE;
327 } else {
328 type = input->Type;
329 r300_attr.dwords = (getTypeSize(type) * input->Size + 3)/ 4;
330 if (input->BufferObj->Name) {
331 if (stride % 4 != 0) {
332 assert(((int) input->Ptr) % input->StrideB == 0);
333 r300AlignDataToDword(ctx, input, count, &r300_attr);
334 r300_attr.is_named_bo = GL_FALSE;
335 } else {
336 r300_attr.stride = input->StrideB;
337 r300_attr.bo_offset = (GLuint) input->Ptr;
338 r300_attr.bo = get_radeon_buffer_object(input->BufferObj)->bo;
339 r300_attr.is_named_bo = GL_TRUE;
340 }
341 } else {
342 int size;
343 uint32_t *dst;
344
345 if (input->StrideB == 0) {
346 size = getTypeSize(input->Type) * input->Size;
347 count = 1;
348 r300_attr.stride = 0;
349 } else {
350 size = getTypeSize(input->Type) * input->Size * count;
351 r300_attr.stride = (getTypeSize(type) * input->Size + 3) & ~3;
352 }
353
354 radeonAllocDmaRegion(&r300->radeon, &r300_attr.bo, &r300_attr.bo_offset, size, 32);
355 assert(r300_attr.bo->ptr != NULL);
356 dst = (uint32_t *)ADD_POINTERS(r300_attr.bo->ptr, r300_attr.bo_offset);
357 switch (r300_attr.dwords) {
358 case 1: radeonEmitVec4(dst, input->Ptr, input->StrideB, count); break;
359 case 2: radeonEmitVec8(dst, input->Ptr, input->StrideB, count); break;
360 case 3: radeonEmitVec12(dst, input->Ptr, input->StrideB, count); break;
361 case 4: radeonEmitVec16(dst, input->Ptr, input->StrideB, count); break;
362 default: assert(0); break;
363 }
364
365 r300_attr.is_named_bo = GL_FALSE;
366 }
367 }
368
369 r300_attr.size = input->Size;
370 r300_attr.element = attr;
371 r300_attr.dst_loc = vbuf->num_attribs;
372
373 switch (type) {
374 case GL_FLOAT:
375 switch (input->Size) {
376 case 1: r300_attr.data_type = R300_DATA_TYPE_FLOAT_1; break;
377 case 2: r300_attr.data_type = R300_DATA_TYPE_FLOAT_2; break;
378 case 3: r300_attr.data_type = R300_DATA_TYPE_FLOAT_3; break;
379 case 4: r300_attr.data_type = R300_DATA_TYPE_FLOAT_4; break;
380 }
381 r300_attr._signed = 0;
382 r300_attr.normalize = 0;
383 break;
384 case GL_SHORT:
385 r300_attr._signed = 1;
386 r300_attr.normalize = input->Normalized;
387 switch (input->Size) {
388 case 1:
389 case 2:
390 r300_attr.data_type = R300_DATA_TYPE_SHORT_2;
391 break;
392 case 3:
393 case 4:
394 r300_attr.data_type = R300_DATA_TYPE_SHORT_4;
395 break;
396 }
397 break;
398 case GL_BYTE:
399 r300_attr._signed = 1;
400 r300_attr.normalize = input->Normalized;
401 r300_attr.data_type = R300_DATA_TYPE_BYTE;
402 break;
403 case GL_UNSIGNED_SHORT:
404 r300_attr._signed = 0;
405 r300_attr.normalize = input->Normalized;
406 switch (input->Size) {
407 case 1:
408 case 2:
409 r300_attr.data_type = R300_DATA_TYPE_SHORT_2;
410 break;
411 case 3:
412 case 4:
413 r300_attr.data_type = R300_DATA_TYPE_SHORT_4;
414 break;
415 }
416 break;
417 case GL_UNSIGNED_BYTE:
418 r300_attr._signed = 0;
419 r300_attr.normalize = input->Normalized;
420 if (input->Format == GL_BGRA)
421 r300_attr.data_type = R300_DATA_TYPE_D3DCOLOR;
422 else
423 r300_attr.data_type = R300_DATA_TYPE_BYTE;
424 break;
425
426 default:
427 case GL_DOUBLE:
428 case GL_INT:
429 case GL_UNSIGNED_INT:
430 assert(0);
431 break;
432 }
433
434 switch (input->Size) {
435 case 4:
436 r300_attr.swizzle = SWIZZLE_XYZW;
437 break;
438 case 3:
439 r300_attr.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_ONE);
440 break;
441 case 2:
442 r300_attr.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_ZERO, SWIZZLE_ONE);
443 break;
444 case 1:
445 r300_attr.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_ZERO, SWIZZLE_ZERO, SWIZZLE_ONE);
446 break;
447 }
448
449 r300_attr.write_mask = MASK_XYZW;
450
451 vbuf->attribs[vbuf->num_attribs] = r300_attr;
452 ++vbuf->num_attribs;
453 }
454
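/**
 * Translate every attribute read by the selected vertex program, fall back
 * if the hardware AOS limit is exceeded, then point the radeon aos slots at
 * the resulting buffers and add named buffer objects and the index buffer
 * to the command stream space check
 */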
455 static void r300SetVertexFormat(GLcontext *ctx, const struct gl_client_array *arrays[], int count)
456 {
457 r300ContextPtr r300 = R300_CONTEXT(ctx);
458 struct r300_vertex_buffer *vbuf = &r300->vbuf;
459
460 {
461 int i, tmp;
462
463 tmp = r300->selected_vp->code.InputsRead;
464 i = 0;
465 vbuf->num_attribs = 0;
466 while (tmp) {
467 /* find first enabled bit */
468 while (!(tmp & 1)) {
469 tmp >>= 1;
470 ++i;
471 }
472
473 r300TranslateAttrib(ctx, i, count, arrays[i]);
474
475 tmp >>= 1;
476 ++i;
477 }
478 }
479
480 r300SwitchFallback(ctx, R300_FALLBACK_AOS_LIMIT, vbuf->num_attribs > R300_MAX_AOS_ARRAYS);
481 if (r300->fallback)
482 return;
483
484 {
485 int i;
486
487 for (i = 0; i < vbuf->num_attribs; i++) {
488 struct radeon_aos *aos = &r300->radeon.tcl.aos[i];
489
490 aos->count = vbuf->attribs[i].stride == 0 ? 1 : count;
491 aos->stride = vbuf->attribs[i].stride / sizeof(float);
492 aos->offset = vbuf->attribs[i].bo_offset;
493 aos->components = vbuf->attribs[i].dwords;
494 aos->bo = vbuf->attribs[i].bo;
495
496 if (vbuf->attribs[i].is_named_bo) {
497 radeon_cs_space_check_with_bo(r300->radeon.cmdbuf.cs,
498 aos->bo,
499 RADEON_GEM_DOMAIN_GTT, 0);
500 }
501 }
502 r300->radeon.tcl.aos_count = vbuf->num_attribs;
503
504 if (r300->ind_buf.bo) {
505 radeon_cs_space_check_with_bo(r300->radeon.cmdbuf.cs,
506 r300->ind_buf.bo,
507 RADEON_GEM_DOMAIN_GTT, 0);
508 }
509 }
510 }
511
512 static void r300FreeData(GLcontext *ctx)
513 {
514 /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
515 * to prevent double unref in radeonReleaseArrays
516 * called during context destroy
517 */
518 r300ContextPtr r300 = R300_CONTEXT(ctx);
519 {
520 int i;
521
522 for (i = 0; i < r300->vbuf.num_attribs; i++) {
523 if (!r300->vbuf.attribs[i].is_named_bo) {
524 radeon_bo_unref(r300->vbuf.attribs[i].bo);
525 }
526 r300->radeon.tcl.aos[i].bo = NULL;
527 }
528 }
529
530 {
531 if (r300->ind_buf.bo != NULL) {
532 radeon_bo_unref(r300->ind_buf.bo);
533 }
534 }
535 }
536
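/**
 * Attempt to draw the primitives on the hardware path: validate state and
 * buffers, upload indices and vertex arrays, emit VAP and shader state and
 * fire one render packet per primitive; returns GL_FALSE if a fallback
 * condition was hit
 */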
537 static GLboolean r300TryDrawPrims(GLcontext *ctx,
538 const struct gl_client_array *arrays[],
539 const struct _mesa_prim *prim,
540 GLuint nr_prims,
541 const struct _mesa_index_buffer *ib,
542 GLuint min_index,
543 GLuint max_index )
544 {
545 struct r300_context *r300 = R300_CONTEXT(ctx);
546 GLuint i;
547
548 if (ctx->NewState)
549 _mesa_update_state( ctx );
550
551 if (r300->options.hw_tcl_enabled)
552 _tnl_UpdateFixedFunctionProgram(ctx);
553
554 r300UpdateShaders(r300);
555
556 r300SwitchFallback(ctx, R300_FALLBACK_INVALID_BUFFERS, !r300ValidateBuffers(ctx));
557
558 r300SetupIndexBuffer(ctx, ib);
559
560 /* ensure we have the cmd buf space in advance to cover
561 * the state + DMA AOS pointers */
562 rcommonEnsureCmdBufSpace(&r300->radeon,
563 r300->radeon.hw.max_state_size + (50*sizeof(int)),
564 __FUNCTION__);
565
566 r300SetVertexFormat(ctx, arrays, max_index + 1);
567
568 if (r300->fallback)
569 return GL_FALSE;
570
571 r300SetupVAP(ctx, r300->selected_vp->code.InputsRead, r300->selected_vp->code.OutputsWritten);
572
573 r300UpdateShaderStates(r300);
574
575 r300EmitCacheFlush(r300);
576 radeonEmitState(&r300->radeon);
577
578 for (i = 0; i < nr_prims; ++i) {
579 r300RunRenderPrimitive(ctx, prim[i].start, prim[i].start + prim[i].count, prim[i].mode);
580 }
581
582 r300EmitCacheFlush(r300);
583
584 r300FreeData(ctx);
585
586 return GL_TRUE;
587 }
588
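/**
 * vbo draw entry point: rebase when min_index is non-zero, split draws
 * whose index count exceeds 65535, and run the software TNL pipeline if the
 * hardware path refuses the draw
 */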
589 static void r300DrawPrims(GLcontext *ctx,
590 const struct gl_client_array *arrays[],
591 const struct _mesa_prim *prim,
592 GLuint nr_prims,
593 const struct _mesa_index_buffer *ib,
594 GLuint min_index,
595 GLuint max_index)
596 {
597 struct split_limits limits;
598 GLboolean retval;
599
600 if (ib)
601 limits.max_verts = 0xffffffff;
602 else
603 limits.max_verts = 65535;
604
605 limits.max_indices = 65535;
606 limits.max_vb_size = 1024*1024;
607
608 if (min_index) {
609 vbo_rebase_prims( ctx, arrays, prim, nr_prims, ib, min_index, max_index, r300DrawPrims );
610 return;
611 }
612 if ((ib && ib->count > 65535)) {
613 vbo_split_prims (ctx, arrays, prim, nr_prims, ib, min_index, max_index, r300DrawPrims, &limits);
614 return;
615 }
616
617 /* Make an attempt at drawing */
618 retval = r300TryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
619
620 /* If failed run tnl pipeline - it should take care of fallbacks */
621 if (!retval)
622 _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
623 }
624
625 void r300InitDraw(GLcontext *ctx)
626 {
627 struct vbo_context *vbo = vbo_context(ctx);
628
629 vbo->draw_prims = r300DrawPrims;
630 }