radeon: Optimize memory handling for dma operations.
[mesa.git] / src/mesa/drivers/dri/r300/r300_draw.c
/**************************************************************************
 *
 * Copyright 2009 Maciej Cencora
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHOR(S) AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <stdlib.h>

#include "main/glheader.h"
#include "main/context.h"
#include "main/state.h"
#include "main/api_validate.h"
#include "main/enums.h"
#include "main/simple_list.h"

#include "r300_reg.h"
#include "r300_context.h"
#include "r300_emit.h"
#include "r300_render.h"
#include "r300_state.h"
#include "r300_tex.h"

#include "radeon_buffer_objects.h"

#include "tnl/tnl.h"
#include "tnl/t_vp_build.h"
#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"

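/* Size in bytes of a single component of the given GL data type. */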
static int getTypeSize(GLenum type)
{
    switch (type) {
    case GL_DOUBLE:
        return sizeof(GLdouble);
    case GL_FLOAT:
        return sizeof(GLfloat);
    case GL_INT:
        return sizeof(GLint);
    case GL_UNSIGNED_INT:
        return sizeof(GLuint);
    case GL_SHORT:
        return sizeof(GLshort);
    case GL_UNSIGNED_SHORT:
        return sizeof(GLushort);
    case GL_BYTE:
        return sizeof(GLbyte);
    case GL_UNSIGNED_BYTE:
        return sizeof(GLubyte);
    default:
        assert(0);
        return 0;
    }
}

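/* Repack index data the hardware cannot consume directly: unsigned byte
 * indices (and, on big-endian hosts, unsigned short indices) are widened
 * to 16 bits and packed two per dword into a freshly allocated DMA region.
 * If the source element array buffer is a named buffer object that is not
 * already mapped, it is mapped for the duration of the copy.
 */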
static void r300FixupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    r300ContextPtr r300 = R300_CONTEXT(ctx);
    GLvoid *src_ptr;
    GLuint *out;
    int i;
    GLboolean mapped_named_bo = GL_FALSE;

    if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer) {
        ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
        mapped_named_bo = GL_TRUE;
        assert(mesa_ind_buf->obj->Pointer != NULL);
    }
    src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

    if (mesa_ind_buf->type == GL_UNSIGNED_BYTE) {
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
        GLubyte *in = (GLubyte *)src_ptr;

        radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);

        assert(r300->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2) {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count) {
            *out++ = in[i];
        }

#if MESA_BIG_ENDIAN
    } else { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
        GLuint size;
        GLushort *in = (GLushort *)src_ptr;
        size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

        radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);

        assert(r300->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2) {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count) {
            *out++ = in[i];
        }
#endif
    }

    r300->ind_buf.is_32bit = GL_FALSE;
    r300->ind_buf.count = mesa_ind_buf->count;

    if (mapped_named_bo) {
        ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
    }
}

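/* Stage the index buffer for the hardware.  Index data that can be used
 * as-is is copied straight into a DMA region; everything else (unsigned
 * byte indices, and on big-endian hosts anything but unsigned int) is
 * rewritten by r300FixupIndexBuffer().  With no index buffer, ind_buf.bo
 * is cleared so the non-indexed path is taken.
 */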
static void r300SetupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    r300ContextPtr r300 = R300_CONTEXT(ctx);

    if (!mesa_ind_buf) {
        r300->ind_buf.bo = NULL;
        return;
    }

#if MESA_BIG_ENDIAN
    if (mesa_ind_buf->type == GL_UNSIGNED_INT) {
#else
    if (mesa_ind_buf->type != GL_UNSIGNED_BYTE) {
#endif
        const GLvoid *src_ptr;
        GLvoid *dst_ptr;
        GLboolean mapped_named_bo = GL_FALSE;

        if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer) {
            ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
            assert(mesa_ind_buf->obj->Pointer != NULL);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

        const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);

        radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);

        assert(r300->ind_buf.bo->ptr != NULL);
        dst_ptr = ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);
        _mesa_memcpy(dst_ptr, src_ptr, size);

        r300->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
        r300->ind_buf.count = mesa_ind_buf->count;

        if (mapped_named_bo) {
            ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
        }
    } else {
        r300FixupIndexBuffer(ctx, mesa_ind_buf);
    }
}

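/* Expand one vertex attribute array to floats.  TYPE is the source C type
 * and MACRO the normalization helper; src_ptr, dst_ptr, stride, count and
 * input are taken from the calling scope.
 */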
#define CONVERT( TYPE, MACRO ) do {                     \
    GLuint i, j, sz;                                    \
    sz = input->Size;                                   \
    if (input->Normalized) {                            \
        for (i = 0; i < count; i++) {                   \
            const TYPE *in = (TYPE *)src_ptr;           \
            for (j = 0; j < sz; j++) {                  \
                *dst_ptr++ = MACRO(*in);                \
                in++;                                   \
            }                                           \
            src_ptr += stride;                          \
        }                                               \
    } else {                                            \
        for (i = 0; i < count; i++) {                   \
            const TYPE *in = (TYPE *)src_ptr;           \
            for (j = 0; j < sz; j++) {                  \
                *dst_ptr++ = (GLfloat)(*in);            \
                in++;                                   \
            }                                           \
            src_ptr += stride;                          \
        }                                               \
    }                                                   \
} while (0)

/**
 * Convert attribute data type to float.
 * If the attribute uses a named buffer object, replace the bo with a newly
 * allocated bo.
 */
static void r300ConvertAttrib(GLcontext *ctx, int count, const struct gl_client_array *input, struct vertex_attribute *attr)
{
    r300ContextPtr r300 = R300_CONTEXT(ctx);
    const GLvoid *src_ptr;
    GLboolean mapped_named_bo = GL_FALSE;
    GLfloat *dst_ptr;
    GLuint stride;

    stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;

    /* Convert value for first element only */
    if (input->StrideB == 0)
        count = 1;

    if (input->BufferObj->Name) {
        if (!input->BufferObj->Pointer) {
            ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
    } else {
        src_ptr = input->Ptr;
    }

    radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, sizeof(GLfloat) * input->Size * count, 32);
    dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

    if (RADEON_DEBUG & DEBUG_FALLBACKS) {
        fprintf(stderr, "%s: Converting vertex attributes, attribute data format %x,", __FUNCTION__, input->Type);
        fprintf(stderr, "stride %d, components %d\n", stride, input->Size);
    }

    assert(src_ptr != NULL);

    switch (input->Type) {
    case GL_DOUBLE:
        CONVERT(GLdouble, (GLfloat));
        break;
    case GL_UNSIGNED_INT:
        CONVERT(GLuint, UINT_TO_FLOAT);
        break;
    case GL_INT:
        CONVERT(GLint, INT_TO_FLOAT);
        break;
    case GL_UNSIGNED_SHORT:
        CONVERT(GLushort, USHORT_TO_FLOAT);
        break;
    case GL_SHORT:
        CONVERT(GLshort, SHORT_TO_FLOAT);
        break;
    case GL_UNSIGNED_BYTE:
        assert(input->Format != GL_BGRA);
        CONVERT(GLubyte, UBYTE_TO_FLOAT);
        break;
    case GL_BYTE:
        CONVERT(GLbyte, BYTE_TO_FLOAT);
        break;
    default:
        assert(0);
        break;
    }

    if (mapped_named_bo) {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }
}

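/* Copy attribute data out of a named buffer object into a DMA region,
 * padding each element so that the resulting stride is dword aligned.
 */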
static void r300AlignDataToDword(GLcontext *ctx, const struct gl_client_array *input, int count, struct vertex_attribute *attr)
{
    r300ContextPtr r300 = R300_CONTEXT(ctx);
    const int dst_stride = (input->StrideB + 3) & ~3;
    const int size = getTypeSize(input->Type) * input->Size * count;
    GLboolean mapped_named_bo = GL_FALSE;

    radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, size, 32);

    if (!input->BufferObj->Pointer) {
        ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
        mapped_named_bo = GL_TRUE;
    }

    {
        GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
        GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
        int i;

        for (i = 0; i < count; ++i) {
            _mesa_memcpy(dst_ptr, src_ptr, input->StrideB);
            src_ptr += input->StrideB;
            dst_ptr += dst_stride;
        }
    }

    if (mapped_named_bo) {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }

    attr->stride = dst_stride;
}

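/* Build the vertex_attribute descriptor for one enabled input array and
 * append it to the software vertex buffer state.  Inputs that need it
 * (doubles, 32-bit integers, strides below 4 bytes and, on big-endian
 * hosts, any non-dword-sized type) are converted to floats; data from
 * user memory or misaligned named buffer objects is staged into DMA
 * regions, while well-aligned named buffer objects are referenced in
 * place.
 */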
static void r300TranslateAttrib(GLcontext *ctx, GLuint attr, int count, const struct gl_client_array *input)
{
    r300ContextPtr r300 = R300_CONTEXT(ctx);
    struct r300_vertex_buffer *vbuf = &r300->vbuf;
    struct vertex_attribute r300_attr;
    GLenum type;
    GLuint stride;

    stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;

    if (input->Type == GL_DOUBLE || input->Type == GL_UNSIGNED_INT || input->Type == GL_INT ||
#if MESA_BIG_ENDIAN
        getTypeSize(input->Type) != 4 ||
#endif
        stride < 4) {

        type = GL_FLOAT;

        r300ConvertAttrib(ctx, count, input, &r300_attr);
        if (input->StrideB == 0) {
            r300_attr.stride = 0;
        } else {
            r300_attr.stride = sizeof(GLfloat) * input->Size;
        }
        r300_attr.dwords = input->Size;
        r300_attr.is_named_bo = GL_FALSE;
    } else {
        type = input->Type;
        r300_attr.dwords = (getTypeSize(type) * input->Size + 3) / 4;
        if (input->BufferObj->Name) {
            if (stride % 4 != 0) {
                assert(((int) input->Ptr) % input->StrideB == 0);
                r300AlignDataToDword(ctx, input, count, &r300_attr);
                r300_attr.is_named_bo = GL_FALSE;
            } else {
                r300_attr.stride = input->StrideB;
                r300_attr.bo_offset = (GLuint) input->Ptr;
                r300_attr.bo = get_radeon_buffer_object(input->BufferObj)->bo;
                r300_attr.is_named_bo = GL_TRUE;
            }
        } else {
            int size;
            uint32_t *dst;

            if (input->StrideB == 0) {
                size = getTypeSize(input->Type) * input->Size;
                count = 1;
                r300_attr.stride = 0;
            } else {
                size = getTypeSize(input->Type) * input->Size * count;
                r300_attr.stride = (getTypeSize(type) * input->Size + 3) & ~3;
            }

            radeonAllocDmaRegion(&r300->radeon, &r300_attr.bo, &r300_attr.bo_offset, size, 32);
            assert(r300_attr.bo->ptr != NULL);
            dst = (uint32_t *)ADD_POINTERS(r300_attr.bo->ptr, r300_attr.bo_offset);
            switch (r300_attr.dwords) {
            case 1: radeonEmitVec4(dst, input->Ptr, input->StrideB, count); break;
            case 2: radeonEmitVec8(dst, input->Ptr, input->StrideB, count); break;
            case 3: radeonEmitVec12(dst, input->Ptr, input->StrideB, count); break;
            case 4: radeonEmitVec16(dst, input->Ptr, input->StrideB, count); break;
            default: assert(0); break;
            }

            r300_attr.is_named_bo = GL_FALSE;
        }
    }

    r300_attr.size = input->Size;
    r300_attr.element = attr;
    r300_attr.dst_loc = vbuf->num_attribs;

    switch (type) {
    case GL_FLOAT:
        switch (input->Size) {
        case 1: r300_attr.data_type = R300_DATA_TYPE_FLOAT_1; break;
        case 2: r300_attr.data_type = R300_DATA_TYPE_FLOAT_2; break;
        case 3: r300_attr.data_type = R300_DATA_TYPE_FLOAT_3; break;
        case 4: r300_attr.data_type = R300_DATA_TYPE_FLOAT_4; break;
        }
        r300_attr._signed = 0;
        r300_attr.normalize = 0;
        break;
    case GL_SHORT:
        r300_attr._signed = 1;
        r300_attr.normalize = input->Normalized;
        switch (input->Size) {
        case 1:
        case 2:
            r300_attr.data_type = R300_DATA_TYPE_SHORT_2;
            break;
        case 3:
        case 4:
            r300_attr.data_type = R300_DATA_TYPE_SHORT_4;
            break;
        }
        break;
    case GL_BYTE:
        r300_attr._signed = 1;
        r300_attr.normalize = input->Normalized;
        r300_attr.data_type = R300_DATA_TYPE_BYTE;
        break;
    case GL_UNSIGNED_SHORT:
        r300_attr._signed = 0;
        r300_attr.normalize = input->Normalized;
        switch (input->Size) {
        case 1:
        case 2:
            r300_attr.data_type = R300_DATA_TYPE_SHORT_2;
            break;
        case 3:
        case 4:
            r300_attr.data_type = R300_DATA_TYPE_SHORT_4;
            break;
        }
        break;
    case GL_UNSIGNED_BYTE:
        r300_attr._signed = 0;
        r300_attr.normalize = input->Normalized;
        if (input->Format == GL_BGRA)
            r300_attr.data_type = R300_DATA_TYPE_D3DCOLOR;
        else
            r300_attr.data_type = R300_DATA_TYPE_BYTE;
        break;

    default:
    case GL_DOUBLE:
    case GL_INT:
    case GL_UNSIGNED_INT:
        assert(0);
        break;
    }

    switch (input->Size) {
    case 4:
        r300_attr.swizzle = SWIZZLE_XYZW;
        break;
    case 3:
        r300_attr.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_ONE);
        break;
    case 2:
        r300_attr.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_ZERO, SWIZZLE_ONE);
        break;
    case 1:
        r300_attr.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_ZERO, SWIZZLE_ZERO, SWIZZLE_ONE);
        break;
    }

    r300_attr.write_mask = MASK_XYZW;

    vbuf->attribs[vbuf->num_attribs] = r300_attr;
    ++vbuf->num_attribs;
}

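/* Walk the inputs read by the selected vertex program, translate each one
 * and hook the resulting buffers up to the radeon tcl.aos state.  Falls
 * back if more than R300_MAX_AOS_ARRAYS attributes are needed or if the
 * command submission space check for the buffers fails.
 */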
static void r300SetVertexFormat(GLcontext *ctx, const struct gl_client_array *arrays[], int count)
{
    r300ContextPtr r300 = R300_CONTEXT(ctx);
    struct r300_vertex_buffer *vbuf = &r300->vbuf;
    int ret;
    {
        int i, tmp;

        tmp = r300->selected_vp->code.InputsRead;
        i = 0;
        vbuf->num_attribs = 0;
        while (tmp) {
            /* find first enabled bit */
            while (!(tmp & 1)) {
                tmp >>= 1;
                ++i;
            }

            r300TranslateAttrib(ctx, i, count, arrays[i]);

            tmp >>= 1;
            ++i;
        }
    }

    r300SwitchFallback(ctx, R300_FALLBACK_AOS_LIMIT, vbuf->num_attribs > R300_MAX_AOS_ARRAYS);
    if (r300->fallback)
        return;

    {
        int i;

        for (i = 0; i < vbuf->num_attribs; i++) {
            struct radeon_aos *aos = &r300->radeon.tcl.aos[i];

            aos->count = vbuf->attribs[i].stride == 0 ? 1 : count;
            aos->stride = vbuf->attribs[i].stride / sizeof(float);
            aos->offset = vbuf->attribs[i].bo_offset;
            aos->components = vbuf->attribs[i].dwords;
            aos->bo = vbuf->attribs[i].bo;

            if (vbuf->attribs[i].is_named_bo) {
                radeon_cs_space_add_persistent_bo(r300->radeon.cmdbuf.cs, r300->vbuf.attribs[i].bo, RADEON_GEM_DOMAIN_GTT, 0);
            }
        }

        r300->radeon.tcl.aos_count = vbuf->num_attribs;
        ret = radeon_cs_space_check_with_bo(r300->radeon.cmdbuf.cs, first_elem(&r300->radeon.dma.reserved)->bo, RADEON_GEM_DOMAIN_GTT, 0);
        if (ret)
            r300SwitchFallback(ctx, R300_FALLBACK_INVALID_BUFFERS, GL_TRUE);
    }
}

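/* Release the DMA buffers referenced by the per-draw vertex and index
 * state.  Attributes that reference a named buffer object in place
 * (is_named_bo) are not unreferenced here, since this draw path never
 * took a reference on them.
 */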
static void r300FreeData(GLcontext *ctx)
{
    /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
     * to prevent double unref in radeonReleaseArrays
     * called during context destroy
     */
    r300ContextPtr r300 = R300_CONTEXT(ctx);
    {
        int i;

        for (i = 0; i < r300->vbuf.num_attribs; i++) {
            if (!r300->vbuf.attribs[i].is_named_bo) {
                radeon_bo_unref(r300->vbuf.attribs[i].bo);
            }
            r300->radeon.tcl.aos[i].bo = NULL;
        }
    }

    {
        if (r300->ind_buf.bo != NULL) {
            radeon_bo_unref(r300->ind_buf.bo);
        }
    }
}

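/* Attempt to render the primitives on the hardware TCL path.  Returns
 * GL_FALSE when a fallback condition is hit, so the caller can hand the
 * draw over to the TNL pipeline instead.
 */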
static GLboolean r300TryDrawPrims(GLcontext *ctx,
                                  const struct gl_client_array *arrays[],
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims,
                                  const struct _mesa_index_buffer *ib,
                                  GLuint min_index,
                                  GLuint max_index )
{
    struct r300_context *r300 = R300_CONTEXT(ctx);
    GLuint i;

    if (ctx->NewState)
        _mesa_update_state( ctx );

    if (r300->options.hw_tcl_enabled)
        _tnl_UpdateFixedFunctionProgram(ctx);

    r300UpdateShaders(r300);

    r300SwitchFallback(ctx, R300_FALLBACK_INVALID_BUFFERS, !r300ValidateBuffers(ctx));

    /* ensure we have the cmd buf space in advance to cover
     * the state + DMA AOS pointers */
    rcommonEnsureCmdBufSpace(&r300->radeon,
                             r300->radeon.hw.max_state_size + (60*sizeof(int)),
                             __FUNCTION__);

    r300SetupIndexBuffer(ctx, ib);

    r300SetVertexFormat(ctx, arrays, max_index + 1);

    if (r300->fallback)
        return GL_FALSE;

    r300SetupVAP(ctx, r300->selected_vp->code.InputsRead, r300->selected_vp->code.OutputsWritten);

    r300UpdateShaderStates(r300);

    r300EmitCacheFlush(r300);
    radeonEmitState(&r300->radeon);

    for (i = 0; i < nr_prims; ++i) {
        r300RunRenderPrimitive(ctx, prim[i].start, prim[i].start + prim[i].count, prim[i].mode);
    }

    r300EmitCacheFlush(r300);

    r300FreeData(ctx);

    return GL_TRUE;
}

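/* vbo draw_prims entry point: computes missing index bounds, rebases
 * primitives so min_index is zero, then tries the hardware path and
 * falls back to the TNL pipeline if that fails.
 */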
static void r300DrawPrims(GLcontext *ctx,
                          const struct gl_client_array *arrays[],
                          const struct _mesa_prim *prim,
                          GLuint nr_prims,
                          const struct _mesa_index_buffer *ib,
                          GLboolean index_bounds_valid,
                          GLuint min_index,
                          GLuint max_index)
{
    GLboolean retval;

    /* This check should get folded into just the places that
     * min/max index are really needed.
     */
    if (!index_bounds_valid) {
        vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
    }

    if (min_index) {
        vbo_rebase_prims( ctx, arrays, prim, nr_prims, ib, min_index, max_index, r300DrawPrims );
        return;
    }

    /* Make an attempt at drawing */
    retval = r300TryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

    /* If that failed, run the tnl pipeline - it should take care of fallbacks */
    if (!retval)
        _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
}

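/* Plug the r300 drawing function into the vbo module. */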
void r300InitDraw(GLcontext *ctx)
{
    struct vbo_context *vbo = vbo_context(ctx);

    vbo->draw_prims = r300DrawPrims;
}