r300: Predict emit size for next rendering operation.
[mesa.git] / src / mesa / drivers / dri / r300 / r300_draw.c
/**************************************************************************
 *
 * Copyright 2009 Maciej Cencora
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHOR(S) AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <stdlib.h>

#include "main/glheader.h"
#include "main/context.h"
#include "main/state.h"
#include "main/api_validate.h"
#include "main/enums.h"
#include "main/simple_list.h"

#include "r300_reg.h"
#include "r300_context.h"
#include "r300_emit.h"
#include "r300_render.h"
#include "r300_state.h"
#include "r300_tex.h"
#include "r300_cmdbuf.h"

#include "radeon_buffer_objects.h"

#include "tnl/tnl.h"
#include "tnl/t_vp_build.h"
#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"


static int getTypeSize(GLenum type)
{
    switch (type) {
    case GL_DOUBLE:
        return sizeof(GLdouble);
    case GL_FLOAT:
        return sizeof(GLfloat);
    case GL_INT:
        return sizeof(GLint);
    case GL_UNSIGNED_INT:
        return sizeof(GLuint);
    case GL_SHORT:
        return sizeof(GLshort);
    case GL_UNSIGNED_SHORT:
        return sizeof(GLushort);
    case GL_BYTE:
        return sizeof(GLbyte);
    case GL_UNSIGNED_BYTE:
        return sizeof(GLubyte);
    default:
        assert(0);
        return 0;
    }
}

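/* Convert index data the hardware cannot fetch directly (GL_UNSIGNED_BYTE,
 * and GL_UNSIGNED_SHORT on big-endian hosts) into 16-bit indices packed two
 * per dword in a freshly allocated DMA region.
 */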
static void r300FixupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    r300ContextPtr r300 = R300_CONTEXT(ctx);
    GLvoid *src_ptr;
    GLuint *out;
    int i;
    GLboolean mapped_named_bo = GL_FALSE;

    if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer) {
        ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
        mapped_named_bo = GL_TRUE;
        assert(mesa_ind_buf->obj->Pointer != NULL);
    }
    src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

    if (mesa_ind_buf->type == GL_UNSIGNED_BYTE) {
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
        GLubyte *in = (GLubyte *)src_ptr;

        radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);

        assert(r300->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2) {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count) {
            *out++ = in[i];
        }

#if MESA_BIG_ENDIAN
    } else { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
        GLuint size;
        GLushort *in = (GLushort *)src_ptr;
        size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

        radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);

        assert(r300->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2) {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count) {
            *out++ = in[i];
        }
#endif
    }

    r300->ind_buf.is_32bit = GL_FALSE;
    r300->ind_buf.count = mesa_ind_buf->count;

    if (mapped_named_bo) {
        ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
    }
}

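/* Upload the index buffer into a DMA region.  Types the hardware can fetch
 * directly are copied verbatim; 8-bit (and, on big-endian hosts, 16-bit)
 * indices are repacked by r300FixupIndexBuffer.
 */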
static void r300SetupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    r300ContextPtr r300 = R300_CONTEXT(ctx);

    if (!mesa_ind_buf) {
        r300->ind_buf.bo = NULL;
        return;
    }

#if MESA_BIG_ENDIAN
    if (mesa_ind_buf->type == GL_UNSIGNED_INT) {
#else
    if (mesa_ind_buf->type != GL_UNSIGNED_BYTE) {
#endif
        const GLvoid *src_ptr;
        GLvoid *dst_ptr;
        GLboolean mapped_named_bo = GL_FALSE;

        if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer) {
            ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
            assert(mesa_ind_buf->obj->Pointer != NULL);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

        const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);

        radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);

        assert(r300->ind_buf.bo->ptr != NULL);
        dst_ptr = ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);
        _mesa_memcpy(dst_ptr, src_ptr, size);

        r300->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
        r300->ind_buf.count = mesa_ind_buf->count;

        if (mapped_named_bo) {
            ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
        }
    } else {
        r300FixupIndexBuffer(ctx, mesa_ind_buf);
    }
}

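/* Expand each of `count` source elements to GLfloat, applying MACRO
 * (e.g. UBYTE_TO_FLOAT) per component when the array is normalized and a
 * plain cast otherwise.
 */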
#define CONVERT( TYPE, MACRO ) do {                 \
    GLuint i, j, sz;                                \
    sz = input->Size;                               \
    if (input->Normalized) {                        \
        for (i = 0; i < count; i++) {               \
            const TYPE *in = (TYPE *)src_ptr;       \
            for (j = 0; j < sz; j++) {              \
                *dst_ptr++ = MACRO(*in);            \
                in++;                               \
            }                                       \
            src_ptr += stride;                      \
        }                                           \
    } else {                                        \
        for (i = 0; i < count; i++) {               \
            const TYPE *in = (TYPE *)src_ptr;       \
            for (j = 0; j < sz; j++) {              \
                *dst_ptr++ = (GLfloat)(*in);        \
                in++;                               \
            }                                       \
            src_ptr += stride;                      \
        }                                           \
    }                                               \
} while (0)

/**
 * Convert attribute data to float.
 * If the attribute uses a named buffer object, replace the bo with a newly
 * allocated one.
 */
static void r300ConvertAttrib(GLcontext *ctx, int count, const struct gl_client_array *input, struct vertex_attribute *attr)
{
    r300ContextPtr r300 = R300_CONTEXT(ctx);
    const GLvoid *src_ptr;
    GLboolean mapped_named_bo = GL_FALSE;
    GLfloat *dst_ptr;
    GLuint stride;

    stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;

    /* Convert the value for the first element only */
    if (input->StrideB == 0)
        count = 1;

    if (input->BufferObj->Name) {
        if (!input->BufferObj->Pointer) {
            ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
    } else {
        src_ptr = input->Ptr;
    }

    radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, sizeof(GLfloat) * input->Size * count, 32);
    dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

    if (RADEON_DEBUG & DEBUG_FALLBACKS) {
        fprintf(stderr, "%s: Converting vertex attributes, attribute data format %x,", __FUNCTION__, input->Type);
        fprintf(stderr, "stride %d, components %d\n", stride, input->Size);
    }

    assert(src_ptr != NULL);

    switch (input->Type) {
    case GL_DOUBLE:
        CONVERT(GLdouble, (GLfloat));
        break;
    case GL_UNSIGNED_INT:
        CONVERT(GLuint, UINT_TO_FLOAT);
        break;
    case GL_INT:
        CONVERT(GLint, INT_TO_FLOAT);
        break;
    case GL_UNSIGNED_SHORT:
        CONVERT(GLushort, USHORT_TO_FLOAT);
        break;
    case GL_SHORT:
        CONVERT(GLshort, SHORT_TO_FLOAT);
        break;
    case GL_UNSIGNED_BYTE:
        assert(input->Format != GL_BGRA);
        CONVERT(GLubyte, UBYTE_TO_FLOAT);
        break;
    case GL_BYTE:
        CONVERT(GLbyte, BYTE_TO_FLOAT);
        break;
    default:
        assert(0);
        break;
    }

    if (mapped_named_bo) {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }
}

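/* Copy a named buffer object's vertex data into a DMA region, padding each
 * element out to a dword-aligned stride so the hardware can fetch it.
 */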
static void r300AlignDataToDword(GLcontext *ctx, const struct gl_client_array *input, int count, struct vertex_attribute *attr)
{
    r300ContextPtr r300 = R300_CONTEXT(ctx);
    const int dst_stride = (input->StrideB + 3) & ~3;
    const int size = getTypeSize(input->Type) * input->Size * count;
    GLboolean mapped_named_bo = GL_FALSE;

    radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, size, 32);

    if (!input->BufferObj->Pointer) {
        ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
        mapped_named_bo = GL_TRUE;
    }

    {
        GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
        GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
        int i;

        for (i = 0; i < count; ++i) {
            _mesa_memcpy(dst_ptr, src_ptr, input->StrideB);
            src_ptr += input->StrideB;
            dst_ptr += dst_stride;
        }
    }

    if (mapped_named_bo) {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }

    attr->stride = dst_stride;
}

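/* Translate one gl_client_array into the r300 vertex_attribute description
 * (hardware data type, stride, dword count, swizzle).  Types the VAP cannot
 * fetch natively are marked for conversion to float.
 */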
static void r300TranslateAttrib(GLcontext *ctx, GLuint attr, int count, const struct gl_client_array *input)
{
    r300ContextPtr r300 = R300_CONTEXT(ctx);
    struct r300_vertex_buffer *vbuf = &r300->vbuf;
    struct vertex_attribute r300_attr;
    GLenum type;
    GLuint stride;

    stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;

    if (input->Type == GL_DOUBLE || input->Type == GL_UNSIGNED_INT || input->Type == GL_INT ||
#if MESA_BIG_ENDIAN
        getTypeSize(input->Type) != 4 ||
#endif
        stride < 4) {

        type = GL_FLOAT;

        if (input->StrideB == 0) {
            r300_attr.stride = 0;
        } else {
            r300_attr.stride = sizeof(GLfloat) * input->Size;
        }
        r300_attr.dwords = input->Size;
        r300_attr.is_named_bo = GL_FALSE;
    } else {
        type = input->Type;
        r300_attr.dwords = (getTypeSize(type) * input->Size + 3) / 4;
        if (!input->BufferObj->Name) {

            if (input->StrideB == 0) {
                r300_attr.stride = 0;
            } else {
                r300_attr.stride = (getTypeSize(type) * input->Size + 3) & ~3;
            }

            r300_attr.is_named_bo = GL_FALSE;
        }
    }

    r300_attr.size = input->Size;
    r300_attr.element = attr;
    r300_attr.dst_loc = vbuf->num_attribs;

    switch (type) {
    case GL_FLOAT:
        switch (input->Size) {
        case 1: r300_attr.data_type = R300_DATA_TYPE_FLOAT_1; break;
        case 2: r300_attr.data_type = R300_DATA_TYPE_FLOAT_2; break;
        case 3: r300_attr.data_type = R300_DATA_TYPE_FLOAT_3; break;
        case 4: r300_attr.data_type = R300_DATA_TYPE_FLOAT_4; break;
        }
        r300_attr._signed = 0;
        r300_attr.normalize = 0;
        break;
    case GL_SHORT:
        r300_attr._signed = 1;
        r300_attr.normalize = input->Normalized;
        switch (input->Size) {
        case 1:
        case 2:
            r300_attr.data_type = R300_DATA_TYPE_SHORT_2;
            break;
        case 3:
        case 4:
            r300_attr.data_type = R300_DATA_TYPE_SHORT_4;
            break;
        }
        break;
    case GL_BYTE:
        r300_attr._signed = 1;
        r300_attr.normalize = input->Normalized;
        r300_attr.data_type = R300_DATA_TYPE_BYTE;
        break;
    case GL_UNSIGNED_SHORT:
        r300_attr._signed = 0;
        r300_attr.normalize = input->Normalized;
        switch (input->Size) {
        case 1:
        case 2:
            r300_attr.data_type = R300_DATA_TYPE_SHORT_2;
            break;
        case 3:
        case 4:
            r300_attr.data_type = R300_DATA_TYPE_SHORT_4;
            break;
        }
        break;
    case GL_UNSIGNED_BYTE:
        r300_attr._signed = 0;
        r300_attr.normalize = input->Normalized;
        if (input->Format == GL_BGRA)
            r300_attr.data_type = R300_DATA_TYPE_D3DCOLOR;
        else
            r300_attr.data_type = R300_DATA_TYPE_BYTE;
        break;

    default:
    case GL_DOUBLE:
    case GL_INT:
    case GL_UNSIGNED_INT:
        assert(0);
        break;
    }

    switch (input->Size) {
    case 4:
        r300_attr.swizzle = SWIZZLE_XYZW;
        break;
    case 3:
        r300_attr.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_ONE);
        break;
    case 2:
        r300_attr.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_ZERO, SWIZZLE_ONE);
        break;
    case 1:
        r300_attr.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_ZERO, SWIZZLE_ZERO, SWIZZLE_ONE);
        break;
    }

    r300_attr.write_mask = MASK_XYZW;

    vbuf->attribs[vbuf->num_attribs] = r300_attr;
    ++vbuf->num_attribs;
}

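/* Walk the vertex program's InputsRead bitmask and build the attribute table
 * for this draw, falling back if more arrays are required than the hardware
 * AOS limit allows.
 */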
static void r300SetVertexFormat(GLcontext *ctx, const struct gl_client_array *arrays[], int count)
{
    r300ContextPtr r300 = R300_CONTEXT(ctx);
    struct r300_vertex_buffer *vbuf = &r300->vbuf;
    {
        int i, tmp;

        tmp = r300->selected_vp->code.InputsRead;
        i = 0;
        vbuf->num_attribs = 0;
        while (tmp) {
            /* find first enabled bit */
            while (!(tmp & 1)) {
                tmp >>= 1;
                ++i;
            }

            r300TranslateAttrib(ctx, i, count, arrays[i]);

            tmp >>= 1;
            ++i;
        }
    }

    r300SwitchFallback(ctx, R300_FALLBACK_AOS_LIMIT, vbuf->num_attribs > R300_MAX_AOS_ARRAYS);
    if (r300->fallback)
        return;
}

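/* For every active attribute, make the data visible to the GPU: reference a
 * named buffer object directly when its layout is already usable, otherwise
 * realign or convert it, or upload user-space arrays into DMA regions; then
 * fill in the radeon_aos descriptors used at emit time.
 */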
static void r300AllocDmaRegions(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
    r300ContextPtr r300 = R300_CONTEXT(ctx);
    struct r300_vertex_buffer *vbuf = &r300->vbuf;
    GLuint stride;
    int ret;
    int i, index;

    for (index = 0; index < vbuf->num_attribs; index++) {
        struct radeon_aos *aos = &r300->radeon.tcl.aos[index];
        i = vbuf->attribs[index].element;

        stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT ||
#if MESA_BIG_ENDIAN
            getTypeSize(input[i]->Type) != 4 ||
#endif
            stride < 4) {

            r300ConvertAttrib(ctx, count, input[i], &vbuf->attribs[index]);
        } else {
            if (input[i]->BufferObj->Name) {
                if (stride % 4 != 0) {
                    assert(((intptr_t) input[i]->Ptr) % input[i]->StrideB == 0);
                    r300AlignDataToDword(ctx, input[i], count, &vbuf->attribs[index]);
                    vbuf->attribs[index].is_named_bo = GL_FALSE;
                } else {
                    vbuf->attribs[index].stride = input[i]->StrideB;
                    vbuf->attribs[index].bo_offset = (intptr_t) input[i]->Ptr;
                    vbuf->attribs[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                    vbuf->attribs[index].is_named_bo = GL_TRUE;
                }
            } else {

                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0) {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                } else {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&r300->radeon, &vbuf->attribs[index].bo, &vbuf->attribs[index].bo_offset, size, 32);
                assert(vbuf->attribs[index].bo->ptr != NULL);
                dst = (uint32_t *)ADD_POINTERS(vbuf->attribs[index].bo->ptr, vbuf->attribs[index].bo_offset);
                switch (vbuf->attribs[index].dwords) {
                case 1: radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
                case 2: radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
                case 3: radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
                case 4: radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
                default: assert(0); break;
                }

            }
        }

        aos->count = vbuf->attribs[index].stride == 0 ? 1 : count;
        aos->stride = vbuf->attribs[index].stride / sizeof(float);
        aos->components = vbuf->attribs[index].dwords;
        aos->bo = vbuf->attribs[index].bo;
        aos->offset = vbuf->attribs[index].bo_offset;

        if (vbuf->attribs[index].is_named_bo) {
            radeon_cs_space_add_persistent_bo(r300->radeon.cmdbuf.cs, r300->vbuf.attribs[index].bo, RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    r300->radeon.tcl.aos_count = vbuf->num_attribs;
    ret = radeon_cs_space_check_with_bo(r300->radeon.cmdbuf.cs, first_elem(&r300->radeon.dma.reserved)->bo, RADEON_GEM_DOMAIN_GTT, 0);
    r300SwitchFallback(ctx, R300_FALLBACK_INVALID_BUFFERS, ret);
}

static void r300FreeData(GLcontext *ctx)
{
    /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
     * to prevent double unref in radeonReleaseArrays
     * called during context destroy
     */
    r300ContextPtr r300 = R300_CONTEXT(ctx);
    {
        int i;

        for (i = 0; i < r300->vbuf.num_attribs; i++) {
            if (!r300->vbuf.attribs[i].is_named_bo) {
                radeon_bo_unref(r300->vbuf.attribs[i].bo);
            }
            r300->radeon.tcl.aos[i].bo = NULL;
        }
    }

    {
        if (r300->ind_buf.bo != NULL) {
            radeon_bo_unref(r300->ind_buf.bo);
        }
    }
}

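/* Predict the emit size for the next rendering operation: cache flushes,
 * pre-emit state, plus AOS, scissor and fire packets per primitive.  Ensure
 * the command buffer has that much room, flushing it first if necessary.
 */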
static GLuint r300PredictTryDrawPrimsSize(GLcontext *ctx, GLuint nr_prims)
{
    struct r300_context *r300 = R300_CONTEXT(ctx);
    struct r300_vertex_buffer *vbuf = &r300->vbuf;
    int flushed;
    GLuint dwords;
    GLuint state_size;

    dwords = 2 * CACHE_FLUSH_BUFSZ;
    dwords += PRE_EMIT_STATE_BUFSZ;
    dwords += (AOS_BUFSZ(vbuf->num_attribs)
               + SCISSORS_BUFSZ
               + FIREAOS_BUFSZ) * nr_prims;

    state_size = radeonCountEmitSize(&r300->radeon);
    flushed = rcommonEnsureCmdBufSpace(&r300->radeon,
                                       dwords + state_size,
                                       __FUNCTION__);
    if (flushed)
        dwords += radeonCountEmitSize(&r300->radeon);
    else
        dwords += state_size;

    if (RADEON_DEBUG & DEBUG_PRIMS)
        fprintf(stderr, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);
    return dwords;
}

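/* Attempt to draw the primitives in hardware.  Validates state and buffers,
 * sets up the vertex format, index buffer and DMA regions, then emits one
 * render packet per primitive.  Returns GL_FALSE when a fallback is needed
 * so the caller can hand the draw to the TNL pipeline.
 */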
static GLboolean r300TryDrawPrims(GLcontext *ctx,
                                  const struct gl_client_array *arrays[],
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims,
                                  const struct _mesa_index_buffer *ib,
                                  GLuint min_index,
                                  GLuint max_index)
{
    struct r300_context *r300 = R300_CONTEXT(ctx);
    GLuint i;

    if (RADEON_DEBUG & DEBUG_PRIMS)
        fprintf(stderr, "%s: %u (%d-%d) cs begin at %d\n",
                __FUNCTION__, nr_prims, min_index, max_index, r300->radeon.cmdbuf.cs->cdw);

    if (ctx->NewState)
        _mesa_update_state(ctx);

    if (r300->options.hw_tcl_enabled)
        _tnl_UpdateFixedFunctionProgram(ctx);

    r300UpdateShaders(r300);

    r300SwitchFallback(ctx, R300_FALLBACK_INVALID_BUFFERS, !r300ValidateBuffers(ctx));

    rcommonEnsureCmdBufSpace(&r300->radeon,
                             r300->radeon.hw.max_state_size + (60 * sizeof(int)),
                             __FUNCTION__);

    r300SetVertexFormat(ctx, arrays, max_index + 1);

    if (r300->fallback)
        return GL_FALSE;

    r300SetupVAP(ctx, r300->selected_vp->code.InputsRead, r300->selected_vp->code.OutputsWritten);

    r300UpdateShaderStates(r300);

    /* ensure we have the cmd buf space in advance to cover
     * the state + DMA AOS pointers */
    r300PredictTryDrawPrimsSize(ctx, nr_prims);

    r300SetupIndexBuffer(ctx, ib);

    r300AllocDmaRegions(ctx, arrays, max_index + 1);

    if (r300->fallback)
        return GL_FALSE;

    r300EmitCacheFlush(r300);
    radeonEmitState(&r300->radeon);

    for (i = 0; i < nr_prims; ++i) {
        r300RunRenderPrimitive(ctx, prim[i].start, prim[i].start + prim[i].count, prim[i].mode);
    }

    r300EmitCacheFlush(r300);

    r300FreeData(ctx);

    return GL_TRUE;
}

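/* vbo draw_prims entry point.  Computes index bounds if the caller did not,
 * rebases the primitives when min_index is non-zero, and falls back to the
 * software TNL pipeline if the hardware path declines the draw.
 */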
static void r300DrawPrims(GLcontext *ctx,
                          const struct gl_client_array *arrays[],
                          const struct _mesa_prim *prim,
                          GLuint nr_prims,
                          const struct _mesa_index_buffer *ib,
                          GLboolean index_bounds_valid,
                          GLuint min_index,
                          GLuint max_index)
{
    GLboolean retval;

    /* This check should get folded into just the places that
     * min/max index are really needed.
     */
    if (!index_bounds_valid) {
        vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
    }

    if (min_index) {
        vbo_rebase_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index, r300DrawPrims);
        return;
    }

    /* Make an attempt at drawing */
    retval = r300TryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

    /* If that failed, run the TNL pipeline - it should take care of fallbacks */
    if (!retval)
        _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
}

void r300InitDraw(GLcontext *ctx)
{
    struct vbo_context *vbo = vbo_context(ctx);

    vbo->draw_prims = r300DrawPrims;
}