1 /**************************************************************************
3 * Copyright 2009 Maciej Cencora
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL THE AUTHOR(S) AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 **************************************************************************/
29 #include "main/glheader.h"
30 #include "main/context.h"
31 #include "main/state.h"
32 #include "main/api_validate.h"
33 #include "main/enums.h"
34 #include "main/simple_list.h"
37 #include "r300_context.h"
38 #include "r300_emit.h"
39 #include "r300_render.h"
40 #include "r300_state.h"
42 #include "r300_cmdbuf.h"
44 #include "radeon_buffer_objects.h"
45 #include "radeon_common_context.h"
48 #include "tnl/t_vp_build.h"
49 #include "vbo/vbo_context.h"
50 #include "swrast/swrast.h"
51 #include "swrast_setup/swrast_setup.h"
54 static int getTypeSize(GLenum type
)
58 return sizeof(GLdouble
);
60 return sizeof(GLfloat
);
64 return sizeof(GLuint
);
66 return sizeof(GLshort
);
67 case GL_UNSIGNED_SHORT
:
68 return sizeof(GLushort
);
70 return sizeof(GLbyte
);
71 case GL_UNSIGNED_BYTE
:
72 return sizeof(GLubyte
);
79 static void r300FixupIndexBuffer(GLcontext
*ctx
, const struct _mesa_index_buffer
*mesa_ind_buf
)
81 r300ContextPtr r300
= R300_CONTEXT(ctx
);
85 GLboolean mapped_named_bo
= GL_FALSE
;
87 if (mesa_ind_buf
->obj
->Name
&& !mesa_ind_buf
->obj
->Pointer
) {
88 ctx
->Driver
.MapBuffer(ctx
, GL_ELEMENT_ARRAY_BUFFER
, GL_READ_ONLY_ARB
, mesa_ind_buf
->obj
);
89 mapped_named_bo
= GL_TRUE
;
90 assert(mesa_ind_buf
->obj
->Pointer
!= NULL
);
92 src_ptr
= ADD_POINTERS(mesa_ind_buf
->obj
->Pointer
, mesa_ind_buf
->ptr
);
94 radeon_print(RADEON_FALLBACKS
, RADEON_IMPORTANT
,
95 "%s: Fixing index buffer format. type %d\n",
96 __func__
, mesa_ind_buf
->type
);
98 if (mesa_ind_buf
->type
== GL_UNSIGNED_BYTE
) {
99 GLuint size
= sizeof(GLushort
) * ((mesa_ind_buf
->count
+ 1) & ~1);
100 GLubyte
*in
= (GLubyte
*)src_ptr
;
102 radeonAllocDmaRegion(&r300
->radeon
, &r300
->ind_buf
.bo
, &r300
->ind_buf
.bo_offset
, size
, 4);
103 radeon_bo_map(r300
->ind_buf
.bo
, 1);
104 assert(r300
->ind_buf
.bo
->ptr
!= NULL
);
105 out
= (GLuint
*)ADD_POINTERS(r300
->ind_buf
.bo
->ptr
, r300
->ind_buf
.bo_offset
);
107 for (i
= 0; i
+ 1 < mesa_ind_buf
->count
; i
+= 2) {
108 *out
++ = in
[i
] | in
[i
+ 1] << 16;
111 if (i
< mesa_ind_buf
->count
) {
114 radeon_bo_unmap(r300
->ind_buf
.bo
);
116 } else { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
117 GLushort
*in
= (GLushort
*)src_ptr
;
118 GLuint size
= sizeof(GLushort
) * ((mesa_ind_buf
->count
+ 1) & ~1);
120 radeonAllocDmaRegion(&r300
->radeon
, &r300
->ind_buf
.bo
,
121 &r300
->ind_buf
.bo_offset
, size
, 4);
123 radeon_bo_map(r300
->ind_buf
.bo
, 1);
124 assert(r300
->ind_buf
.bo
->ptr
!= NULL
);
125 out
= (GLuint
*)ADD_POINTERS(r300
->ind_buf
.bo
->ptr
, r300
->ind_buf
.bo_offset
);
127 for (i
= 0; i
+ 1 < mesa_ind_buf
->count
; i
+= 2) {
128 *out
++ = in
[i
] | in
[i
+ 1] << 16;
131 if (i
< mesa_ind_buf
->count
) {
134 radeon_bo_unmap(r300
->ind_buf
.bo
);
138 r300
->ind_buf
.is_32bit
= GL_FALSE
;
139 r300
->ind_buf
.count
= mesa_ind_buf
->count
;
141 if (mapped_named_bo
) {
142 ctx
->Driver
.UnmapBuffer(ctx
, GL_ELEMENT_ARRAY_BUFFER
, mesa_ind_buf
->obj
);
147 static void r300SetupIndexBuffer(GLcontext
*ctx
, const struct _mesa_index_buffer
*mesa_ind_buf
)
149 r300ContextPtr r300
= R300_CONTEXT(ctx
);
152 r300
->ind_buf
.bo
= NULL
;
155 radeon_print(RADEON_RENDER
, RADEON_TRACE
, "%s\n", __func__
);
158 if (mesa_ind_buf
->type
== GL_UNSIGNED_INT
) {
160 if (mesa_ind_buf
->type
!= GL_UNSIGNED_BYTE
) {
162 const GLvoid
*src_ptr
;
164 GLboolean mapped_named_bo
= GL_FALSE
;
166 if (mesa_ind_buf
->obj
->Name
&& !mesa_ind_buf
->obj
->Pointer
) {
167 ctx
->Driver
.MapBuffer(ctx
, GL_ELEMENT_ARRAY_BUFFER
, GL_READ_ONLY_ARB
, mesa_ind_buf
->obj
);
168 assert(mesa_ind_buf
->obj
->Pointer
!= NULL
);
169 mapped_named_bo
= GL_TRUE
;
172 src_ptr
= ADD_POINTERS(mesa_ind_buf
->obj
->Pointer
, mesa_ind_buf
->ptr
);
174 const GLuint size
= mesa_ind_buf
->count
* getTypeSize(mesa_ind_buf
->type
);
176 radeonAllocDmaRegion(&r300
->radeon
, &r300
->ind_buf
.bo
, &r300
->ind_buf
.bo_offset
, size
, 4);
178 radeon_bo_map(r300
->ind_buf
.bo
, 1);
179 assert(r300
->ind_buf
.bo
->ptr
!= NULL
);
180 dst_ptr
= ADD_POINTERS(r300
->ind_buf
.bo
->ptr
, r300
->ind_buf
.bo_offset
);
181 _mesa_memcpy(dst_ptr
, src_ptr
, size
);
183 radeon_bo_unmap(r300
->ind_buf
.bo
);
184 r300
->ind_buf
.is_32bit
= (mesa_ind_buf
->type
== GL_UNSIGNED_INT
);
185 r300
->ind_buf
.count
= mesa_ind_buf
->count
;
187 if (mapped_named_bo
) {
188 ctx
->Driver
.UnmapBuffer(ctx
, GL_ELEMENT_ARRAY_BUFFER
, mesa_ind_buf
->obj
);
191 r300FixupIndexBuffer(ctx
, mesa_ind_buf
);
/* Convert `count` vertices of `input->Size` components each from TYPE at
 * src_ptr (advancing by `stride` bytes per vertex) into packed GLfloats at
 * dst_ptr.  Normalized arrays go through MACRO (e.g. UBYTE_TO_FLOAT);
 * non-normalized values take a plain (GLfloat) cast.
 * Relies on count/stride/src_ptr/dst_ptr/input in the caller's scope.
 */
#define CONVERT( TYPE, MACRO ) do {		\
	GLuint i, j, sz;			\
	sz = input->Size;			\
	if (input->Normalized) {		\
		for (i = 0; i < count; i++) {	\
			const TYPE *in = (TYPE *)src_ptr;	\
			for (j = 0; j < sz; j++) {		\
				*dst_ptr++ = MACRO(*in);	\
				in++;				\
			}					\
			src_ptr += stride;			\
		}						\
	} else {					\
		for (i = 0; i < count; i++) {	\
			const TYPE *in = (TYPE *)src_ptr;	\
			for (j = 0; j < sz; j++) {		\
				*dst_ptr++ = (GLfloat)(*in);	\
				in++;				\
			}					\
			src_ptr += stride;			\
		}						\
	}						\
} while (0)
220 * Convert attribute data type to float
221 * If the attribute uses named buffer object replace the bo with newly allocated bo
223 static void r300ConvertAttrib(GLcontext
*ctx
, int count
, const struct gl_client_array
*input
, struct vertex_attribute
*attr
)
225 r300ContextPtr r300
= R300_CONTEXT(ctx
);
226 const GLvoid
*src_ptr
;
227 GLboolean mapped_named_bo
= GL_FALSE
;
231 stride
= (input
->StrideB
== 0) ? getTypeSize(input
->Type
) * input
->Size
: input
->StrideB
;
233 /* Convert value for first element only */
234 if (input
->StrideB
== 0)
237 if (input
->BufferObj
->Name
) {
238 if (!input
->BufferObj
->Pointer
) {
239 ctx
->Driver
.MapBuffer(ctx
, GL_ARRAY_BUFFER
, GL_READ_ONLY_ARB
, input
->BufferObj
);
240 mapped_named_bo
= GL_TRUE
;
243 src_ptr
= ADD_POINTERS(input
->BufferObj
->Pointer
, input
->Ptr
);
245 src_ptr
= input
->Ptr
;
248 radeonAllocDmaRegion(&r300
->radeon
, &attr
->bo
, &attr
->bo_offset
, sizeof(GLfloat
) * input
->Size
* count
, 32);
249 radeon_bo_map(attr
->bo
, 1);
250 dst_ptr
= (GLfloat
*)ADD_POINTERS(attr
->bo
->ptr
, attr
->bo_offset
);
252 radeon_print(RADEON_FALLBACKS
, RADEON_IMPORTANT
,
253 "%s: Converting vertex attributes, attribute data format %x,"
254 "stride %d, components %d\n"
255 , __FUNCTION__
, input
->Type
256 , stride
, input
->Size
);
258 assert(src_ptr
!= NULL
);
260 switch (input
->Type
) {
262 CONVERT(GLdouble
, (GLfloat
));
264 case GL_UNSIGNED_INT
:
265 CONVERT(GLuint
, UINT_TO_FLOAT
);
268 CONVERT(GLint
, INT_TO_FLOAT
);
270 case GL_UNSIGNED_SHORT
:
271 CONVERT(GLushort
, USHORT_TO_FLOAT
);
274 CONVERT(GLshort
, SHORT_TO_FLOAT
);
276 case GL_UNSIGNED_BYTE
:
277 assert(input
->Format
!= GL_BGRA
);
278 CONVERT(GLubyte
, UBYTE_TO_FLOAT
);
281 CONVERT(GLbyte
, BYTE_TO_FLOAT
);
288 radeon_bo_unmap(attr
->bo
);
289 if (mapped_named_bo
) {
290 ctx
->Driver
.UnmapBuffer(ctx
, GL_ARRAY_BUFFER
, input
->BufferObj
);
294 static void r300AlignDataToDword(GLcontext
*ctx
, const struct gl_client_array
*input
, int count
, struct vertex_attribute
*attr
)
296 r300ContextPtr r300
= R300_CONTEXT(ctx
);
297 const int dst_stride
= (input
->StrideB
+ 3) & ~3;
298 const int size
= getTypeSize(input
->Type
) * input
->Size
* count
;
299 GLboolean mapped_named_bo
= GL_FALSE
;
301 radeonAllocDmaRegion(&r300
->radeon
, &attr
->bo
, &attr
->bo_offset
, size
, 32);
303 radeon_bo_map(attr
->bo
, 1);
305 if (!input
->BufferObj
->Pointer
) {
306 ctx
->Driver
.MapBuffer(ctx
, GL_ARRAY_BUFFER
, GL_READ_ONLY_ARB
, input
->BufferObj
);
307 mapped_named_bo
= GL_TRUE
;
310 radeon_print(RADEON_FALLBACKS
, RADEON_IMPORTANT
, "%s. Vertex alignment doesn't match hw requirements.\n", __func__
);
313 GLvoid
*src_ptr
= ADD_POINTERS(input
->BufferObj
->Pointer
, input
->Ptr
);
314 GLvoid
*dst_ptr
= ADD_POINTERS(attr
->bo
->ptr
, attr
->bo_offset
);
317 for (i
= 0; i
< count
; ++i
) {
318 _mesa_memcpy(dst_ptr
, src_ptr
, input
->StrideB
);
319 src_ptr
+= input
->StrideB
;
320 dst_ptr
+= dst_stride
;
324 if (mapped_named_bo
) {
325 ctx
->Driver
.UnmapBuffer(ctx
, GL_ARRAY_BUFFER
, input
->BufferObj
);
328 radeon_bo_unmap(attr
->bo
);
329 attr
->stride
= dst_stride
;
332 static void r300TranslateAttrib(GLcontext
*ctx
, GLuint attr
, int count
, const struct gl_client_array
*input
)
334 r300ContextPtr r300
= R300_CONTEXT(ctx
);
335 struct r300_vertex_buffer
*vbuf
= &r300
->vbuf
;
336 struct vertex_attribute r300_attr
;
340 radeon_print(RADEON_RENDER
, RADEON_TRACE
, "%s\n", __func__
);
341 stride
= (input
->StrideB
== 0) ? getTypeSize(input
->Type
) * input
->Size
: input
->StrideB
;
343 if (input
->Type
== GL_DOUBLE
|| input
->Type
== GL_UNSIGNED_INT
|| input
->Type
== GL_INT
||
345 getTypeSize(input
->Type
) != 4 ||
351 if (input
->StrideB
== 0) {
352 r300_attr
.stride
= 0;
354 r300_attr
.stride
= sizeof(GLfloat
) * input
->Size
;
356 r300_attr
.dwords
= input
->Size
;
357 r300_attr
.is_named_bo
= GL_FALSE
;
360 r300_attr
.dwords
= (getTypeSize(type
) * input
->Size
+ 3)/ 4;
361 if (!input
->BufferObj
->Name
) {
363 if (input
->StrideB
== 0) {
364 r300_attr
.stride
= 0;
366 r300_attr
.stride
= (getTypeSize(type
) * input
->Size
+ 3) & ~3;
369 r300_attr
.is_named_bo
= GL_FALSE
;
373 r300_attr
.size
= input
->Size
;
374 r300_attr
.element
= attr
;
375 r300_attr
.dst_loc
= vbuf
->num_attribs
;
379 switch (input
->Size
) {
380 case 1: r300_attr
.data_type
= R300_DATA_TYPE_FLOAT_1
; break;
381 case 2: r300_attr
.data_type
= R300_DATA_TYPE_FLOAT_2
; break;
382 case 3: r300_attr
.data_type
= R300_DATA_TYPE_FLOAT_3
; break;
383 case 4: r300_attr
.data_type
= R300_DATA_TYPE_FLOAT_4
; break;
385 r300_attr
._signed
= 0;
386 r300_attr
.normalize
= 0;
389 r300_attr
._signed
= 1;
390 r300_attr
.normalize
= input
->Normalized
;
391 switch (input
->Size
) {
394 r300_attr
.data_type
= R300_DATA_TYPE_SHORT_2
;
398 r300_attr
.data_type
= R300_DATA_TYPE_SHORT_4
;
403 r300_attr
._signed
= 1;
404 r300_attr
.normalize
= input
->Normalized
;
405 r300_attr
.data_type
= R300_DATA_TYPE_BYTE
;
407 case GL_UNSIGNED_SHORT
:
408 r300_attr
._signed
= 0;
409 r300_attr
.normalize
= input
->Normalized
;
410 switch (input
->Size
) {
413 r300_attr
.data_type
= R300_DATA_TYPE_SHORT_2
;
417 r300_attr
.data_type
= R300_DATA_TYPE_SHORT_4
;
421 case GL_UNSIGNED_BYTE
:
422 r300_attr
._signed
= 0;
423 r300_attr
.normalize
= input
->Normalized
;
424 if (input
->Format
== GL_BGRA
)
425 r300_attr
.data_type
= R300_DATA_TYPE_D3DCOLOR
;
427 r300_attr
.data_type
= R300_DATA_TYPE_BYTE
;
433 case GL_UNSIGNED_INT
:
438 switch (input
->Size
) {
440 r300_attr
.swizzle
= SWIZZLE_XYZW
;
443 r300_attr
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
, SWIZZLE_Z
, SWIZZLE_ONE
);
446 r300_attr
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
, SWIZZLE_ZERO
, SWIZZLE_ONE
);
449 r300_attr
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_ZERO
, SWIZZLE_ZERO
, SWIZZLE_ONE
);
453 r300_attr
.write_mask
= MASK_XYZW
;
455 vbuf
->attribs
[vbuf
->num_attribs
] = r300_attr
;
459 static void r300SetVertexFormat(GLcontext
*ctx
, const struct gl_client_array
*arrays
[], int count
)
461 r300ContextPtr r300
= R300_CONTEXT(ctx
);
462 struct r300_vertex_buffer
*vbuf
= &r300
->vbuf
;
463 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
, "%s\n", __func__
);
467 tmp
= r300
->selected_vp
->code
.InputsRead
;
469 vbuf
->num_attribs
= 0;
471 /* find first enabled bit */
477 r300TranslateAttrib(ctx
, i
, count
, arrays
[i
]);
484 r300SwitchFallback(ctx
, R300_FALLBACK_AOS_LIMIT
, vbuf
->num_attribs
> R300_MAX_AOS_ARRAYS
);
489 static void r300AllocDmaRegions(GLcontext
*ctx
, const struct gl_client_array
*input
[], int count
)
491 r300ContextPtr r300
= R300_CONTEXT(ctx
);
492 struct r300_vertex_buffer
*vbuf
= &r300
->vbuf
;
496 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
,
497 "%s: count %d num_attribs %d\n",
498 __func__
, count
, vbuf
->num_attribs
);
500 for (index
= 0; index
< vbuf
->num_attribs
; index
++) {
501 struct radeon_aos
*aos
= &r300
->radeon
.tcl
.aos
[index
];
502 i
= vbuf
->attribs
[index
].element
;
504 stride
= (input
[i
]->StrideB
== 0) ? getTypeSize(input
[i
]->Type
) * input
[i
]->Size
: input
[i
]->StrideB
;
506 if (input
[i
]->Type
== GL_DOUBLE
|| input
[i
]->Type
== GL_UNSIGNED_INT
|| input
[i
]->Type
== GL_INT
||
508 getTypeSize(input
[i
]->Type
) != 4 ||
512 r300ConvertAttrib(ctx
, count
, input
[i
], &vbuf
->attribs
[index
]);
514 if (input
[i
]->BufferObj
->Name
) {
515 if (stride
% 4 != 0) {
516 assert(((intptr_t) input
[i
]->Ptr
) % input
[i
]->StrideB
== 0);
517 r300AlignDataToDword(ctx
, input
[i
], count
, &vbuf
->attribs
[index
]);
518 vbuf
->attribs
[index
].is_named_bo
= GL_FALSE
;
520 vbuf
->attribs
[index
].stride
= input
[i
]->StrideB
;
521 vbuf
->attribs
[index
].bo_offset
= (intptr_t) input
[i
]->Ptr
;
522 vbuf
->attribs
[index
].bo
= get_radeon_buffer_object(input
[i
]->BufferObj
)->bo
;
523 vbuf
->attribs
[index
].is_named_bo
= GL_TRUE
;
528 int local_count
= count
;
531 if (input
[i
]->StrideB
== 0) {
532 size
= getTypeSize(input
[i
]->Type
) * input
[i
]->Size
;
535 size
= getTypeSize(input
[i
]->Type
) * input
[i
]->Size
* local_count
;
538 radeonAllocDmaRegion(&r300
->radeon
, &vbuf
->attribs
[index
].bo
, &vbuf
->attribs
[index
].bo_offset
, size
, 32);
539 radeon_bo_map(vbuf
->attribs
[index
].bo
, 1);
540 assert(vbuf
->attribs
[index
].bo
->ptr
!= NULL
);
541 dst
= (uint32_t *)ADD_POINTERS(vbuf
->attribs
[index
].bo
->ptr
, vbuf
->attribs
[index
].bo_offset
);
542 switch (vbuf
->attribs
[index
].dwords
) {
543 case 1: radeonEmitVec4(dst
, input
[i
]->Ptr
, input
[i
]->StrideB
, local_count
); break;
544 case 2: radeonEmitVec8(dst
, input
[i
]->Ptr
, input
[i
]->StrideB
, local_count
); break;
545 case 3: radeonEmitVec12(dst
, input
[i
]->Ptr
, input
[i
]->StrideB
, local_count
); break;
546 case 4: radeonEmitVec16(dst
, input
[i
]->Ptr
, input
[i
]->StrideB
, local_count
); break;
547 default: assert(0); break;
549 radeon_bo_unmap(vbuf
->attribs
[index
].bo
);
554 aos
->count
= vbuf
->attribs
[index
].stride
== 0 ? 1 : count
;
555 aos
->stride
= vbuf
->attribs
[index
].stride
/ sizeof(float);
556 aos
->components
= vbuf
->attribs
[index
].dwords
;
557 aos
->bo
= vbuf
->attribs
[index
].bo
;
558 aos
->offset
= vbuf
->attribs
[index
].bo_offset
;
560 if (vbuf
->attribs
[index
].is_named_bo
) {
561 radeon_cs_space_add_persistent_bo(r300
->radeon
.cmdbuf
.cs
, r300
->vbuf
.attribs
[index
].bo
, RADEON_GEM_DOMAIN_GTT
, 0);
565 r300
->radeon
.tcl
.aos_count
= vbuf
->num_attribs
;
566 ret
= radeon_cs_space_check_with_bo(r300
->radeon
.cmdbuf
.cs
, first_elem(&r300
->radeon
.dma
.reserved
)->bo
, RADEON_GEM_DOMAIN_GTT
, 0);
567 r300SwitchFallback(ctx
, R300_FALLBACK_INVALID_BUFFERS
, ret
);
571 static void r300FreeData(GLcontext
*ctx
)
573 /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
574 * to prevent double unref in radeonReleaseArrays
575 * called during context destroy
577 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
, "%s\n", __func__
);
578 r300ContextPtr r300
= R300_CONTEXT(ctx
);
582 for (i
= 0; i
< r300
->vbuf
.num_attribs
; i
++) {
583 if (!r300
->vbuf
.attribs
[i
].is_named_bo
) {
584 radeon_bo_unref(r300
->vbuf
.attribs
[i
].bo
);
586 r300
->radeon
.tcl
.aos
[i
].bo
= NULL
;
591 if (r300
->ind_buf
.bo
!= NULL
) {
592 radeon_bo_unref(r300
->ind_buf
.bo
);
597 static GLuint
r300PredictTryDrawPrimsSize(GLcontext
*ctx
, GLuint nr_prims
)
599 struct r300_context
*r300
= R300_CONTEXT(ctx
);
600 struct r300_vertex_buffer
*vbuf
= &r300
->vbuf
;
605 dwords
= 2*CACHE_FLUSH_BUFSZ
;
606 dwords
+= PRE_EMIT_STATE_BUFSZ
;
607 dwords
+= (AOS_BUFSZ(vbuf
->num_attribs
)
609 + FIREAOS_BUFSZ
)*nr_prims
;
611 state_size
= radeonCountStateEmitSize(&r300
->radeon
);
612 flushed
= rcommonEnsureCmdBufSpace(&r300
->radeon
,
616 dwords
+= radeonCountStateEmitSize(&r300
->radeon
);
618 dwords
+= state_size
;
620 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
, "%s: total prediction size is %d.\n", __FUNCTION__
, dwords
);
624 static GLboolean
r300TryDrawPrims(GLcontext
*ctx
,
625 const struct gl_client_array
*arrays
[],
626 const struct _mesa_prim
*prim
,
628 const struct _mesa_index_buffer
*ib
,
632 struct r300_context
*r300
= R300_CONTEXT(ctx
);
635 radeon_print(RADEON_RENDER
, RADEON_NORMAL
, "%s: %u (%d-%d) cs begin at %d\n",
636 __FUNCTION__
, nr_prims
, min_index
, max_index
, r300
->radeon
.cmdbuf
.cs
->cdw
);
639 _mesa_update_state( ctx
);
641 if (r300
->options
.hw_tcl_enabled
)
642 _tnl_UpdateFixedFunctionProgram(ctx
);
644 r300UpdateShaders(r300
);
646 r300SwitchFallback(ctx
, R300_FALLBACK_INVALID_BUFFERS
, !r300ValidateBuffers(ctx
));
648 r300SetVertexFormat(ctx
, arrays
, max_index
+ 1);
653 r300SetupVAP(ctx
, r300
->selected_vp
->code
.InputsRead
, r300
->selected_vp
->code
.OutputsWritten
);
655 r300UpdateShaderStates(r300
);
657 /* ensure we have the cmd buf space in advance to cover
658 * the state + DMA AOS pointers */
659 GLuint emit_end
= r300PredictTryDrawPrimsSize(ctx
, nr_prims
)
660 + r300
->radeon
.cmdbuf
.cs
->cdw
;
662 r300SetupIndexBuffer(ctx
, ib
);
664 r300AllocDmaRegions(ctx
, arrays
, max_index
+ 1);
669 r300EmitCacheFlush(r300
);
670 radeonEmitState(&r300
->radeon
);
672 for (i
= 0; i
< nr_prims
; ++i
) {
673 r300RunRenderPrimitive(ctx
, prim
[i
].start
, prim
[i
].start
+ prim
[i
].count
, prim
[i
].mode
);
676 r300EmitCacheFlush(r300
);
680 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
, "%s: %u (%d-%d) cs ending at %d\n",
681 __FUNCTION__
, nr_prims
, min_index
, max_index
, r300
->radeon
.cmdbuf
.cs
->cdw
);
683 if (emit_end
< r300
->radeon
.cmdbuf
.cs
->cdw
)
684 WARN_ONCE("Rendering was %d commands larger than predicted size."
685 " We might overflow command buffer.\n", r300
->radeon
.cmdbuf
.cs
->cdw
- emit_end
);
690 static void r300DrawPrims(GLcontext
*ctx
,
691 const struct gl_client_array
*arrays
[],
692 const struct _mesa_prim
*prim
,
694 const struct _mesa_index_buffer
*ib
,
695 GLboolean index_bounds_valid
,
701 /* This check should get folded into just the places that
702 * min/max index are really needed.
704 if (!index_bounds_valid
) {
705 vbo_get_minmax_index(ctx
, prim
, ib
, &min_index
, &max_index
);
709 radeon_print(RADEON_FALLBACKS
, RADEON_IMPORTANT
,
710 "%s: Rebasing primitives. %p nr_prims %d min_index %u max_index %u\n",
711 __func__
, prim
, nr_prims
, min_index
, max_index
);
712 vbo_rebase_prims( ctx
, arrays
, prim
, nr_prims
, ib
, min_index
, max_index
, r300DrawPrims
);
716 /* Make an attempt at drawing */
717 retval
= r300TryDrawPrims(ctx
, arrays
, prim
, nr_prims
, ib
, min_index
, max_index
);
719 /* If failed run tnl pipeline - it should take care of fallbacks */
721 _tnl_draw_prims(ctx
, arrays
, prim
, nr_prims
, ib
, min_index
, max_index
);
724 void r300InitDraw(GLcontext
*ctx
)
726 struct vbo_context
*vbo
= vbo_context(ctx
);
728 vbo
->draw_prims
= r300DrawPrims
;