1 /**************************************************************************
3 * Copyright 2009 Maciej Cencora
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL THE AUTHOR(S) AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 **************************************************************************/
29 #include "main/glheader.h"
30 #include "main/context.h"
31 #include "main/state.h"
32 #include "main/enums.h"
33 #include "main/simple_list.h"
36 #include "r300_context.h"
37 #include "r300_emit.h"
38 #include "r300_render.h"
39 #include "r300_state.h"
41 #include "r300_cmdbuf.h"
43 #include "radeon_buffer_objects.h"
44 #include "radeon_common_context.h"
47 #include "tnl/t_vp_build.h"
48 #include "vbo/vbo_context.h"
/* Return the size in bytes of a single component of the given GL type enum.
 * NOTE(review): the switch scaffold and most case labels are missing from this
 * extraction; only the return statements survive. The visible returns imply
 * one case per GL type (double, half, float, [u]int, [u]short, [u]byte). */
51 static int getTypeSize(GLenum type)
55 return sizeof(GLdouble);
57 return sizeof(GLhalfARB);
59 return sizeof(GLfloat);
63 return sizeof(GLuint);
65 return sizeof(GLshort);
66 case GL_UNSIGNED_SHORT:
67 return sizeof(GLushort);
69 return sizeof(GLbyte);
70 case GL_UNSIGNED_BYTE:
71 return sizeof(GLubyte);
/* Repack an index buffer into a hardware-friendly format.
 * GL_UNSIGNED_BYTE indices are widened to 16-bit; 16-bit indices are copied
 * into a freshly allocated DMA region packed two-per-dword. The result is
 * stored in r300->ind_buf (always marked as 16-bit, is_32bit = GL_FALSE).
 * If the source lives in a named (user) buffer object it is temporarily
 * mapped read-only and unmapped again at the end.
 * NOTE(review): several interior lines (loop closings, odd-count tail
 * handling) are missing from this extraction. */
78 static void r300FixupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
80 r300ContextPtr r300 = R300_CONTEXT(ctx);
84 GLboolean mapped_named_bo = GL_FALSE;
/* Map the source BO only if it is a named object that is not already mapped. */
86 if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer) {
87 ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
88 mapped_named_bo = GL_TRUE;
89 assert(mesa_ind_buf->obj->Pointer != NULL);
91 src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);
93 radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
94 "%s: Fixing index buffer format. type %d\n",
95 __func__, mesa_ind_buf->type);
97 if (mesa_ind_buf->type == GL_UNSIGNED_BYTE) {
/* Round count up to even: two 16-bit indices are packed per 32-bit word. */
98 GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
99 GLubyte *in = (GLubyte *)src_ptr;
101 radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);
102 radeon_bo_map(r300->ind_buf.bo, 1);
103 assert(r300->ind_buf.bo->ptr != NULL);
104 out = (GLuint *)ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);
/* Widen byte indices pairwise into one dword (low half first). */
106 for (i = 0; i + 1 < mesa_ind_buf->count; i += 2) {
107 *out++ = in[i] | in[i + 1] << 16;
/* Handle the trailing odd index, if any (body missing from extraction). */
110 if (i < mesa_ind_buf->count) {
113 radeon_bo_unmap(r300->ind_buf.bo);
115 } else { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
116 GLushort *in = (GLushort *)src_ptr;
117 GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
119 radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo,
120 &r300->ind_buf.bo_offset, size, 4);
122 radeon_bo_map(r300->ind_buf.bo, 1);
123 assert(r300->ind_buf.bo->ptr != NULL);
124 out = (GLuint *)ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);
/* Pack two 16-bit indices per dword. */
126 for (i = 0; i + 1 < mesa_ind_buf->count; i += 2) {
127 *out++ = in[i] | in[i + 1] << 16;
130 if (i < mesa_ind_buf->count) {
133 radeon_bo_unmap(r300->ind_buf.bo);
/* Repacked buffer is always 16-bit from the hardware's point of view. */
137 r300->ind_buf.is_32bit = GL_FALSE;
138 r300->ind_buf.count = mesa_ind_buf->count;
140 if (mapped_named_bo) {
141 ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
/* Prepare r300->ind_buf for an indexed draw.
 * 32-bit and 16-bit index types are copied verbatim into a DMA region
 * (is_32bit set accordingly); GL_UNSIGNED_BYTE indices are routed to
 * r300FixupIndexBuffer() for widening. Named source buffer objects are
 * mapped read-only for the copy and unmapped afterwards.
 * NOTE(review): some control-flow lines (e.g. the unsigned-int fallback
 * branch around line 157) are missing from this extraction. */
146 static void r300SetupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
148 r300ContextPtr r300 = R300_CONTEXT(ctx);
/* Early-out path (condition lines missing): no index buffer => bo stays NULL. */
151 r300->ind_buf.bo = NULL;
154 radeon_print(RADEON_RENDER, RADEON_TRACE, "%s\n", __func__);
157 if (mesa_ind_buf->type == GL_UNSIGNED_INT) {
159 if (mesa_ind_buf->type != GL_UNSIGNED_BYTE) {
161 const GLvoid *src_ptr;
163 GLboolean mapped_named_bo = GL_FALSE;
165 if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer) {
166 ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
167 assert(mesa_ind_buf->obj->Pointer != NULL);
168 mapped_named_bo = GL_TRUE;
171 src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);
173 const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);
175 radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);
177 radeon_bo_map(r300->ind_buf.bo, 1);
178 assert(r300->ind_buf.bo->ptr != NULL);
/* Straight copy: 16/32-bit indices need no conversion. */
179 dst_ptr = ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);
180 memcpy(dst_ptr, src_ptr, size);
182 radeon_bo_unmap(r300->ind_buf.bo);
183 r300->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
184 r300->ind_buf.count = mesa_ind_buf->count;
186 if (mapped_named_bo) {
187 ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
/* Byte indices need widening before the hardware can consume them. */
190 r300FixupIndexBuffer(ctx, mesa_ind_buf);
/* Helper macro for r300ConvertAttrib(): converts `count` vertices of `sz`
 * components from TYPE to GLfloat at *dst_ptr, advancing through src_ptr.
 * When the array is normalized, MACRO (e.g. UBYTE_TO_FLOAT) performs the
 * normalized-to-float mapping; otherwise a plain (GLfloat) cast is used.
 * NOTE(review): the macro tail (inner-loop closings, src_ptr stride advance,
 * `} while (0)` terminator) is missing from this extraction. */
194 #define CONVERT( TYPE, MACRO ) do { \
197 if (input->Normalized) { \
198 for (i = 0; i < count; i++) { \
199 const TYPE *in = (TYPE *)src_ptr; \
200 for (j = 0; j < sz; j++) { \
201 *dst_ptr++ = MACRO(*in); \
207 for (i = 0; i < count; i++) { \
208 const TYPE *in = (TYPE *)src_ptr; \
209 for (j = 0; j < sz; j++) { \
210 *dst_ptr++ = (GLfloat)(*in); \
/**
219 * Convert attribute data type to float
220 * If the attribute uses named buffer object replace the bo with newly allocated bo
 *
 * Allocates a DMA region of count * Size floats for `attr`, maps the source
 * (mapping a named BO read-only if needed), then dispatches on input->Type to
 * a CONVERT() expansion that widens/normalizes each component to GLfloat.
 * NOTE(review): several case labels and closing braces are missing from this
 * extraction; BGRA ubyte arrays are asserted against (handled elsewhere as
 * D3DCOLOR, see r300TranslateAttrib).
 */
222 static void r300ConvertAttrib(GLcontext *ctx, int count, const struct gl_client_array *input, struct vertex_attribute *attr)
224 r300ContextPtr r300 = R300_CONTEXT(ctx);
225 const GLvoid *src_ptr;
226 GLboolean mapped_named_bo = GL_FALSE;
/* Effective stride: tightly packed when StrideB == 0. */
230 stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;
232 /* Convert value for first element only */
233 if (input->StrideB == 0)
236 if (input->BufferObj->Name) {
237 if (!input->BufferObj->Pointer) {
238 ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
239 mapped_named_bo = GL_TRUE;
242 src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
244 src_ptr = input->Ptr;
/* Destination: fresh DMA region, 32-byte aligned, one float per component. */
247 radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, sizeof(GLfloat) * input->Size * count, 32);
248 radeon_bo_map(attr->bo, 1);
249 dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
251 radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
252 "%s: Converting vertex attributes, attribute data format %x,"
253 "stride %d, components %d\n"
254 , __FUNCTION__, input->Type
255 , stride, input->Size);
257 assert(src_ptr != NULL);
/* Per-type conversion; normalized arrays use the *_TO_FLOAT helpers. */
259 switch (input->Type) {
261 CONVERT(GLdouble, (GLfloat));
263 case GL_UNSIGNED_INT:
264 CONVERT(GLuint, UINT_TO_FLOAT);
267 CONVERT(GLint, INT_TO_FLOAT);
269 case GL_UNSIGNED_SHORT:
270 CONVERT(GLushort, USHORT_TO_FLOAT);
273 CONVERT(GLshort, SHORT_TO_FLOAT);
275 case GL_UNSIGNED_BYTE:
276 assert(input->Format != GL_BGRA);
277 CONVERT(GLubyte, UBYTE_TO_FLOAT);
280 CONVERT(GLbyte, BYTE_TO_FLOAT);
287 radeon_bo_unmap(attr->bo);
288 if (mapped_named_bo) {
289 ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
/* Copy a named-BO vertex attribute into a fresh DMA region, padding each
 * element's stride up to the next multiple of 4 bytes so it meets the
 * hardware's dword-alignment requirement. attr->stride is updated to the
 * padded stride. The source BO is mapped read-only if not already mapped. */
293 static void r300AlignDataToDword(GLcontext *ctx, const struct gl_client_array *input, int count, struct vertex_attribute *attr)
295 r300ContextPtr r300 = R300_CONTEXT(ctx);
/* Round the source stride up to a multiple of 4. */
296 const int dst_stride = (input->StrideB + 3) & ~3;
297 const int size = getTypeSize(input->Type) * input->Size * count;
298 GLboolean mapped_named_bo = GL_FALSE;
300 radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, size, 32);
302 radeon_bo_map(attr->bo, 1);
304 if (!input->BufferObj->Pointer) {
305 ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
306 mapped_named_bo = GL_TRUE;
309 radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT, "%s. Vertex alignment doesn't match hw requirements.\n", __func__);
312 GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
313 GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
/* Element-by-element copy: read StrideB bytes, advance dst by padded stride. */
316 for (i = 0; i < count; ++i) {
317 memcpy(dst_ptr, src_ptr, input->StrideB);
318 src_ptr += input->StrideB;
319 dst_ptr += dst_stride;
323 if (mapped_named_bo) {
324 ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
327 radeon_bo_unmap(attr->bo);
328 attr->stride = dst_stride;
/* Translate one Mesa client array into an r300 vertex_attribute descriptor
 * and append it to vbuf->attribs. Types the hardware cannot fetch natively
 * (double, [u]int, unaligned strides/sizes) are marked for float conversion;
 * otherwise the native data_type/stride/swizzle is selected per type & size.
 * NOTE(review): many case labels, else-branches and closing braces are
 * missing from this extraction — the surviving assignments show the intent. */
331 static void r300TranslateAttrib(GLcontext *ctx, GLuint attr, int count, const struct gl_client_array *input)
333 r300ContextPtr r300 = R300_CONTEXT(ctx);
334 struct r300_vertex_buffer *vbuf = &r300->vbuf;
335 struct vertex_attribute r300_attr = { 0 };
339 radeon_print(RADEON_RENDER, RADEON_TRACE, "%s\n", __func__);
340 stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;
/* Conversion path: types/strides the fetcher can't handle natively. */
342 if (input->Type == GL_DOUBLE || input->Type == GL_UNSIGNED_INT || input->Type == GL_INT ||
344 getTypeSize(input->Type) != 4 ||
350 if (input->StrideB == 0) {
/* StrideB == 0 means a single constant value — replicate, stride 0. */
351 r300_attr.stride = 0;
353 r300_attr.stride = sizeof(GLfloat) * input->Size;
355 r300_attr.dwords = input->Size;
356 r300_attr.is_named_bo = GL_FALSE;
/* Native path: dwords per element rounded up from byte size. */
359 r300_attr.dwords = (getTypeSize(type) * input->Size + 3)/ 4;
360 if (!input->BufferObj->Name) {
362 if (input->StrideB == 0) {
363 r300_attr.stride = 0;
365 r300_attr.stride = (getTypeSize(type) * input->Size + 3) & ~3;
368 r300_attr.is_named_bo = GL_FALSE;
372 r300_attr.size = input->Size;
373 r300_attr.element = attr;
374 r300_attr.dst_loc = vbuf->num_attribs;
/* Float data: pick FLOAT_1..FLOAT_4 by component count. */
378 switch (input->Size) {
379 case 1: r300_attr.data_type = R300_DATA_TYPE_FLOAT_1; break;
380 case 2: r300_attr.data_type = R300_DATA_TYPE_FLOAT_2; break;
381 case 3: r300_attr.data_type = R300_DATA_TYPE_FLOAT_3; break;
382 case 4: r300_attr.data_type = R300_DATA_TYPE_FLOAT_4; break;
384 r300_attr._signed = 0;
385 r300_attr.normalize = 0;
/* Half-float data: only 2- and 4-component fetches exist. */
388 switch (input->Size) {
391 r300_attr.data_type = R300_DATA_TYPE_FLT16_2;
395 r300_attr.data_type = R300_DATA_TYPE_FLT16_4;
/* Signed short data. */
400 r300_attr._signed = 1;
401 r300_attr.normalize = input->Normalized;
402 switch (input->Size) {
405 r300_attr.data_type = R300_DATA_TYPE_SHORT_2;
409 r300_attr.data_type = R300_DATA_TYPE_SHORT_4;
/* Signed byte data. */
414 r300_attr._signed = 1;
415 r300_attr.normalize = input->Normalized;
416 r300_attr.data_type = R300_DATA_TYPE_BYTE;
418 case GL_UNSIGNED_SHORT:
419 r300_attr._signed = 0;
420 r300_attr.normalize = input->Normalized;
421 switch (input->Size) {
424 r300_attr.data_type = R300_DATA_TYPE_SHORT_2;
428 r300_attr.data_type = R300_DATA_TYPE_SHORT_4;
432 case GL_UNSIGNED_BYTE:
433 r300_attr._signed = 0;
434 r300_attr.normalize = input->Normalized;
/* BGRA ubyte maps to the hardware's D3DCOLOR packed format. */
435 if (input->Format == GL_BGRA)
436 r300_attr.data_type = R300_DATA_TYPE_D3DCOLOR;
438 r300_attr.data_type = R300_DATA_TYPE_BYTE;
444 case GL_UNSIGNED_INT:
/* Swizzle fills missing components with 0,0,1 as GL requires. */
449 switch (input->Size) {
451 r300_attr.swizzle = SWIZZLE_XYZW;
454 r300_attr.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_ONE);
457 r300_attr.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_ZERO, SWIZZLE_ONE);
460 r300_attr.swizzle = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_ZERO, SWIZZLE_ZERO, SWIZZLE_ONE);
464 r300_attr.write_mask = MASK_XYZW;
466 vbuf->attribs[vbuf->num_attribs] = r300_attr;
/* Build the vertex attribute list for the current draw: walk the vertex
 * program's InputsRead bitmask and translate each enabled client array via
 * r300TranslateAttrib(). Falls back if more than R300_MAX_AOS_ARRAYS
 * attributes are required.
 * NOTE(review): the bit-scanning loop scaffold is missing from this
 * extraction; only its body call survives. */
470 static void r300SetVertexFormat(GLcontext *ctx, const struct gl_client_array *arrays[], int count)
472 r300ContextPtr r300 = R300_CONTEXT(ctx);
473 struct r300_vertex_buffer *vbuf = &r300->vbuf;
474 radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s\n", __func__);
/* Bitmask of vertex program inputs to satisfy. */
478 tmp = r300->selected_vp->code.InputsRead;
480 vbuf->num_attribs = 0;
482 /* find first enabled bit */
488 r300TranslateAttrib(ctx, i, count, arrays[i]);
/* Hardware limit on array-of-structures inputs. */
495 r300SwitchFallback(ctx, R300_FALLBACK_AOS_LIMIT, vbuf->num_attribs > R300_MAX_AOS_ARRAYS);
/* Materialize every translated attribute into a GPU-visible buffer and fill
 * the radeon.tcl.aos[] slots the emit code reads. Three paths per attribute:
 *  1) needs conversion (double/[u]int/odd stride) -> r300ConvertAttrib();
 *  2) lives in a named BO -> reference it directly, or realign via
 *     r300AlignDataToDword() when the stride is not dword-aligned;
 *  3) user-pointer array -> copy into a DMA region with radeonEmitVec*().
 * Finally registers named BOs with the CS and checks command-stream space.
 * NOTE(review): some branch/closing lines are missing from this extraction. */
500 static void r300AllocDmaRegions(GLcontext *ctx, const struct gl_client_array *input[], int count)
502 r300ContextPtr r300 = R300_CONTEXT(ctx);
503 struct r300_vertex_buffer *vbuf = &r300->vbuf;
507 radeon_print(RADEON_RENDER, RADEON_VERBOSE,
508 "%s: count %d num_attribs %d\n",
509 __func__, count, vbuf->num_attribs);
511 for (index = 0; index < vbuf->num_attribs; index++) {
512 struct radeon_aos *aos = &r300->radeon.tcl.aos[index];
513 i = vbuf->attribs[index].element;
515 stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;
/* Same convertibility test as r300TranslateAttrib — keep them in sync. */
517 if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT ||
519 getTypeSize(input[i]->Type) != 4 ||
523 r300ConvertAttrib(ctx, count, input[i], &vbuf->attribs[index]);
525 if (input[i]->BufferObj->Name) {
526 if (stride % 4 != 0) {
/* Misaligned stride inside a named BO: copy out with padded stride. */
527 assert(((intptr_t) input[i]->Ptr) % input[i]->StrideB == 0);
528 r300AlignDataToDword(ctx, input[i], count, &vbuf->attribs[index]);
529 vbuf->attribs[index].is_named_bo = GL_FALSE;
/* Aligned named BO: point the attribute straight at the user's BO. */
531 vbuf->attribs[index].stride = input[i]->StrideB;
532 vbuf->attribs[index].bo_offset = (intptr_t) input[i]->Ptr;
533 vbuf->attribs[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
534 vbuf->attribs[index].is_named_bo = GL_TRUE;
539 int local_count = count;
/* StrideB == 0: one constant element, copy a single vector. */
542 if (input[i]->StrideB == 0) {
543 size = getTypeSize(input[i]->Type) * input[i]->Size;
546 size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
549 radeonAllocDmaRegion(&r300->radeon, &vbuf->attribs[index].bo, &vbuf->attribs[index].bo_offset, size, 32);
550 radeon_bo_map(vbuf->attribs[index].bo, 1);
551 assert(vbuf->attribs[index].bo->ptr != NULL);
552 dst = (uint32_t *)ADD_POINTERS(vbuf->attribs[index].bo->ptr, vbuf->attribs[index].bo_offset);
/* Emit helper chosen by dwords-per-element (4/8/12/16 bytes). */
553 switch (vbuf->attribs[index].dwords) {
554 case 1: radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
555 case 2: radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
556 case 3: radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
557 case 4: radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
558 default: assert(0); break;
560 radeon_bo_unmap(vbuf->attribs[index].bo);
/* Publish the per-attribute AOS slot consumed by the emit code. */
565 aos->count = vbuf->attribs[index].stride == 0 ? 1 : count;
566 aos->stride = vbuf->attribs[index].stride / sizeof(float);
567 aos->components = vbuf->attribs[index].dwords;
568 aos->bo = vbuf->attribs[index].bo;
569 aos->offset = vbuf->attribs[index].bo_offset;
571 if (vbuf->attribs[index].is_named_bo) {
572 radeon_cs_space_add_persistent_bo(r300->radeon.cmdbuf.cs, r300->vbuf.attribs[index].bo, RADEON_GEM_DOMAIN_GTT, 0);
576 r300->radeon.tcl.aos_count = vbuf->num_attribs;
577 ret = radeon_cs_space_check_with_bo(r300->radeon.cmdbuf.cs, first_elem(&r300->radeon.dma.reserved)->bo, RADEON_GEM_DOMAIN_GTT, 0);
578 r300SwitchFallback(ctx, R300_FALLBACK_INVALID_BUFFERS, ret);
/* Release per-draw buffers: unref each non-named attribute BO and the index
 * buffer BO, and NULL the tcl.aos[] references so they are not unreffed a
 * second time when the context is destroyed. */
582 static void r300FreeData(GLcontext *ctx)
584 /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
585 * to prevent double unref in radeonReleaseArrays
586 * called during context destroy
 */
588 radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s\n", __func__);
589 r300ContextPtr r300 = R300_CONTEXT(ctx);
593 for (i = 0; i < r300->vbuf.num_attribs; i++) {
/* Named BOs belong to the application; only unref driver-allocated ones. */
594 if (!r300->vbuf.attribs[i].is_named_bo) {
595 radeon_bo_unref(r300->vbuf.attribs[i].bo);
597 r300->radeon.tcl.aos[i].bo = NULL;
602 if (r300->ind_buf.bo != NULL) {
603 radeon_bo_unref(r300->ind_buf.bo);
/* Predict the command-stream dword cost of drawing nr_prims primitives,
 * including extra primitives created by splitting anything that exceeds the
 * hardware's per-primitive vertex limit (65535 - 32). Ensures the command
 * buffer has space, re-counting state size if a flush occurred.
 * NOTE(review): the return statement and part of the rcommonEnsureCmdBufSpace
 * call are missing from this extraction. */
608 static GLuint r300PredictTryDrawPrimsSize(GLcontext *ctx,
609 GLuint nr_prims, const struct _mesa_prim *prim)
611 struct r300_context *r300 = R300_CONTEXT(ctx);
612 struct r300_vertex_buffer *vbuf = &r300->vbuf;
617 GLuint extra_prims = 0;
619 /* Check for primitive splitting. */
620 for (i = 0; i < nr_prims; ++i) {
621 const GLuint num_verts = r300NumVerts(r300, prim[i].count, prim[i].mode);
/* Each (65535 - 32)-vertex chunk beyond the first adds one primitive. */
622 extra_prims += num_verts/(65535 - 32);
624 nr_prims += extra_prims;
/* Fixed costs plus per-primitive AOS pointer + fire packets. */
626 dwords = 2*CACHE_FLUSH_BUFSZ;
627 dwords += PRE_EMIT_STATE_BUFSZ;
628 dwords += (AOS_BUFSZ(vbuf->num_attribs)
630 + FIREAOS_BUFSZ)*nr_prims;
632 state_size = radeonCountStateEmitSize(&r300->radeon);
633 flushed = rcommonEnsureCmdBufSpace(&r300->radeon,
/* After a flush the full state must be re-emitted, so recount it. */
637 dwords += radeonCountStateEmitSize(&r300->radeon);
639 dwords += state_size;
641 radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);
/* Attempt a hardware draw: validate state and shaders, build the vertex
 * format, set up index/vertex DMA buffers, then emit state and run each
 * primitive. Warns if the actual command count exceeded the prediction.
 * Returns GLboolean success (return/fallback-check lines missing from this
 * extraction); on failure the caller runs the TNL software pipeline. */
645 static GLboolean r300TryDrawPrims(GLcontext *ctx,
646 const struct gl_client_array *arrays[],
647 const struct _mesa_prim *prim,
649 const struct _mesa_index_buffer *ib,
653 struct r300_context *r300 = R300_CONTEXT(ctx);
656 radeon_print(RADEON_RENDER, RADEON_NORMAL, "%s: %u (%d-%d) cs begin at %d\n",
657 __FUNCTION__, nr_prims, min_index, max_index, r300->radeon.cmdbuf.cs->cdw);
660 _mesa_update_state( ctx );
/* With hardware TCL the fixed-function vertex program must be current. */
662 if (r300->options.hw_tcl_enabled)
663 _tnl_UpdateFixedFunctionProgram(ctx);
665 r300UpdateShaders(r300);
667 r300SwitchFallback(ctx, R300_FALLBACK_INVALID_BUFFERS, !r300ValidateBuffers(ctx));
/* max_index is inclusive, hence the + 1 element count. */
669 r300SetVertexFormat(ctx, arrays, max_index + 1);
674 r300SetupVAP(ctx, r300->selected_vp->code.InputsRead, r300->selected_vp->code.OutputsWritten);
676 r300UpdateShaderStates(r300);
678 /* ensure we have the cmd buf space in advance to cover
679 * the state + DMA AOS pointers */
680 GLuint emit_end = r300PredictTryDrawPrimsSize(ctx, nr_prims, prim)
681 + r300->radeon.cmdbuf.cs->cdw;
683 r300SetupIndexBuffer(ctx, ib);
685 r300AllocDmaRegions(ctx, arrays, max_index + 1);
690 r300EmitCacheFlush(r300);
691 radeonEmitState(&r300->radeon);
693 for (i = 0; i < nr_prims; ++i) {
694 r300RunRenderPrimitive(ctx, prim[i].start, prim[i].start + prim[i].count, prim[i].mode);
697 r300EmitCacheFlush(r300);
701 radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: %u (%d-%d) cs ending at %d\n",
702 __FUNCTION__, nr_prims, min_index, max_index, r300->radeon.cmdbuf.cs->cdw);
/* Prediction miss => possible command buffer overflow; warn loudly once. */
704 if (emit_end < r300->radeon.cmdbuf.cs->cdw)
705 WARN_ONCE("Rendering was %d commands larger than predicted size."
706 " We might overflow command buffer.\n", r300->radeon.cmdbuf.cs->cdw - emit_end);
/* vbo draw_prims entry point. Computes index bounds if the caller did not,
 * rebases primitives when needed (recursing via vbo_rebase_prims), tries the
 * hardware path, and falls back to the TNL pipeline on failure.
 * NOTE(review): the rebase condition and the `if (!retval)` guard around the
 * TNL fallback are missing from this extraction. */
711 static void r300DrawPrims(GLcontext *ctx,
712 const struct gl_client_array *arrays[],
713 const struct _mesa_prim *prim,
715 const struct _mesa_index_buffer *ib,
716 GLboolean index_bounds_valid,
722 /* This check should get folded into just the places that
723 * min/max index are really needed.
 */
725 if (!index_bounds_valid) {
726 vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
730 radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
731 "%s: Rebasing primitives. %p nr_prims %d min_index %u max_index %u\n",
732 __func__, prim, nr_prims, min_index, max_index);
/* Re-enters this function with rebased indices. */
733 vbo_rebase_prims( ctx, arrays, prim, nr_prims, ib, min_index, max_index, r300DrawPrims );
737 /* Make an attempt at drawing */
738 retval = r300TryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
740 /* If failed run tnl pipeline - it should take care of fallbacks */
742 _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
/* Hook this driver's draw path into the vbo module for the given context.
 * NOTE(review): the function's closing brace lies beyond this extraction. */
745 void r300InitDraw(GLcontext *ctx)
747 struct vbo_context *vbo = vbo_context(ctx);
749 vbo->draw_prims = r300DrawPrims;