1 /**************************************************************************
3 * Copyright 2009 Maciej Cencora
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL THE AUTHOR(S) AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 **************************************************************************/
29 #include "main/glheader.h"
30 #include "main/context.h"
31 #include "main/state.h"
32 #include "main/api_validate.h"
33 #include "main/enums.h"
34 #include "main/simple_list.h"
37 #include "r300_context.h"
38 #include "r300_emit.h"
39 #include "r300_render.h"
40 #include "r300_state.h"
42 #include "r300_cmdbuf.h"
44 #include "radeon_buffer_objects.h"
45 #include "radeon_common_context.h"
48 #include "tnl/t_vp_build.h"
49 #include "vbo/vbo_context.h"
50 #include "swrast/swrast.h"
51 #include "swrast_setup/swrast_setup.h"
54 static int getTypeSize(GLenum type
)
58 return sizeof(GLdouble
);
60 return sizeof(GLhalfARB
);
62 return sizeof(GLfloat
);
66 return sizeof(GLuint
);
68 return sizeof(GLshort
);
69 case GL_UNSIGNED_SHORT
:
70 return sizeof(GLushort
);
72 return sizeof(GLbyte
);
73 case GL_UNSIGNED_BYTE
:
74 return sizeof(GLubyte
);
81 static void r300FixupIndexBuffer(GLcontext
*ctx
, const struct _mesa_index_buffer
*mesa_ind_buf
)
83 r300ContextPtr r300
= R300_CONTEXT(ctx
);
87 GLboolean mapped_named_bo
= GL_FALSE
;
89 if (mesa_ind_buf
->obj
->Name
&& !mesa_ind_buf
->obj
->Pointer
) {
90 ctx
->Driver
.MapBuffer(ctx
, GL_ELEMENT_ARRAY_BUFFER
, GL_READ_ONLY_ARB
, mesa_ind_buf
->obj
);
91 mapped_named_bo
= GL_TRUE
;
92 assert(mesa_ind_buf
->obj
->Pointer
!= NULL
);
94 src_ptr
= ADD_POINTERS(mesa_ind_buf
->obj
->Pointer
, mesa_ind_buf
->ptr
);
96 radeon_print(RADEON_FALLBACKS
, RADEON_IMPORTANT
,
97 "%s: Fixing index buffer format. type %d\n",
98 __func__
, mesa_ind_buf
->type
);
100 if (mesa_ind_buf
->type
== GL_UNSIGNED_BYTE
) {
101 GLuint size
= sizeof(GLushort
) * ((mesa_ind_buf
->count
+ 1) & ~1);
102 GLubyte
*in
= (GLubyte
*)src_ptr
;
104 radeonAllocDmaRegion(&r300
->radeon
, &r300
->ind_buf
.bo
, &r300
->ind_buf
.bo_offset
, size
, 4);
105 radeon_bo_map(r300
->ind_buf
.bo
, 1);
106 assert(r300
->ind_buf
.bo
->ptr
!= NULL
);
107 out
= (GLuint
*)ADD_POINTERS(r300
->ind_buf
.bo
->ptr
, r300
->ind_buf
.bo_offset
);
109 for (i
= 0; i
+ 1 < mesa_ind_buf
->count
; i
+= 2) {
110 *out
++ = in
[i
] | in
[i
+ 1] << 16;
113 if (i
< mesa_ind_buf
->count
) {
116 radeon_bo_unmap(r300
->ind_buf
.bo
);
118 } else { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
119 GLushort
*in
= (GLushort
*)src_ptr
;
120 GLuint size
= sizeof(GLushort
) * ((mesa_ind_buf
->count
+ 1) & ~1);
122 radeonAllocDmaRegion(&r300
->radeon
, &r300
->ind_buf
.bo
,
123 &r300
->ind_buf
.bo_offset
, size
, 4);
125 radeon_bo_map(r300
->ind_buf
.bo
, 1);
126 assert(r300
->ind_buf
.bo
->ptr
!= NULL
);
127 out
= (GLuint
*)ADD_POINTERS(r300
->ind_buf
.bo
->ptr
, r300
->ind_buf
.bo_offset
);
129 for (i
= 0; i
+ 1 < mesa_ind_buf
->count
; i
+= 2) {
130 *out
++ = in
[i
] | in
[i
+ 1] << 16;
133 if (i
< mesa_ind_buf
->count
) {
136 radeon_bo_unmap(r300
->ind_buf
.bo
);
140 r300
->ind_buf
.is_32bit
= GL_FALSE
;
141 r300
->ind_buf
.count
= mesa_ind_buf
->count
;
143 if (mapped_named_bo
) {
144 ctx
->Driver
.UnmapBuffer(ctx
, GL_ELEMENT_ARRAY_BUFFER
, mesa_ind_buf
->obj
);
149 static void r300SetupIndexBuffer(GLcontext
*ctx
, const struct _mesa_index_buffer
*mesa_ind_buf
)
151 r300ContextPtr r300
= R300_CONTEXT(ctx
);
154 r300
->ind_buf
.bo
= NULL
;
157 radeon_print(RADEON_RENDER
, RADEON_TRACE
, "%s\n", __func__
);
160 if (mesa_ind_buf
->type
== GL_UNSIGNED_INT
) {
162 if (mesa_ind_buf
->type
!= GL_UNSIGNED_BYTE
) {
164 const GLvoid
*src_ptr
;
166 GLboolean mapped_named_bo
= GL_FALSE
;
168 if (mesa_ind_buf
->obj
->Name
&& !mesa_ind_buf
->obj
->Pointer
) {
169 ctx
->Driver
.MapBuffer(ctx
, GL_ELEMENT_ARRAY_BUFFER
, GL_READ_ONLY_ARB
, mesa_ind_buf
->obj
);
170 assert(mesa_ind_buf
->obj
->Pointer
!= NULL
);
171 mapped_named_bo
= GL_TRUE
;
174 src_ptr
= ADD_POINTERS(mesa_ind_buf
->obj
->Pointer
, mesa_ind_buf
->ptr
);
176 const GLuint size
= mesa_ind_buf
->count
* getTypeSize(mesa_ind_buf
->type
);
178 radeonAllocDmaRegion(&r300
->radeon
, &r300
->ind_buf
.bo
, &r300
->ind_buf
.bo_offset
, size
, 4);
180 radeon_bo_map(r300
->ind_buf
.bo
, 1);
181 assert(r300
->ind_buf
.bo
->ptr
!= NULL
);
182 dst_ptr
= ADD_POINTERS(r300
->ind_buf
.bo
->ptr
, r300
->ind_buf
.bo_offset
);
183 _mesa_memcpy(dst_ptr
, src_ptr
, size
);
185 radeon_bo_unmap(r300
->ind_buf
.bo
);
186 r300
->ind_buf
.is_32bit
= (mesa_ind_buf
->type
== GL_UNSIGNED_INT
);
187 r300
->ind_buf
.count
= mesa_ind_buf
->count
;
189 if (mapped_named_bo
) {
190 ctx
->Driver
.UnmapBuffer(ctx
, GL_ELEMENT_ARRAY_BUFFER
, mesa_ind_buf
->obj
);
193 r300FixupIndexBuffer(ctx
, mesa_ind_buf
);
/*
 * Convert `count` vertices of `input->Size` components each from TYPE to
 * float, writing through `dst_ptr` and advancing `src_ptr` by `stride`
 * per vertex.  MACRO performs the normalized-integer-to-float mapping
 * when input->Normalized is set; otherwise a plain (GLfloat) cast is
 * used.  Relies on src_ptr/dst_ptr/stride/count/input from the enclosing
 * scope (r300ConvertAttrib).
 */
#define CONVERT( TYPE, MACRO ) do {		\
	GLuint i, j, sz;			\
	sz = input->Size;			\
	if (input->Normalized) {		\
		for (i = 0; i < count; i++) {	\
			const TYPE *in = (TYPE *)src_ptr;	\
			for (j = 0; j < sz; j++) {		\
				*dst_ptr++ = MACRO(*in);	\
				in++;				\
			}					\
			src_ptr += stride;			\
		}						\
	} else {					\
		for (i = 0; i < count; i++) {	\
			const TYPE *in = (TYPE *)src_ptr;	\
			for (j = 0; j < sz; j++) {		\
				*dst_ptr++ = (GLfloat)(*in);	\
				in++;				\
			}					\
			src_ptr += stride;			\
		}						\
	}						\
} while (0)
222 * Convert attribute data type to float
223 * If the attribute uses named buffer object replace the bo with newly allocated bo
225 static void r300ConvertAttrib(GLcontext
*ctx
, int count
, const struct gl_client_array
*input
, struct vertex_attribute
*attr
)
227 r300ContextPtr r300
= R300_CONTEXT(ctx
);
228 const GLvoid
*src_ptr
;
229 GLboolean mapped_named_bo
= GL_FALSE
;
233 stride
= (input
->StrideB
== 0) ? getTypeSize(input
->Type
) * input
->Size
: input
->StrideB
;
235 /* Convert value for first element only */
236 if (input
->StrideB
== 0)
239 if (input
->BufferObj
->Name
) {
240 if (!input
->BufferObj
->Pointer
) {
241 ctx
->Driver
.MapBuffer(ctx
, GL_ARRAY_BUFFER
, GL_READ_ONLY_ARB
, input
->BufferObj
);
242 mapped_named_bo
= GL_TRUE
;
245 src_ptr
= ADD_POINTERS(input
->BufferObj
->Pointer
, input
->Ptr
);
247 src_ptr
= input
->Ptr
;
250 radeonAllocDmaRegion(&r300
->radeon
, &attr
->bo
, &attr
->bo_offset
, sizeof(GLfloat
) * input
->Size
* count
, 32);
251 radeon_bo_map(attr
->bo
, 1);
252 dst_ptr
= (GLfloat
*)ADD_POINTERS(attr
->bo
->ptr
, attr
->bo_offset
);
254 radeon_print(RADEON_FALLBACKS
, RADEON_IMPORTANT
,
255 "%s: Converting vertex attributes, attribute data format %x,"
256 "stride %d, components %d\n"
257 , __FUNCTION__
, input
->Type
258 , stride
, input
->Size
);
260 assert(src_ptr
!= NULL
);
262 switch (input
->Type
) {
264 CONVERT(GLdouble
, (GLfloat
));
266 case GL_UNSIGNED_INT
:
267 CONVERT(GLuint
, UINT_TO_FLOAT
);
270 CONVERT(GLint
, INT_TO_FLOAT
);
272 case GL_UNSIGNED_SHORT
:
273 CONVERT(GLushort
, USHORT_TO_FLOAT
);
276 CONVERT(GLshort
, SHORT_TO_FLOAT
);
278 case GL_UNSIGNED_BYTE
:
279 assert(input
->Format
!= GL_BGRA
);
280 CONVERT(GLubyte
, UBYTE_TO_FLOAT
);
283 CONVERT(GLbyte
, BYTE_TO_FLOAT
);
290 radeon_bo_unmap(attr
->bo
);
291 if (mapped_named_bo
) {
292 ctx
->Driver
.UnmapBuffer(ctx
, GL_ARRAY_BUFFER
, input
->BufferObj
);
296 static void r300AlignDataToDword(GLcontext
*ctx
, const struct gl_client_array
*input
, int count
, struct vertex_attribute
*attr
)
298 r300ContextPtr r300
= R300_CONTEXT(ctx
);
299 const int dst_stride
= (input
->StrideB
+ 3) & ~3;
300 const int size
= getTypeSize(input
->Type
) * input
->Size
* count
;
301 GLboolean mapped_named_bo
= GL_FALSE
;
303 radeonAllocDmaRegion(&r300
->radeon
, &attr
->bo
, &attr
->bo_offset
, size
, 32);
305 radeon_bo_map(attr
->bo
, 1);
307 if (!input
->BufferObj
->Pointer
) {
308 ctx
->Driver
.MapBuffer(ctx
, GL_ARRAY_BUFFER
, GL_READ_ONLY_ARB
, input
->BufferObj
);
309 mapped_named_bo
= GL_TRUE
;
312 radeon_print(RADEON_FALLBACKS
, RADEON_IMPORTANT
, "%s. Vertex alignment doesn't match hw requirements.\n", __func__
);
315 GLvoid
*src_ptr
= ADD_POINTERS(input
->BufferObj
->Pointer
, input
->Ptr
);
316 GLvoid
*dst_ptr
= ADD_POINTERS(attr
->bo
->ptr
, attr
->bo_offset
);
319 for (i
= 0; i
< count
; ++i
) {
320 _mesa_memcpy(dst_ptr
, src_ptr
, input
->StrideB
);
321 src_ptr
+= input
->StrideB
;
322 dst_ptr
+= dst_stride
;
326 if (mapped_named_bo
) {
327 ctx
->Driver
.UnmapBuffer(ctx
, GL_ARRAY_BUFFER
, input
->BufferObj
);
330 radeon_bo_unmap(attr
->bo
);
331 attr
->stride
= dst_stride
;
334 static void r300TranslateAttrib(GLcontext
*ctx
, GLuint attr
, int count
, const struct gl_client_array
*input
)
336 r300ContextPtr r300
= R300_CONTEXT(ctx
);
337 struct r300_vertex_buffer
*vbuf
= &r300
->vbuf
;
338 struct vertex_attribute r300_attr
;
342 radeon_print(RADEON_RENDER
, RADEON_TRACE
, "%s\n", __func__
);
343 stride
= (input
->StrideB
== 0) ? getTypeSize(input
->Type
) * input
->Size
: input
->StrideB
;
345 if (input
->Type
== GL_DOUBLE
|| input
->Type
== GL_UNSIGNED_INT
|| input
->Type
== GL_INT
||
347 getTypeSize(input
->Type
) != 4 ||
353 if (input
->StrideB
== 0) {
354 r300_attr
.stride
= 0;
356 r300_attr
.stride
= sizeof(GLfloat
) * input
->Size
;
358 r300_attr
.dwords
= input
->Size
;
359 r300_attr
.is_named_bo
= GL_FALSE
;
362 r300_attr
.dwords
= (getTypeSize(type
) * input
->Size
+ 3)/ 4;
363 if (!input
->BufferObj
->Name
) {
365 if (input
->StrideB
== 0) {
366 r300_attr
.stride
= 0;
368 r300_attr
.stride
= (getTypeSize(type
) * input
->Size
+ 3) & ~3;
371 r300_attr
.is_named_bo
= GL_FALSE
;
375 r300_attr
.size
= input
->Size
;
376 r300_attr
.element
= attr
;
377 r300_attr
.dst_loc
= vbuf
->num_attribs
;
381 switch (input
->Size
) {
382 case 1: r300_attr
.data_type
= R300_DATA_TYPE_FLOAT_1
; break;
383 case 2: r300_attr
.data_type
= R300_DATA_TYPE_FLOAT_2
; break;
384 case 3: r300_attr
.data_type
= R300_DATA_TYPE_FLOAT_3
; break;
385 case 4: r300_attr
.data_type
= R300_DATA_TYPE_FLOAT_4
; break;
387 r300_attr
._signed
= 0;
388 r300_attr
.normalize
= 0;
391 switch (input
->Size
) {
394 r300_attr
.data_type
= R300_DATA_TYPE_FLT16_2
;
398 r300_attr
.data_type
= R300_DATA_TYPE_FLT16_4
;
403 r300_attr
._signed
= 1;
404 r300_attr
.normalize
= input
->Normalized
;
405 switch (input
->Size
) {
408 r300_attr
.data_type
= R300_DATA_TYPE_SHORT_2
;
412 r300_attr
.data_type
= R300_DATA_TYPE_SHORT_4
;
417 r300_attr
._signed
= 1;
418 r300_attr
.normalize
= input
->Normalized
;
419 r300_attr
.data_type
= R300_DATA_TYPE_BYTE
;
421 case GL_UNSIGNED_SHORT
:
422 r300_attr
._signed
= 0;
423 r300_attr
.normalize
= input
->Normalized
;
424 switch (input
->Size
) {
427 r300_attr
.data_type
= R300_DATA_TYPE_SHORT_2
;
431 r300_attr
.data_type
= R300_DATA_TYPE_SHORT_4
;
435 case GL_UNSIGNED_BYTE
:
436 r300_attr
._signed
= 0;
437 r300_attr
.normalize
= input
->Normalized
;
438 if (input
->Format
== GL_BGRA
)
439 r300_attr
.data_type
= R300_DATA_TYPE_D3DCOLOR
;
441 r300_attr
.data_type
= R300_DATA_TYPE_BYTE
;
447 case GL_UNSIGNED_INT
:
452 switch (input
->Size
) {
454 r300_attr
.swizzle
= SWIZZLE_XYZW
;
457 r300_attr
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
, SWIZZLE_Z
, SWIZZLE_ONE
);
460 r300_attr
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
, SWIZZLE_ZERO
, SWIZZLE_ONE
);
463 r300_attr
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_ZERO
, SWIZZLE_ZERO
, SWIZZLE_ONE
);
467 r300_attr
.write_mask
= MASK_XYZW
;
469 vbuf
->attribs
[vbuf
->num_attribs
] = r300_attr
;
473 static void r300SetVertexFormat(GLcontext
*ctx
, const struct gl_client_array
*arrays
[], int count
)
475 r300ContextPtr r300
= R300_CONTEXT(ctx
);
476 struct r300_vertex_buffer
*vbuf
= &r300
->vbuf
;
477 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
, "%s\n", __func__
);
481 tmp
= r300
->selected_vp
->code
.InputsRead
;
483 vbuf
->num_attribs
= 0;
485 /* find first enabled bit */
491 r300TranslateAttrib(ctx
, i
, count
, arrays
[i
]);
498 r300SwitchFallback(ctx
, R300_FALLBACK_AOS_LIMIT
, vbuf
->num_attribs
> R300_MAX_AOS_ARRAYS
);
503 static void r300AllocDmaRegions(GLcontext
*ctx
, const struct gl_client_array
*input
[], int count
)
505 r300ContextPtr r300
= R300_CONTEXT(ctx
);
506 struct r300_vertex_buffer
*vbuf
= &r300
->vbuf
;
510 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
,
511 "%s: count %d num_attribs %d\n",
512 __func__
, count
, vbuf
->num_attribs
);
514 for (index
= 0; index
< vbuf
->num_attribs
; index
++) {
515 struct radeon_aos
*aos
= &r300
->radeon
.tcl
.aos
[index
];
516 i
= vbuf
->attribs
[index
].element
;
518 stride
= (input
[i
]->StrideB
== 0) ? getTypeSize(input
[i
]->Type
) * input
[i
]->Size
: input
[i
]->StrideB
;
520 if (input
[i
]->Type
== GL_DOUBLE
|| input
[i
]->Type
== GL_UNSIGNED_INT
|| input
[i
]->Type
== GL_INT
||
522 getTypeSize(input
[i
]->Type
) != 4 ||
526 r300ConvertAttrib(ctx
, count
, input
[i
], &vbuf
->attribs
[index
]);
528 if (input
[i
]->BufferObj
->Name
) {
529 if (stride
% 4 != 0) {
530 assert(((intptr_t) input
[i
]->Ptr
) % input
[i
]->StrideB
== 0);
531 r300AlignDataToDword(ctx
, input
[i
], count
, &vbuf
->attribs
[index
]);
532 vbuf
->attribs
[index
].is_named_bo
= GL_FALSE
;
534 vbuf
->attribs
[index
].stride
= input
[i
]->StrideB
;
535 vbuf
->attribs
[index
].bo_offset
= (intptr_t) input
[i
]->Ptr
;
536 vbuf
->attribs
[index
].bo
= get_radeon_buffer_object(input
[i
]->BufferObj
)->bo
;
537 vbuf
->attribs
[index
].is_named_bo
= GL_TRUE
;
542 int local_count
= count
;
545 if (input
[i
]->StrideB
== 0) {
546 size
= getTypeSize(input
[i
]->Type
) * input
[i
]->Size
;
549 size
= getTypeSize(input
[i
]->Type
) * input
[i
]->Size
* local_count
;
552 radeonAllocDmaRegion(&r300
->radeon
, &vbuf
->attribs
[index
].bo
, &vbuf
->attribs
[index
].bo_offset
, size
, 32);
553 radeon_bo_map(vbuf
->attribs
[index
].bo
, 1);
554 assert(vbuf
->attribs
[index
].bo
->ptr
!= NULL
);
555 dst
= (uint32_t *)ADD_POINTERS(vbuf
->attribs
[index
].bo
->ptr
, vbuf
->attribs
[index
].bo_offset
);
556 switch (vbuf
->attribs
[index
].dwords
) {
557 case 1: radeonEmitVec4(dst
, input
[i
]->Ptr
, input
[i
]->StrideB
, local_count
); break;
558 case 2: radeonEmitVec8(dst
, input
[i
]->Ptr
, input
[i
]->StrideB
, local_count
); break;
559 case 3: radeonEmitVec12(dst
, input
[i
]->Ptr
, input
[i
]->StrideB
, local_count
); break;
560 case 4: radeonEmitVec16(dst
, input
[i
]->Ptr
, input
[i
]->StrideB
, local_count
); break;
561 default: assert(0); break;
563 radeon_bo_unmap(vbuf
->attribs
[index
].bo
);
568 aos
->count
= vbuf
->attribs
[index
].stride
== 0 ? 1 : count
;
569 aos
->stride
= vbuf
->attribs
[index
].stride
/ sizeof(float);
570 aos
->components
= vbuf
->attribs
[index
].dwords
;
571 aos
->bo
= vbuf
->attribs
[index
].bo
;
572 aos
->offset
= vbuf
->attribs
[index
].bo_offset
;
574 if (vbuf
->attribs
[index
].is_named_bo
) {
575 radeon_cs_space_add_persistent_bo(r300
->radeon
.cmdbuf
.cs
, r300
->vbuf
.attribs
[index
].bo
, RADEON_GEM_DOMAIN_GTT
, 0);
579 r300
->radeon
.tcl
.aos_count
= vbuf
->num_attribs
;
580 ret
= radeon_cs_space_check_with_bo(r300
->radeon
.cmdbuf
.cs
, first_elem(&r300
->radeon
.dma
.reserved
)->bo
, RADEON_GEM_DOMAIN_GTT
, 0);
581 r300SwitchFallback(ctx
, R300_FALLBACK_INVALID_BUFFERS
, ret
);
585 static void r300FreeData(GLcontext
*ctx
)
587 /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
588 * to prevent double unref in radeonReleaseArrays
589 * called during context destroy
591 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
, "%s\n", __func__
);
592 r300ContextPtr r300
= R300_CONTEXT(ctx
);
596 for (i
= 0; i
< r300
->vbuf
.num_attribs
; i
++) {
597 if (!r300
->vbuf
.attribs
[i
].is_named_bo
) {
598 radeon_bo_unref(r300
->vbuf
.attribs
[i
].bo
);
600 r300
->radeon
.tcl
.aos
[i
].bo
= NULL
;
605 if (r300
->ind_buf
.bo
!= NULL
) {
606 radeon_bo_unref(r300
->ind_buf
.bo
);
611 static GLuint
r300PredictTryDrawPrimsSize(GLcontext
*ctx
, GLuint nr_prims
)
613 struct r300_context
*r300
= R300_CONTEXT(ctx
);
614 struct r300_vertex_buffer
*vbuf
= &r300
->vbuf
;
619 dwords
= 2*CACHE_FLUSH_BUFSZ
;
620 dwords
+= PRE_EMIT_STATE_BUFSZ
;
621 dwords
+= (AOS_BUFSZ(vbuf
->num_attribs
)
623 + FIREAOS_BUFSZ
)*nr_prims
;
625 state_size
= radeonCountStateEmitSize(&r300
->radeon
);
626 flushed
= rcommonEnsureCmdBufSpace(&r300
->radeon
,
630 dwords
+= radeonCountStateEmitSize(&r300
->radeon
);
632 dwords
+= state_size
;
634 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
, "%s: total prediction size is %d.\n", __FUNCTION__
, dwords
);
638 static GLboolean
r300TryDrawPrims(GLcontext
*ctx
,
639 const struct gl_client_array
*arrays
[],
640 const struct _mesa_prim
*prim
,
642 const struct _mesa_index_buffer
*ib
,
646 struct r300_context
*r300
= R300_CONTEXT(ctx
);
649 radeon_print(RADEON_RENDER
, RADEON_NORMAL
, "%s: %u (%d-%d) cs begin at %d\n",
650 __FUNCTION__
, nr_prims
, min_index
, max_index
, r300
->radeon
.cmdbuf
.cs
->cdw
);
653 _mesa_update_state( ctx
);
655 if (r300
->options
.hw_tcl_enabled
)
656 _tnl_UpdateFixedFunctionProgram(ctx
);
658 r300UpdateShaders(r300
);
660 r300SwitchFallback(ctx
, R300_FALLBACK_INVALID_BUFFERS
, !r300ValidateBuffers(ctx
));
662 r300SetVertexFormat(ctx
, arrays
, max_index
+ 1);
667 r300SetupVAP(ctx
, r300
->selected_vp
->code
.InputsRead
, r300
->selected_vp
->code
.OutputsWritten
);
669 r300UpdateShaderStates(r300
);
671 /* ensure we have the cmd buf space in advance to cover
672 * the state + DMA AOS pointers */
673 GLuint emit_end
= r300PredictTryDrawPrimsSize(ctx
, nr_prims
)
674 + r300
->radeon
.cmdbuf
.cs
->cdw
;
676 r300SetupIndexBuffer(ctx
, ib
);
678 r300AllocDmaRegions(ctx
, arrays
, max_index
+ 1);
683 r300EmitCacheFlush(r300
);
684 radeonEmitState(&r300
->radeon
);
686 for (i
= 0; i
< nr_prims
; ++i
) {
687 r300RunRenderPrimitive(ctx
, prim
[i
].start
, prim
[i
].start
+ prim
[i
].count
, prim
[i
].mode
);
690 r300EmitCacheFlush(r300
);
694 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
, "%s: %u (%d-%d) cs ending at %d\n",
695 __FUNCTION__
, nr_prims
, min_index
, max_index
, r300
->radeon
.cmdbuf
.cs
->cdw
);
697 if (emit_end
< r300
->radeon
.cmdbuf
.cs
->cdw
)
698 WARN_ONCE("Rendering was %d commands larger than predicted size."
699 " We might overflow command buffer.\n", r300
->radeon
.cmdbuf
.cs
->cdw
- emit_end
);
704 static void r300DrawPrims(GLcontext
*ctx
,
705 const struct gl_client_array
*arrays
[],
706 const struct _mesa_prim
*prim
,
708 const struct _mesa_index_buffer
*ib
,
709 GLboolean index_bounds_valid
,
715 /* This check should get folded into just the places that
716 * min/max index are really needed.
718 if (!index_bounds_valid
) {
719 vbo_get_minmax_index(ctx
, prim
, ib
, &min_index
, &max_index
);
723 radeon_print(RADEON_FALLBACKS
, RADEON_IMPORTANT
,
724 "%s: Rebasing primitives. %p nr_prims %d min_index %u max_index %u\n",
725 __func__
, prim
, nr_prims
, min_index
, max_index
);
726 vbo_rebase_prims( ctx
, arrays
, prim
, nr_prims
, ib
, min_index
, max_index
, r300DrawPrims
);
730 /* Make an attempt at drawing */
731 retval
= r300TryDrawPrims(ctx
, arrays
, prim
, nr_prims
, ib
, min_index
, max_index
);
733 /* If failed run tnl pipeline - it should take care of fallbacks */
735 _tnl_draw_prims(ctx
, arrays
, prim
, nr_prims
, ib
, min_index
, max_index
);
738 void r300InitDraw(GLcontext
*ctx
)
740 struct vbo_context
*vbo
= vbo_context(ctx
);
742 vbo
->draw_prims
= r300DrawPrims
;