1 /**************************************************************************
3 * Copyright 2009 Maciej Cencora
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL THE AUTHOR(S) AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 **************************************************************************/
29 #include "main/glheader.h"
30 #include "main/context.h"
31 #include "main/state.h"
32 #include "main/api_validate.h"
33 #include "main/enums.h"
34 #include "main/simple_list.h"
37 #include "r300_context.h"
38 #include "r300_emit.h"
39 #include "r300_render.h"
40 #include "r300_state.h"
42 #include "r300_cmdbuf.h"
44 #include "radeon_buffer_objects.h"
45 #include "radeon_common_context.h"
48 #include "tnl/t_vp_build.h"
49 #include "vbo/vbo_context.h"
50 #include "swrast/swrast.h"
51 #include "swrast_setup/swrast_setup.h"
54 static int getTypeSize(GLenum type
)
58 return sizeof(GLdouble
);
60 return sizeof(GLfloat
);
64 return sizeof(GLuint
);
66 return sizeof(GLshort
);
67 case GL_UNSIGNED_SHORT
:
68 return sizeof(GLushort
);
70 return sizeof(GLbyte
);
71 case GL_UNSIGNED_BYTE
:
72 return sizeof(GLubyte
);
79 static void r300FixupIndexBuffer(GLcontext
*ctx
, const struct _mesa_index_buffer
*mesa_ind_buf
)
81 r300ContextPtr r300
= R300_CONTEXT(ctx
);
85 GLboolean mapped_named_bo
= GL_FALSE
;
87 if (mesa_ind_buf
->obj
->Name
&& !mesa_ind_buf
->obj
->Pointer
) {
88 ctx
->Driver
.MapBuffer(ctx
, GL_ELEMENT_ARRAY_BUFFER
, GL_READ_ONLY_ARB
, mesa_ind_buf
->obj
);
89 mapped_named_bo
= GL_TRUE
;
90 assert(mesa_ind_buf
->obj
->Pointer
!= NULL
);
92 src_ptr
= ADD_POINTERS(mesa_ind_buf
->obj
->Pointer
, mesa_ind_buf
->ptr
);
94 radeon_print(RADEON_FALLBACKS
, RADEON_IMPORTANT
,
95 "%s: Fixing index buffer format. type %d\n",
96 __func__
, mesa_ind_buf
->type
);
98 if (mesa_ind_buf
->type
== GL_UNSIGNED_BYTE
) {
99 GLuint size
= sizeof(GLushort
) * ((mesa_ind_buf
->count
+ 1) & ~1);
100 GLubyte
*in
= (GLubyte
*)src_ptr
;
102 radeonAllocDmaRegion(&r300
->radeon
, &r300
->ind_buf
.bo
, &r300
->ind_buf
.bo_offset
, size
, 4);
104 assert(r300
->ind_buf
.bo
->ptr
!= NULL
);
105 out
= (GLuint
*)ADD_POINTERS(r300
->ind_buf
.bo
->ptr
, r300
->ind_buf
.bo_offset
);
107 for (i
= 0; i
+ 1 < mesa_ind_buf
->count
; i
+= 2) {
108 *out
++ = in
[i
] | in
[i
+ 1] << 16;
111 if (i
< mesa_ind_buf
->count
) {
116 } else { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
117 GLushort
*in
= (GLushort
*)src_ptr
;
118 GLuint size
= sizeof(GLushort
) * ((mesa_ind_buf
->count
+ 1) & ~1);
120 radeonAllocDmaRegion(&r300
->radeon
, &r300
->ind_buf
.bo
,
121 &r300
->ind_buf
.bo_offset
, size
, 4);
123 assert(r300
->ind_buf
.bo
->ptr
!= NULL
);
124 out
= (GLuint
*)ADD_POINTERS(r300
->ind_buf
.bo
->ptr
, r300
->ind_buf
.bo_offset
);
126 for (i
= 0; i
+ 1 < mesa_ind_buf
->count
; i
+= 2) {
127 *out
++ = in
[i
] | in
[i
+ 1] << 16;
130 if (i
< mesa_ind_buf
->count
) {
136 r300
->ind_buf
.is_32bit
= GL_FALSE
;
137 r300
->ind_buf
.count
= mesa_ind_buf
->count
;
139 if (mapped_named_bo
) {
140 ctx
->Driver
.UnmapBuffer(ctx
, GL_ELEMENT_ARRAY_BUFFER
, mesa_ind_buf
->obj
);
145 static void r300SetupIndexBuffer(GLcontext
*ctx
, const struct _mesa_index_buffer
*mesa_ind_buf
)
147 r300ContextPtr r300
= R300_CONTEXT(ctx
);
150 r300
->ind_buf
.bo
= NULL
;
153 radeon_print(RADEON_RENDER
, RADEON_TRACE
, "%s\n", __func__
);
156 if (mesa_ind_buf
->type
== GL_UNSIGNED_INT
) {
158 if (mesa_ind_buf
->type
!= GL_UNSIGNED_BYTE
) {
160 const GLvoid
*src_ptr
;
162 GLboolean mapped_named_bo
= GL_FALSE
;
164 if (mesa_ind_buf
->obj
->Name
&& !mesa_ind_buf
->obj
->Pointer
) {
165 ctx
->Driver
.MapBuffer(ctx
, GL_ELEMENT_ARRAY_BUFFER
, GL_READ_ONLY_ARB
, mesa_ind_buf
->obj
);
166 assert(mesa_ind_buf
->obj
->Pointer
!= NULL
);
167 mapped_named_bo
= GL_TRUE
;
170 src_ptr
= ADD_POINTERS(mesa_ind_buf
->obj
->Pointer
, mesa_ind_buf
->ptr
);
172 const GLuint size
= mesa_ind_buf
->count
* getTypeSize(mesa_ind_buf
->type
);
174 radeonAllocDmaRegion(&r300
->radeon
, &r300
->ind_buf
.bo
, &r300
->ind_buf
.bo_offset
, size
, 4);
176 assert(r300
->ind_buf
.bo
->ptr
!= NULL
);
177 dst_ptr
= ADD_POINTERS(r300
->ind_buf
.bo
->ptr
, r300
->ind_buf
.bo_offset
);
178 _mesa_memcpy(dst_ptr
, src_ptr
, size
);
180 r300
->ind_buf
.is_32bit
= (mesa_ind_buf
->type
== GL_UNSIGNED_INT
);
181 r300
->ind_buf
.count
= mesa_ind_buf
->count
;
183 if (mapped_named_bo
) {
184 ctx
->Driver
.UnmapBuffer(ctx
, GL_ELEMENT_ARRAY_BUFFER
, mesa_ind_buf
->obj
);
187 r300FixupIndexBuffer(ctx
, mesa_ind_buf
);
/* Convert `count` vertices of `TYPE` at src_ptr into floats at dst_ptr,
 * advancing src_ptr by `stride` per vertex.  MACRO performs the
 * normalized conversion; non-normalized data is plainly cast to float.
 * Relies on src_ptr, dst_ptr, stride, count and input being in scope at
 * the expansion site (r300ConvertAttrib). */
#define CONVERT( TYPE, MACRO ) do {		\
	GLuint i, j, sz;				\
	sz = input->Size;				\
	if (input->Normalized) {			\
		for (i = 0; i < count; i++) {		\
			const TYPE *in = (TYPE *)src_ptr;	\
			for (j = 0; j < sz; j++) {		\
				*dst_ptr++ = MACRO(*in);	\
				in++;				\
			}					\
			src_ptr += stride;			\
		}						\
	} else {					\
		for (i = 0; i < count; i++) {		\
			const TYPE *in = (TYPE *)src_ptr;	\
			for (j = 0; j < sz; j++) {		\
				*dst_ptr++ = (GLfloat)(*in);	\
				in++;				\
			}					\
			src_ptr += stride;			\
		}						\
	}						\
} while (0)
216 * Convert attribute data type to float
217 * If the attribute uses named buffer object replace the bo with newly allocated bo
219 static void r300ConvertAttrib(GLcontext
*ctx
, int count
, const struct gl_client_array
*input
, struct vertex_attribute
*attr
)
221 r300ContextPtr r300
= R300_CONTEXT(ctx
);
222 const GLvoid
*src_ptr
;
223 GLboolean mapped_named_bo
= GL_FALSE
;
227 stride
= (input
->StrideB
== 0) ? getTypeSize(input
->Type
) * input
->Size
: input
->StrideB
;
229 /* Convert value for first element only */
230 if (input
->StrideB
== 0)
233 if (input
->BufferObj
->Name
) {
234 if (!input
->BufferObj
->Pointer
) {
235 ctx
->Driver
.MapBuffer(ctx
, GL_ARRAY_BUFFER
, GL_READ_ONLY_ARB
, input
->BufferObj
);
236 mapped_named_bo
= GL_TRUE
;
239 src_ptr
= ADD_POINTERS(input
->BufferObj
->Pointer
, input
->Ptr
);
241 src_ptr
= input
->Ptr
;
244 radeonAllocDmaRegion(&r300
->radeon
, &attr
->bo
, &attr
->bo_offset
, sizeof(GLfloat
) * input
->Size
* count
, 32);
245 dst_ptr
= (GLfloat
*)ADD_POINTERS(attr
->bo
->ptr
, attr
->bo_offset
);
247 radeon_print(RADEON_FALLBACKS
, RADEON_IMPORTANT
,
248 "%s: Converting vertex attributes, attribute data format %x,"
249 "stride %d, components %d\n"
250 , __FUNCTION__
, input
->Type
251 , stride
, input
->Size
);
253 assert(src_ptr
!= NULL
);
255 switch (input
->Type
) {
257 CONVERT(GLdouble
, (GLfloat
));
259 case GL_UNSIGNED_INT
:
260 CONVERT(GLuint
, UINT_TO_FLOAT
);
263 CONVERT(GLint
, INT_TO_FLOAT
);
265 case GL_UNSIGNED_SHORT
:
266 CONVERT(GLushort
, USHORT_TO_FLOAT
);
269 CONVERT(GLshort
, SHORT_TO_FLOAT
);
271 case GL_UNSIGNED_BYTE
:
272 assert(input
->Format
!= GL_BGRA
);
273 CONVERT(GLubyte
, UBYTE_TO_FLOAT
);
276 CONVERT(GLbyte
, BYTE_TO_FLOAT
);
283 if (mapped_named_bo
) {
284 ctx
->Driver
.UnmapBuffer(ctx
, GL_ARRAY_BUFFER
, input
->BufferObj
);
288 static void r300AlignDataToDword(GLcontext
*ctx
, const struct gl_client_array
*input
, int count
, struct vertex_attribute
*attr
)
290 r300ContextPtr r300
= R300_CONTEXT(ctx
);
291 const int dst_stride
= (input
->StrideB
+ 3) & ~3;
292 const int size
= getTypeSize(input
->Type
) * input
->Size
* count
;
293 GLboolean mapped_named_bo
= GL_FALSE
;
295 radeonAllocDmaRegion(&r300
->radeon
, &attr
->bo
, &attr
->bo_offset
, size
, 32);
297 if (!input
->BufferObj
->Pointer
) {
298 ctx
->Driver
.MapBuffer(ctx
, GL_ARRAY_BUFFER
, GL_READ_ONLY_ARB
, input
->BufferObj
);
299 mapped_named_bo
= GL_TRUE
;
302 radeon_print(RADEON_FALLBACKS
, RADEON_IMPORTANT
, "%s. Vertex alignment doesn't match hw requirements.\n", __func__
);
305 GLvoid
*src_ptr
= ADD_POINTERS(input
->BufferObj
->Pointer
, input
->Ptr
);
306 GLvoid
*dst_ptr
= ADD_POINTERS(attr
->bo
->ptr
, attr
->bo_offset
);
309 for (i
= 0; i
< count
; ++i
) {
310 _mesa_memcpy(dst_ptr
, src_ptr
, input
->StrideB
);
311 src_ptr
+= input
->StrideB
;
312 dst_ptr
+= dst_stride
;
316 if (mapped_named_bo
) {
317 ctx
->Driver
.UnmapBuffer(ctx
, GL_ARRAY_BUFFER
, input
->BufferObj
);
320 attr
->stride
= dst_stride
;
323 static void r300TranslateAttrib(GLcontext
*ctx
, GLuint attr
, int count
, const struct gl_client_array
*input
)
325 r300ContextPtr r300
= R300_CONTEXT(ctx
);
326 struct r300_vertex_buffer
*vbuf
= &r300
->vbuf
;
327 struct vertex_attribute r300_attr
;
331 radeon_print(RADEON_RENDER
, RADEON_TRACE
, "%s\n", __func__
);
332 stride
= (input
->StrideB
== 0) ? getTypeSize(input
->Type
) * input
->Size
: input
->StrideB
;
334 if (input
->Type
== GL_DOUBLE
|| input
->Type
== GL_UNSIGNED_INT
|| input
->Type
== GL_INT
||
336 getTypeSize(input
->Type
) != 4 ||
342 if (input
->StrideB
== 0) {
343 r300_attr
.stride
= 0;
345 r300_attr
.stride
= sizeof(GLfloat
) * input
->Size
;
347 r300_attr
.dwords
= input
->Size
;
348 r300_attr
.is_named_bo
= GL_FALSE
;
351 r300_attr
.dwords
= (getTypeSize(type
) * input
->Size
+ 3)/ 4;
352 if (!input
->BufferObj
->Name
) {
354 if (input
->StrideB
== 0) {
355 r300_attr
.stride
= 0;
357 r300_attr
.stride
= (getTypeSize(type
) * input
->Size
+ 3) & ~3;
360 r300_attr
.is_named_bo
= GL_FALSE
;
364 r300_attr
.size
= input
->Size
;
365 r300_attr
.element
= attr
;
366 r300_attr
.dst_loc
= vbuf
->num_attribs
;
370 switch (input
->Size
) {
371 case 1: r300_attr
.data_type
= R300_DATA_TYPE_FLOAT_1
; break;
372 case 2: r300_attr
.data_type
= R300_DATA_TYPE_FLOAT_2
; break;
373 case 3: r300_attr
.data_type
= R300_DATA_TYPE_FLOAT_3
; break;
374 case 4: r300_attr
.data_type
= R300_DATA_TYPE_FLOAT_4
; break;
376 r300_attr
._signed
= 0;
377 r300_attr
.normalize
= 0;
380 r300_attr
._signed
= 1;
381 r300_attr
.normalize
= input
->Normalized
;
382 switch (input
->Size
) {
385 r300_attr
.data_type
= R300_DATA_TYPE_SHORT_2
;
389 r300_attr
.data_type
= R300_DATA_TYPE_SHORT_4
;
394 r300_attr
._signed
= 1;
395 r300_attr
.normalize
= input
->Normalized
;
396 r300_attr
.data_type
= R300_DATA_TYPE_BYTE
;
398 case GL_UNSIGNED_SHORT
:
399 r300_attr
._signed
= 0;
400 r300_attr
.normalize
= input
->Normalized
;
401 switch (input
->Size
) {
404 r300_attr
.data_type
= R300_DATA_TYPE_SHORT_2
;
408 r300_attr
.data_type
= R300_DATA_TYPE_SHORT_4
;
412 case GL_UNSIGNED_BYTE
:
413 r300_attr
._signed
= 0;
414 r300_attr
.normalize
= input
->Normalized
;
415 if (input
->Format
== GL_BGRA
)
416 r300_attr
.data_type
= R300_DATA_TYPE_D3DCOLOR
;
418 r300_attr
.data_type
= R300_DATA_TYPE_BYTE
;
424 case GL_UNSIGNED_INT
:
429 switch (input
->Size
) {
431 r300_attr
.swizzle
= SWIZZLE_XYZW
;
434 r300_attr
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
, SWIZZLE_Z
, SWIZZLE_ONE
);
437 r300_attr
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_Y
, SWIZZLE_ZERO
, SWIZZLE_ONE
);
440 r300_attr
.swizzle
= MAKE_SWIZZLE4(SWIZZLE_X
, SWIZZLE_ZERO
, SWIZZLE_ZERO
, SWIZZLE_ONE
);
444 r300_attr
.write_mask
= MASK_XYZW
;
446 vbuf
->attribs
[vbuf
->num_attribs
] = r300_attr
;
450 static void r300SetVertexFormat(GLcontext
*ctx
, const struct gl_client_array
*arrays
[], int count
)
452 r300ContextPtr r300
= R300_CONTEXT(ctx
);
453 struct r300_vertex_buffer
*vbuf
= &r300
->vbuf
;
454 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
, "%s\n", __func__
);
458 tmp
= r300
->selected_vp
->code
.InputsRead
;
460 vbuf
->num_attribs
= 0;
462 /* find first enabled bit */
468 r300TranslateAttrib(ctx
, i
, count
, arrays
[i
]);
475 r300SwitchFallback(ctx
, R300_FALLBACK_AOS_LIMIT
, vbuf
->num_attribs
> R300_MAX_AOS_ARRAYS
);
480 static void r300AllocDmaRegions(GLcontext
*ctx
, const struct gl_client_array
*input
[], int count
)
482 r300ContextPtr r300
= R300_CONTEXT(ctx
);
483 struct r300_vertex_buffer
*vbuf
= &r300
->vbuf
;
487 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
,
488 "%s: count %d num_attribs %d\n",
489 __func__
, count
, vbuf
->num_attribs
);
491 for (index
= 0; index
< vbuf
->num_attribs
; index
++) {
492 struct radeon_aos
*aos
= &r300
->radeon
.tcl
.aos
[index
];
493 i
= vbuf
->attribs
[index
].element
;
495 stride
= (input
[i
]->StrideB
== 0) ? getTypeSize(input
[i
]->Type
) * input
[i
]->Size
: input
[i
]->StrideB
;
497 if (input
[i
]->Type
== GL_DOUBLE
|| input
[i
]->Type
== GL_UNSIGNED_INT
|| input
[i
]->Type
== GL_INT
||
499 getTypeSize(input
[i
]->Type
) != 4 ||
503 r300ConvertAttrib(ctx
, count
, input
[i
], &vbuf
->attribs
[index
]);
505 if (input
[i
]->BufferObj
->Name
) {
506 if (stride
% 4 != 0) {
507 assert(((intptr_t) input
[i
]->Ptr
) % input
[i
]->StrideB
== 0);
508 r300AlignDataToDword(ctx
, input
[i
], count
, &vbuf
->attribs
[index
]);
509 vbuf
->attribs
[index
].is_named_bo
= GL_FALSE
;
511 vbuf
->attribs
[index
].stride
= input
[i
]->StrideB
;
512 vbuf
->attribs
[index
].bo_offset
= (intptr_t) input
[i
]->Ptr
;
513 vbuf
->attribs
[index
].bo
= get_radeon_buffer_object(input
[i
]->BufferObj
)->bo
;
514 vbuf
->attribs
[index
].is_named_bo
= GL_TRUE
;
519 int local_count
= count
;
522 if (input
[i
]->StrideB
== 0) {
523 size
= getTypeSize(input
[i
]->Type
) * input
[i
]->Size
;
526 size
= getTypeSize(input
[i
]->Type
) * input
[i
]->Size
* local_count
;
529 radeonAllocDmaRegion(&r300
->radeon
, &vbuf
->attribs
[index
].bo
, &vbuf
->attribs
[index
].bo_offset
, size
, 32);
530 assert(vbuf
->attribs
[index
].bo
->ptr
!= NULL
);
531 dst
= (uint32_t *)ADD_POINTERS(vbuf
->attribs
[index
].bo
->ptr
, vbuf
->attribs
[index
].bo_offset
);
532 switch (vbuf
->attribs
[index
].dwords
) {
533 case 1: radeonEmitVec4(dst
, input
[i
]->Ptr
, input
[i
]->StrideB
, local_count
); break;
534 case 2: radeonEmitVec8(dst
, input
[i
]->Ptr
, input
[i
]->StrideB
, local_count
); break;
535 case 3: radeonEmitVec12(dst
, input
[i
]->Ptr
, input
[i
]->StrideB
, local_count
); break;
536 case 4: radeonEmitVec16(dst
, input
[i
]->Ptr
, input
[i
]->StrideB
, local_count
); break;
537 default: assert(0); break;
543 aos
->count
= vbuf
->attribs
[index
].stride
== 0 ? 1 : count
;
544 aos
->stride
= vbuf
->attribs
[index
].stride
/ sizeof(float);
545 aos
->components
= vbuf
->attribs
[index
].dwords
;
546 aos
->bo
= vbuf
->attribs
[index
].bo
;
547 aos
->offset
= vbuf
->attribs
[index
].bo_offset
;
549 if (vbuf
->attribs
[index
].is_named_bo
) {
550 radeon_cs_space_add_persistent_bo(r300
->radeon
.cmdbuf
.cs
, r300
->vbuf
.attribs
[index
].bo
, RADEON_GEM_DOMAIN_GTT
, 0);
554 r300
->radeon
.tcl
.aos_count
= vbuf
->num_attribs
;
555 ret
= radeon_cs_space_check_with_bo(r300
->radeon
.cmdbuf
.cs
, first_elem(&r300
->radeon
.dma
.reserved
)->bo
, RADEON_GEM_DOMAIN_GTT
, 0);
556 r300SwitchFallback(ctx
, R300_FALLBACK_INVALID_BUFFERS
, ret
);
560 static void r300FreeData(GLcontext
*ctx
)
562 /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
563 * to prevent double unref in radeonReleaseArrays
564 * called during context destroy
566 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
, "%s\n", __func__
);
567 r300ContextPtr r300
= R300_CONTEXT(ctx
);
571 for (i
= 0; i
< r300
->vbuf
.num_attribs
; i
++) {
572 if (!r300
->vbuf
.attribs
[i
].is_named_bo
) {
573 radeon_bo_unref(r300
->vbuf
.attribs
[i
].bo
);
575 r300
->radeon
.tcl
.aos
[i
].bo
= NULL
;
580 if (r300
->ind_buf
.bo
!= NULL
) {
581 radeon_bo_unref(r300
->ind_buf
.bo
);
586 static GLuint
r300PredictTryDrawPrimsSize(GLcontext
*ctx
, GLuint nr_prims
)
588 struct r300_context
*r300
= R300_CONTEXT(ctx
);
589 struct r300_vertex_buffer
*vbuf
= &r300
->vbuf
;
594 dwords
= 2*CACHE_FLUSH_BUFSZ
;
595 dwords
+= PRE_EMIT_STATE_BUFSZ
;
596 dwords
+= (AOS_BUFSZ(vbuf
->num_attribs
)
598 + FIREAOS_BUFSZ
)*nr_prims
;
600 state_size
= radeonCountStateEmitSize(&r300
->radeon
);
601 flushed
= rcommonEnsureCmdBufSpace(&r300
->radeon
,
605 dwords
+= radeonCountStateEmitSize(&r300
->radeon
);
607 dwords
+= state_size
;
609 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
, "%s: total prediction size is %d.\n", __FUNCTION__
, dwords
);
613 static GLboolean
r300TryDrawPrims(GLcontext
*ctx
,
614 const struct gl_client_array
*arrays
[],
615 const struct _mesa_prim
*prim
,
617 const struct _mesa_index_buffer
*ib
,
621 struct r300_context
*r300
= R300_CONTEXT(ctx
);
624 radeon_print(RADEON_RENDER
, RADEON_NORMAL
, "%s: %u (%d-%d) cs begin at %d\n",
625 __FUNCTION__
, nr_prims
, min_index
, max_index
, r300
->radeon
.cmdbuf
.cs
->cdw
);
628 _mesa_update_state( ctx
);
630 if (r300
->options
.hw_tcl_enabled
)
631 _tnl_UpdateFixedFunctionProgram(ctx
);
633 r300UpdateShaders(r300
);
635 r300SwitchFallback(ctx
, R300_FALLBACK_INVALID_BUFFERS
, !r300ValidateBuffers(ctx
));
637 r300SetVertexFormat(ctx
, arrays
, max_index
+ 1);
642 r300SetupVAP(ctx
, r300
->selected_vp
->code
.InputsRead
, r300
->selected_vp
->code
.OutputsWritten
);
644 r300UpdateShaderStates(r300
);
646 /* ensure we have the cmd buf space in advance to cover
647 * the state + DMA AOS pointers */
648 GLuint emit_end
= r300PredictTryDrawPrimsSize(ctx
, nr_prims
)
649 + r300
->radeon
.cmdbuf
.cs
->cdw
;
651 r300SetupIndexBuffer(ctx
, ib
);
653 r300AllocDmaRegions(ctx
, arrays
, max_index
+ 1);
658 r300EmitCacheFlush(r300
);
659 radeonEmitState(&r300
->radeon
);
661 for (i
= 0; i
< nr_prims
; ++i
) {
662 r300RunRenderPrimitive(ctx
, prim
[i
].start
, prim
[i
].start
+ prim
[i
].count
, prim
[i
].mode
);
665 r300EmitCacheFlush(r300
);
669 radeon_print(RADEON_RENDER
, RADEON_VERBOSE
, "%s: %u (%d-%d) cs ending at %d\n",
670 __FUNCTION__
, nr_prims
, min_index
, max_index
, r300
->radeon
.cmdbuf
.cs
->cdw
);
672 if (emit_end
< r300
->radeon
.cmdbuf
.cs
->cdw
)
673 WARN_ONCE("Rendering was %d commands larger than predicted size."
674 " We might overflow command buffer.\n", r300
->radeon
.cmdbuf
.cs
->cdw
- emit_end
);
679 static void r300DrawPrims(GLcontext
*ctx
,
680 const struct gl_client_array
*arrays
[],
681 const struct _mesa_prim
*prim
,
683 const struct _mesa_index_buffer
*ib
,
684 GLboolean index_bounds_valid
,
690 /* This check should get folded into just the places that
691 * min/max index are really needed.
693 if (!index_bounds_valid
) {
694 vbo_get_minmax_index(ctx
, prim
, ib
, &min_index
, &max_index
);
698 radeon_print(RADEON_FALLBACKS
, RADEON_IMPORTANT
,
699 "%s: Rebasing primitives. %p nr_prims %d min_index %u max_index %u\n",
700 __func__
, prim
, nr_prims
, min_index
, max_index
);
701 vbo_rebase_prims( ctx
, arrays
, prim
, nr_prims
, ib
, min_index
, max_index
, r300DrawPrims
);
705 /* Make an attempt at drawing */
706 retval
= r300TryDrawPrims(ctx
, arrays
, prim
, nr_prims
, ib
, min_index
, max_index
);
708 /* If failed run tnl pipeline - it should take care of fallbacks */
710 _tnl_draw_prims(ctx
, arrays
, prim
, nr_prims
, ib
, min_index
, max_index
);
713 void r300InitDraw(GLcontext
*ctx
)
715 struct vbo_context
*vbo
= vbo_context(ctx
);
717 vbo
->draw_prims
= r300DrawPrims
;