/*
 * Copyright (C) 2008-2010  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 */
#include "main/glheader.h"
#include "main/state.h"
#include "main/imports.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/context.h"

#include "main/simple_list.h"
#include "main/api_arrayelt.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"

#include "tnl/t_vp_build.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "vbo/vbo_context.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "evergreen_fragprog.h"
#include "evergreen_vertprog.h"

#include "evergreen_state.h"
#include "evergreen_tex.h"

#include "radeon_buffer_objects.h"
#include "radeon_common_context.h"
/* Map a GL primitive mode to the corresponding VGT DI_PT_* primitive type. */
static unsigned int evergreenPrimitiveType(int prim) //same
{
    switch (prim & PRIM_MODE_MASK)
    {
    case GL_POINTS:
        return DI_PT_POINTLIST;
    case GL_LINES:
        return DI_PT_LINELIST;
    case GL_LINE_STRIP:
        return DI_PT_LINESTRIP;
    case GL_LINE_LOOP:
        return DI_PT_LINELOOP;
    case GL_TRIANGLES:
        return DI_PT_TRILIST;
    case GL_TRIANGLE_STRIP:
        return DI_PT_TRISTRIP;
    case GL_TRIANGLE_FAN:
        return DI_PT_TRIFAN;
    case GL_QUADS:
        return DI_PT_QUADLIST;
    case GL_QUAD_STRIP:
        return DI_PT_QUADSTRIP;
    case GL_POLYGON:
        return DI_PT_POLYGON;
    default:
        assert(0);
        return -1;
    }
}
/* Return the number of vertices that can actually be drawn for the given
 * primitive mode, dropping any trailing vertices that do not form a
 * complete primitive. */
static int evergreenNumVerts(int num_verts, int prim) //same
{
    int verts_off = 0;

    switch (prim & PRIM_MODE_MASK) {
    case GL_POINTS:
        verts_off = 0;
        break;
    case GL_LINES:
        verts_off = num_verts % 2;
        break;
    case GL_LINE_STRIP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_LINE_LOOP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_TRIANGLES:
        verts_off = num_verts % 3;
        break;
    case GL_TRIANGLE_STRIP:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_TRIANGLE_FAN:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_QUADS:
        verts_off = num_verts % 4;
        break;
    case GL_QUAD_STRIP:
        if (num_verts < 4)
            verts_off = num_verts;
        else
            verts_off = num_verts % 2;
        break;
    case GL_POLYGON:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    default:
        assert(0);
        return -1;
    }

    return num_verts - verts_off;
}
/* Emit an indexed draw that fetches indices from the DMA index buffer
 * set up in context->ind_buf. */
static void evergreenRunRenderPrimitive(struct gl_context *ctx, int start, int end, int prim,
                                        GLint basevertex) //same
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, total_emit;
    GLuint num_indices;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;

    type = evergreenPrimitiveType(prim);
    num_indices = evergreenNumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);

    if (GL_TRUE != context->ind_buf.is_32bit)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_SRC_SEL_DMA, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    total_emit = 3 /* VGT_PRIMITIVE_TYPE */
               + 2 /* VGT_INDEX_TYPE */
               + 2 /* NUM_INSTANCES */
               + 4 /* VTX_BASE_VTX_LOC + VTX_START_INST_LOC */
               + 5 + 2; /* DRAW_INDEX */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    /* primitive type */
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    /* index type */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    /* number of instances */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* offset */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 2));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(basevertex); //VTX_BASE_VTX_LOC
    R600_OUT_BATCH(0);          //VTX_START_INST_LOC
    /* draw packet */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX, 3));
    R600_OUT_BATCH(context->ind_buf.bo_offset);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(vgt_num_indices);
    R600_OUT_BATCH(vgt_draw_initiator);
    R600_OUT_BATCH_RELOC(context->ind_buf.bo_offset,
                         context->ind_buf.bo,
                         context->ind_buf.bo_offset,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}
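/* For reference: the packet sequence above is 18 dwords per primitive
 * (3 + 2 + 2 + 4 + 5 + 2), which matches the per-primitive estimate of
 * 18 dwords that evergreenPredictRenderSize() reserves for indexed draws. */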
/* Emit a non-DMA draw: either auto-generated indices (start == 0) or
 * indices embedded immediately in the command stream. */
static void evergreenRunRenderPrimitiveImmediate(struct gl_context *ctx, int start, int end, int prim) //same
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, i;
    uint32_t num_indices, total_emit = 0;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;

    type = evergreenPrimitiveType(prim);
    num_indices = evergreenNumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    if (num_indices > 0xffff)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }
    else
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    if (start == 0)
    {
        SETfield(vgt_draw_initiator, DI_SRC_SEL_AUTO_INDEX, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            total_emit += num_indices;
        }
        else
        {
            total_emit += (num_indices + 1) / 2;
        }
        SETfield(vgt_draw_initiator, DI_SRC_SEL_IMMEDIATE, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }

    total_emit += 3 /* VGT_PRIMITIVE_TYPE */
                + 2 /* VGT_INDEX_TYPE */
                + 2 /* NUM_INSTANCES */
                + 4 /* VTX_BASE_VTX_LOC + VTX_START_INST_LOC */
                + 3; /* DRAW */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    /* primitive type */
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    /* index type */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    /* number of instances */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* offset */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 2));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0); //VTX_BASE_VTX_LOC
    R600_OUT_BATCH(0); //VTX_START_INST_LOC
    /* draw packet */
    if (start == 0)
    {
        R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
        R600_OUT_BATCH(vgt_num_indices);
        R600_OUT_BATCH(vgt_draw_initiator);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            /* one 32-bit index per dword */
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (num_indices + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i++)
            {
                R600_OUT_BATCH(i);
            }
        }
        else
        {
            /* two 16-bit indices packed per dword */
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (((num_indices + 1) / 2) + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i += 2)
            {
                if ((i + 1) == (start + num_indices))
                {
                    R600_OUT_BATCH(i);
                }
                else
                {
                    R600_OUT_BATCH(((i + 1) << 16) | (i));
                }
            }
        }
    }

    END_BATCH();
    COMMIT_BATCH();
}
#define CONVERT( TYPE, MACRO ) do {             \
    GLuint i, j, sz = input->Size;              \
    if (input->Normalized) {                    \
        for (i = 0; i < count; i++) {           \
            const TYPE *in = (TYPE *)src_ptr;   \
            for (j = 0; j < sz; j++) {          \
                *dst_ptr++ = MACRO(*in);        \
                in++;                           \
            }                                   \
            src_ptr += stride;                  \
        }                                       \
    } else {                                    \
        for (i = 0; i < count; i++) {           \
            const TYPE *in = (TYPE *)src_ptr;   \
            for (j = 0; j < sz; j++) {          \
                *dst_ptr++ = (GLfloat)(*in);    \
                in++;                           \
            }                                   \
            src_ptr += stride;                  \
        }                                       \
    }                                           \
} while (0)
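/* Usage sketch (illustrative): inside evergreenConvertAttrib() below, a call
 * such as CONVERT(GLshort, SHORT_TO_FLOAT) loops over `count` array elements
 * of `input->Size` components each, writing every component to dst_ptr as a
 * float (via SHORT_TO_FLOAT when input->Normalized is set, otherwise a plain
 * (GLfloat) cast) and advancing src_ptr by `stride` bytes per element. */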
/**
 * Convert attribute data type to float.
 * If the attribute uses a named buffer object, replace the bo with a newly
 * allocated bo holding the converted data.
 */
static void evergreenConvertAttrib(struct gl_context *ctx, int count,
                                   const struct gl_client_array *input,
                                   struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const GLvoid *src_ptr;
    GLboolean mapped_named_bo = GL_FALSE;
    GLfloat *dst_ptr;
    GLuint stride;

    stride = (input->StrideB == 0) ? evergreen_getTypeSize(input->Type) * input->Size : input->StrideB;

    /* Convert value for first element only */
    if (input->StrideB == 0)
    {
        count = 1;
    }

    if (input->BufferObj->Name)
    {
        if (!input->BufferObj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
    }
    else
    {
        src_ptr = input->Ptr;
    }

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset,
                         sizeof(GLfloat) * input->Size * count, 32);

    radeon_bo_map(attr->bo, 1);

    dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

    assert(src_ptr != NULL);

    switch (input->Type)
    {
    case GL_DOUBLE:
        CONVERT(GLdouble, (GLfloat));
        break;
    case GL_UNSIGNED_INT:
        CONVERT(GLuint, UINT_TO_FLOAT);
        break;
    case GL_INT:
        CONVERT(GLint, INT_TO_FLOAT);
        break;
    case GL_UNSIGNED_SHORT:
        CONVERT(GLushort, USHORT_TO_FLOAT);
        break;
    case GL_SHORT:
        CONVERT(GLshort, SHORT_TO_FLOAT);
        break;
    case GL_UNSIGNED_BYTE:
        assert(input->Format != GL_BGRA);
        CONVERT(GLubyte, UBYTE_TO_FLOAT);
        break;
    case GL_BYTE:
        CONVERT(GLbyte, BYTE_TO_FLOAT);
        break;
    default:
        assert(0);
        break;
    }

    radeon_bo_unmap(attr->bo);

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }
}
/* Repack 8-bit (and pass through 16-bit) index buffers into a DMA buffer of
 * 16-bit indices, two per dword, since the hardware cannot fetch byte indices. */
static void evergreenFixupIndexBuffer(struct gl_context *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    const GLvoid *src_ptr;
    GLuint *out;
    GLuint i;
    GLboolean mapped_named_bo = GL_FALSE;

    if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
        mapped_named_bo = GL_TRUE;
        assert(mesa_ind_buf->obj->Pointer != NULL);
    }
    src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

    if (mesa_ind_buf->type == GL_UNSIGNED_BYTE)
    {
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
        GLubyte *in = (GLubyte *)src_ptr;

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }

        radeon_bo_unmap(context->ind_buf.bo);
    }
    else
    { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
        GLushort *in = (GLushort *)src_ptr;
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }

        radeon_bo_unmap(context->ind_buf.bo);
    }

    context->ind_buf.is_32bit = GL_FALSE;
    context->ind_buf.count = mesa_ind_buf->count;

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
    }
}
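/* Example of the packing above: indices {5, 9, 11} end up in the DMA buffer
 * as two dwords, 0x00090005 and 0x0000000b, i.e. two 16-bit indices per
 * dword with the first index in the low half. */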
static GLboolean evergreen_check_fallbacks(struct gl_context *ctx) //same
{
    if (ctx->RenderMode != GL_RENDER)
        return GL_TRUE;

    return GL_FALSE;
}
/* start 3d, idle, cb/db flush */
#define PRE_EMIT_STATE_BUFSZ 5 + 5 + 14
/* Predict how many command-buffer dwords this draw will need and make sure
 * there is room, flushing first if necessary. */
static GLuint evergreenPredictRenderSize(struct gl_context *ctx,
                                         const struct _mesa_prim *prim,
                                         const struct _mesa_index_buffer *ib,
                                         GLuint nr_prims)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    GLboolean flushed;
    GLuint dwords, i;
    GLuint state_size;

    dwords = PRE_EMIT_STATE_BUFSZ;
    if (ib)
        dwords += nr_prims * 18;
    else {
        for (i = 0; i < nr_prims; ++i)
        {
            if (prim[i].start == 0)
                dwords += 14;
            else if (prim[i].count > 0xffff)
                dwords += prim[i].count + 14;
            else
                dwords += ((prim[i].count + 1) / 2) + 14;
        }
    }

    state_size = radeonCountStateEmitSize(&context->radeon);
    flushed = rcommonEnsureCmdBufSpace(&context->radeon,
                                       dwords + state_size,
                                       __FUNCTION__);
    if (flushed)
        dwords += radeonCountStateEmitSize(&context->radeon);
    else
        dwords += state_size;

    radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);

    return dwords;
}
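/* Example estimate: an indexed draw of N primitives is budgeted at N * 18
 * dwords (the DRAW_INDEX packet sequence emitted by
 * evergreenRunRenderPrimitive above), while an immediate-mode primitive that
 * does not start at vertex 0 also reserves room for its index payload:
 * `count` dwords for 32-bit indices or (count + 1) / 2 dwords for packed
 * 16-bit indices. */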
/* Copy the application's index buffer into a DMA buffer, or hand it to
 * evergreenFixupIndexBuffer() when the index type needs repacking. */
static void evergreenSetupIndexBuffer(struct gl_context *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);

    if (!mesa_ind_buf) {
        context->ind_buf.bo = NULL;
        return;
    }

#if MESA_BIG_ENDIAN
    if (mesa_ind_buf->type == GL_UNSIGNED_INT)
#else
    if (mesa_ind_buf->type != GL_UNSIGNED_BYTE)
#endif
    {
        const GLvoid *src_ptr;
        GLvoid *dst_ptr;
        GLboolean mapped_named_bo = GL_FALSE;

        if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
            assert(mesa_ind_buf->obj->Pointer != NULL);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

        const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);
        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        dst_ptr = ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        memcpy(dst_ptr, src_ptr, size);

        radeon_bo_unmap(context->ind_buf.bo);
        context->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
        context->ind_buf.count = mesa_ind_buf->count;

        if (mapped_named_bo)
        {
            ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
        }
    }
    else
    {
        evergreenFixupIndexBuffer(ctx, mesa_ind_buf);
    }
}
/* Copy a vertex attribute array into a DMA buffer, padding each element so
 * that the destination stride is dword aligned. */
static void evergreenAlignDataToDword(struct gl_context *ctx,
                                      const struct gl_client_array *input,
                                      int count,
                                      struct StreamDesc *attr)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    const int dst_stride = (input->StrideB + 3) & ~3;
    const int size = getTypeSize(input->Type) * input->Size * count;
    GLboolean mapped_named_bo = GL_FALSE;

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset, size, 32);

    radeon_bo_map(attr->bo, 1);

    if (!input->BufferObj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
        mapped_named_bo = GL_TRUE;
    }

    {
        GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
        GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
        int i;

        for (i = 0; i < count; ++i)
        {
            memcpy(dst_ptr, src_ptr, input->StrideB);
            src_ptr += input->StrideB;
            dst_ptr += dst_stride;
        }
    }

    radeon_bo_unmap(attr->bo);
    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }

    attr->stride = dst_stride;
}
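/* The destination stride is rounded up to a dword multiple by
 * (StrideB + 3) & ~3; for example, a packed 6-byte attribute is copied out
 * with an 8-byte stride so every element starts on a 4-byte boundary. */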
/* Build the array-of-structures descriptors for all active vertex streams,
 * converting or uploading client arrays into GPU-visible buffers as needed. */
static void evergreenSetupStreams(struct gl_context *ctx, const struct gl_client_array *input[], int count)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    GLuint stride;
    int ret;
    int i, index;

    EVERGREEN_STATECHANGE(context, vtx);

    for (index = 0; index < context->nNumActiveAos; index++)
    {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT
#if MESA_BIG_ENDIAN
            || getTypeSize(input[i]->Type) != 4
#endif
            )
        {
            evergreenConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        }
        else
        {
            if (input[i]->BufferObj->Name)
            {
                context->stream_desc[index].stride = input[i]->StrideB;
                context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
                context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                context->stream_desc[index].is_named_bo = GL_TRUE;
            }
            else
            {
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0)
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                }
                else
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo,
                                     &context->stream_desc[index].bo_offset, size, 32);

                radeon_bo_map(context->stream_desc[index].bo, 1);
                assert(context->stream_desc[index].bo->ptr != NULL);

                dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr,
                                               context->stream_desc[index].bo_offset);

                switch (context->stream_desc[index].dwords)
                {
                case 1:
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 2:
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 3:
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 4:
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                default:
                    assert(0);
                    break;
                }

                radeon_bo_unmap(context->stream_desc[index].bo);
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if (context->stream_desc[index].is_named_bo)
        {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs,
                                              context->stream_desc[index].bo,
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    ret = radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs,
                                        first_elem(&context->radeon.dma.reserved)->bo,
                                        RADEON_GEM_DOMAIN_GTT, 0);
}
static void evergreenFreeData(struct gl_context *ctx)
{
    /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
     * to prevent a double unref in radeonReleaseArrays,
     * which is called during context destruction.
     */
    context_t *context = EVERGREEN_CONTEXT(ctx);
    int i;

    for (i = 0; i < context->nNumActiveAos; i++)
    {
        if (!context->stream_desc[i].is_named_bo)
        {
            radeon_bo_unref(context->stream_desc[i].bo);
        }
        context->radeon.tcl.aos[i].bo = NULL;
    }

    if (context->vp_Constbo != NULL)
    {
        radeon_bo_unref(context->vp_Constbo);
        context->vp_Constbo = NULL;
    }
    if (context->fp_Constbo != NULL)
    {
        radeon_bo_unref(context->fp_Constbo);
        context->fp_Constbo = NULL;
    }

    if (context->ind_buf.bo != NULL)
    {
        radeon_bo_unref(context->ind_buf.bo);
    }
}
static GLboolean evergreenTryDrawPrims(struct gl_context *ctx,
                                       const struct gl_client_array *arrays[],
                                       const struct _mesa_prim *prim,
                                       GLuint nr_prims,
                                       const struct _mesa_index_buffer *ib,
                                       GLuint min_index,
                                       GLuint max_index)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    GLuint i, id = 0;
    struct radeon_renderbuffer *rrb;

    if (ctx->NewState)
        _mesa_update_state( ctx );

    if (evergreen_check_fallbacks(ctx))
        return GL_FALSE;

    _tnl_UpdateFixedFunctionProgram(ctx);
    evergreenSetVertexFormat(ctx, arrays, max_index + 1);

    /* shaders need to be updated before buffers are validated */
    evergreenUpdateShaders(ctx);
    if (!evergreenValidateBuffers(ctx))
        return GL_FALSE;

    /* always emit CB base to prevent
     * lockups on some chips.
     */
    EVERGREEN_STATECHANGE(context, cb);
    /* mark vtx as dirty since it changes per-draw */
    EVERGREEN_STATECHANGE(context, vtx);

    evergreenSetScissor(context);

    evergreenSetupVertexProgram(ctx);
    evergreenSetupFragmentProgram(ctx);
    evergreenUpdateShaderStates(ctx);

    GLuint emit_end = evergreenPredictRenderSize(ctx, prim, ib, nr_prims)
                    + context->radeon.cmdbuf.cs->cdw;

    /* evergreenPredictRenderSize will call radeonReleaseDmaRegions, so update VP/FP const buf after it. */
    evergreenSetupVPconstants(ctx);
    evergreenSetupFPconstants(ctx);

    evergreenSetupIndexBuffer(ctx, ib);

    evergreenSetupStreams(ctx, arrays, max_index + 1);

    radeonEmitState(radeon);

    radeon_debug_add_indent();
    for (i = 0; i < nr_prims; ++i)
    {
        if (context->ind_buf.bo)
            evergreenRunRenderPrimitive(ctx,
                                        prim[i].start,
                                        prim[i].start + prim[i].count,
                                        prim[i].mode,
                                        prim[i].basevertex);
        else
            evergreenRunRenderPrimitiveImmediate(ctx,
                                                 prim[i].start,
                                                 prim[i].start + prim[i].count,
                                                 prim[i].mode);
    }
    radeon_debug_remove_indent();

    /* Flush render ops cached for the last several quads. */
    /* XXX drm should handle this in fence submit */

    //evergreeWaitForIdleClean(context);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     CB_ACTION_ENA_bit | (1 << (id + 6)));

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    evergreenFreeData(ctx);

    if (emit_end < context->radeon.cmdbuf.cs->cdw)
    {
        WARN_ONCE("Rendering was %d commands larger than predicted size."
                  " We might overflow the command buffer.\n",
                  context->radeon.cmdbuf.cs->cdw - emit_end);
    }

    return GL_TRUE;
}
static void evergreenDrawPrims(struct gl_context *ctx,
                               const struct gl_client_array *arrays[],
                               const struct _mesa_prim *prim,
                               GLuint nr_prims,
                               const struct _mesa_index_buffer *ib,
                               GLboolean index_bounds_valid,
                               GLuint min_index,
                               GLuint max_index)
{
    GLboolean retval = GL_FALSE;

    context_t *context = EVERGREEN_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    radeon_prepare_render(radeon);

    /* This check should get folded into just the places that
     * min/max index are really needed.
     */
    if (!vbo_all_varyings_in_vbos(arrays)) {
        if (!index_bounds_valid)
            vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
        /* do we want to rebase? It minimizes the
         * amount of data to upload. */
        if (min_index) {
            vbo_rebase_prims( ctx, arrays, prim, nr_prims, ib, min_index, max_index, evergreenDrawPrims );
            return;
        }
    }
    /* Make an attempt at drawing */
    retval = evergreenTryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

    /* If that failed, run the tnl pipeline - it should take care of fallbacks */
    if (!retval) {
        _swsetup_Wakeup(ctx);
        _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
    }
}
void evergreenInitDraw(struct gl_context *ctx)
{
    struct vbo_context *vbo = vbo_context(ctx);

    vbo->draw_prims = evergreenDrawPrims;
}