/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */
#include "main/glheader.h"
#include "main/state.h"
#include "main/imports.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/context.h"
#include "main/dd.h"
#include "main/simple_list.h"
#include "main/api_arrayelt.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"

#include "tnl/tnl.h"
#include "tnl/t_vp_build.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "tnl/t_pipeline.h"
#include "vbo/vbo_context.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r700_vertprog.h"
#include "r700_fragprog.h"
#include "r700_state.h"

#include "radeon_buffer_objects.h"
#include "radeon_common_context.h"
void r700WaitForIdle(context_t *context);
void r700WaitForIdleClean(context_t *context);
GLboolean r700SendTextureState(context_t *context);
static unsigned int r700PrimitiveType(int prim);
void r600UpdateTextureState(GLcontext * ctx);
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type);
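
/* Stall the 3D pipe: program the WAIT_UNTIL config register with
 * WAIT_3D_IDLE so the CP waits until the 3D engine reports idle. */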
void r700WaitForIdle(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(3);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit);

    END_BATCH();
    COMMIT_BATCH();
}
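
/* Like r700WaitForIdle, but first emits a CACHE_FLUSH_AND_INV event so the
 * render caches are flushed before waiting for the idle-clean state. */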
void r700WaitForIdleClean(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(5);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
    R600_OUT_BATCH(CACHE_FLUSH_AND_INV_EVENT);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);

    END_BATCH();
    COMMIT_BATCH();
}
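
/* Bring up the 3D engine for a new command stream.  Chips before RV770
 * need an explicit START_3D_CMDBUF packet first; every chip then gets a
 * CONTEXT_CONTROL packet followed by an idle-clean wait. */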
void r700Start3D(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
    {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_START_3D_CMDBUF, 0));
        R600_OUT_BATCH(0);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_CONTEXT_CONTROL, 1));
    R600_OUT_BATCH(0x80000000);
    R600_OUT_BATCH(0x80000000);
    END_BATCH();

    COMMIT_BATCH();

    r700WaitForIdleClean(context);
}
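
/* Emit a SURFACE_SYNC packet for a buffer object so the CP flushes or
 * invalidates the caches selected by sync_type before the bo is reused.
 * The coherence size is in 256-byte units; 0xffffffff means the whole
 * address space. */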
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    uint32_t cp_coher_size;

    if (!pbo)
        return GL_FALSE;

    if (pbo->size == 0xffffffff)
        cp_coher_size = 0xffffffff;
    else
        cp_coher_size = ((pbo->size + 255) >> 8);

    BEGIN_BATCH_NO_AUTOSTATE(5 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
    R600_OUT_BATCH(sync_type);
    R600_OUT_BATCH(cp_coher_size);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(10);
    R600_OUT_BATCH_RELOC(0,
                         pbo,
                         0,
                         read_domain, write_domain, 0);
    END_BATCH();
    COMMIT_BATCH();

    return GL_TRUE;
}
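
/* Translate a GL primitive mode (the low bits of the TNL prim word) into
 * the corresponding VGT DI_PT_* primitive type. */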
static unsigned int r700PrimitiveType(int prim)
{
    switch (prim & PRIM_MODE_MASK)
    {
    case GL_POINTS:
        return DI_PT_POINTLIST;
    case GL_LINES:
        return DI_PT_LINELIST;
    case GL_LINE_STRIP:
        return DI_PT_LINESTRIP;
    case GL_LINE_LOOP:
        return DI_PT_LINELOOP;
    case GL_TRIANGLES:
        return DI_PT_TRILIST;
    case GL_TRIANGLE_STRIP:
        return DI_PT_TRISTRIP;
    case GL_TRIANGLE_FAN:
        return DI_PT_TRIFAN;
    case GL_QUADS:
        return DI_PT_QUADLIST;
    case GL_QUAD_STRIP:
        return DI_PT_QUADSTRIP;
    case GL_POLYGON:
        return DI_PT_POLYGON;
    default:
        assert(0);
        return -1;
    }
}
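
/* Trim a vertex count to something the primitive type can consume whole
 * (e.g. a multiple of 3 for GL_TRIANGLES); returns the usable count. */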
static int r700NumVerts(int num_verts, int prim)
{
    int verts_off = 0;

    switch (prim & PRIM_MODE_MASK) {
    case GL_POINTS:
        verts_off = 0;
        break;
    case GL_LINES:
        verts_off = num_verts % 2;
        break;
    case GL_LINE_STRIP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_LINE_LOOP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_TRIANGLES:
        verts_off = num_verts % 3;
        break;
    case GL_TRIANGLE_STRIP:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_TRIANGLE_FAN:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_QUADS:
        verts_off = num_verts % 4;
        break;
    case GL_QUAD_STRIP:
        if (num_verts < 4)
            verts_off = num_verts;
        else
            verts_off = num_verts % 2;
        break;
    case GL_POLYGON:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    default:
        assert(0);
        return -1;
    }

    return num_verts - verts_off;
}
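
/* Emit one primitive: the VGT primitive type, the index size, the
 * instance count, and then either a DRAW_INDEX packet pointing at the
 * staged index buffer or a DRAW_INDEX_IMMD packet with the indices
 * inlined from vb->Elts. */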
static void r700RunRenderPrimitive(GLcontext * ctx, int start, int end, int prim)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, i, total_emit;
    int num_indices;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;
    TNLcontext *tnl = TNL_CONTEXT(ctx);
    struct vertex_buffer *vb = &tnl->vb;
    GLboolean bUseDrawIndex;

    if (NULL != context->ind_buf.bo)
    {
        bUseDrawIndex = GL_TRUE;
    }
    else
    {
        bUseDrawIndex = GL_FALSE;
    }

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    if (GL_TRUE == bUseDrawIndex)
    {
        total_emit =   3  /* VGT_PRIMITIVE_TYPE */
                     + 2  /* VGT_INDEX_TYPE */
                     + 2  /* NUM_INSTANCES */
                     + 5 + 2; /* DRAW_INDEX */
    }
    else
    {
        total_emit =   3 /* VGT_PRIMITIVE_TYPE */
                     + 2 /* VGT_INDEX_TYPE */
                     + 2 /* NUM_INSTANCES */
                     + num_indices + 3; /* DRAW_INDEX_IMMD */
    }

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);

    /* primitive type */
    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmVGT_PRIMITIVE_TYPE - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(vgt_primitive_type);

    /* index type */
    SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);

    if (GL_TRUE == bUseDrawIndex)
    {
        if (GL_TRUE != context->ind_buf.is_32bit)
        {
            SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
        }
    }

    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);

    /* number of instances */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);

    /* draw packet */
    vgt_num_indices = num_indices;

    if (GL_TRUE == bUseDrawIndex)
    {
        SETfield(vgt_draw_initiator, DI_SRC_SEL_DMA, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }
    else
    {
        SETfield(vgt_draw_initiator, DI_SRC_SEL_IMMEDIATE, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }

    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    if (GL_TRUE == bUseDrawIndex)
    {
        R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX, 3));
        R600_OUT_BATCH(context->ind_buf.bo_offset);
        R600_OUT_BATCH(0);
        R600_OUT_BATCH(vgt_num_indices);
        R600_OUT_BATCH(vgt_draw_initiator);
        R600_OUT_BATCH_RELOC(context->ind_buf.bo_offset,
                             context->ind_buf.bo,
                             context->ind_buf.bo_offset,
                             RADEON_GEM_DOMAIN_GTT, 0, 0);
    }
    else
    {
        R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (num_indices + 1)));
        R600_OUT_BATCH(vgt_num_indices);
        R600_OUT_BATCH(vgt_draw_initiator);

        for (i = start; i < (start + num_indices); i++)
        {
            R600_OUT_BATCH(vb->Elts[i]);
        }
    }

    END_BATCH();
    COMMIT_BATCH();
}
/* start 3d, idle, cb/db flush */
#define PRE_EMIT_STATE_BUFSZ (10 + 5 + 14)
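
/* Estimate, in dwords, the command-buffer space the coming draw will need
 * and make sure the cs has room for it (which may flush).  With
 * nr_prims == 0 the size is taken from the TNL vertex_buffer primitives
 * instead. */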
static GLuint r700PredictRenderSize(GLcontext* ctx, GLuint nr_prims)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    GLboolean flushed;
    GLuint dwords, i;
    GLuint state_size;

    /* pre calculate aos count so state prediction works */
    context->radeon.tcl.aos_count = _mesa_bitcount(vp->mesa_program->Base.InputsRead);

    dwords = PRE_EMIT_STATE_BUFSZ;
    if (nr_prims)
        dwords += nr_prims * 14;
    else {
        TNLcontext *tnl = TNL_CONTEXT(ctx);
        struct vertex_buffer *vb = &tnl->vb;

        for (i = 0; i < vb->PrimitiveCount; i++)
            dwords += vb->Primitive[i].count + 10;
    }

    state_size = radeonCountStateEmitSize(&context->radeon);
    flushed = rcommonEnsureCmdBufSpace(&context->radeon,
                                       dwords + state_size, __FUNCTION__);
    if (flushed)
        dwords += radeonCountStateEmitSize(&context->radeon);
    else
        dwords += state_size;

    radeon_print(RADEON_RENDER, RADEON_VERBOSE,
                 "%s: total prediction size is %d.\n", __FUNCTION__, dwords);

    return dwords;
}
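
/* TNL pipeline render path: emit state plus every primitive in the TNL
 * vertex buffer, then flush caches and sync the color/depth surfaces. */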
static GLboolean r700RunRender(GLcontext * ctx,
                               struct tnl_pipeline_stage *stage)
{
    context_t *context = R700_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    unsigned int i, id = 0;
    TNLcontext *tnl = TNL_CONTEXT(ctx);
    struct vertex_buffer *vb = &tnl->vb;
    struct radeon_renderbuffer *rrb;

    radeon_print(RADEON_RENDER, RADEON_NORMAL, "%s: cs begin at %d\n",
                 __func__, context->radeon.cmdbuf.cs->cdw);

    /* always emit CB base to prevent
     * lock ups on some chips.
     */
    R600_STATECHANGE(context, cb_target);
    /* mark vtx as dirty since it changes per-draw */
    R600_STATECHANGE(context, vtx);

    r700SetScissor(context);
    r700SetupVertexProgram(ctx);
    r700SetupFragmentProgram(ctx);
    r600UpdateTextureState(ctx);

    GLuint emit_end = r700PredictRenderSize(ctx, 0)
                    + context->radeon.cmdbuf.cs->cdw;
    r700SetupStreams(ctx);

    radeonEmitState(radeon);

    radeon_debug_add_indent();
    /* richard test code */
    for (i = 0; i < vb->PrimitiveCount; i++) {
        GLuint prim = _tnl_translate_prim(&vb->Primitive[i]);
        GLuint start = vb->Primitive[i].start;
        GLuint end = vb->Primitive[i].start + vb->Primitive[i].count;
        r700RunRenderPrimitive(ctx, start, end, prim);
    }
    radeon_debug_remove_indent();

    /* Flush render op cached for last several quads. */
    r700WaitForIdleClean(context);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     CB_ACTION_ENA_bit | (1 << (id + 6)));

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    radeonReleaseArrays(ctx, ~0);

    radeon_print(RADEON_RENDER, RADEON_TRACE, "%s: cs end at %d\n",
                 __func__, context->radeon.cmdbuf.cs->cdw);

    if (emit_end < context->radeon.cmdbuf.cs->cdw)
        WARN_ONCE("Rendering was %d commands larger than predicted size."
                  " We might overflow command buffer.\n",
                  context->radeon.cmdbuf.cs->cdw - emit_end);

    return GL_FALSE;
}
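
/* _tnl_run_pipeline stage callbacks.  The non-TCL variant is a stub that
 * lets later stages run; the TCL variant performs the full hardware draw. */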
static GLboolean r700RunNonTCLRender(GLcontext * ctx,
                                     struct tnl_pipeline_stage *stage) /* -------------------- */
{
    GLboolean bRet = GL_TRUE;

    return bRet;
}
static GLboolean r700RunTCLRender(GLcontext * ctx,  /*----------------------*/
                                  struct tnl_pipeline_stage *stage)
{
    GLboolean bRet = GL_FALSE;

    /* TODO : sw fallback */

    /* Need shader bo's setup before bo check */
    r700UpdateShaders(ctx);

    /* Ensure all enabled and complete textures are uploaded along with
     * any buffers being used.
     */
    if (!r600ValidateBuffers(ctx))
    {
        return GL_TRUE;
    }

    bRet = r700RunRender(ctx, stage);

    /* GL_FALSE stops _tnl_run_pipeline from running further pipe stages.
     * The render here DOES finish the whole pipe, so GL_FALSE is returned
     * for success. */
    return bRet;
}
const struct tnl_pipeline_stage _r700_render_stage = {
    "r700 Hardware Rasterization",
    NULL,
    NULL,
    NULL,
    NULL,
    r700RunNonTCLRender
};

const struct tnl_pipeline_stage _r700_tcl_stage = {
    "r700 Hardware Transform, Clipping and Lighting",
    NULL,
    NULL,
    NULL,
    NULL,
    r700RunTCLRender
};

const struct tnl_pipeline_stage *r700_pipeline[] =
{
    &_r700_tcl_stage,
    &_tnl_vertex_transform_stage,
    &_tnl_normal_transform_stage,
    &_tnl_lighting_stage,
    &_tnl_fog_coordinate_stage,
    &_tnl_texgen_stage,
    &_tnl_texture_transform_stage,
    &_tnl_vertex_program_stage,

    &_tnl_render_stage,
    &_r700_render_stage,
    NULL,
};
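
/* Helper for r700ConvertAttrib: walks `count` elements of `sz` components
 * at src_ptr (advancing by `stride` bytes per element) and stores them as
 * floats at dst_ptr, applying MACRO when the array is normalized.  It
 * relies on the locals of the enclosing function. */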
#define CONVERT( TYPE, MACRO ) do {                    \
    GLuint i, j, sz;                                   \
    sz = input->Size;                                  \
    if (input->Normalized) {                           \
        for (i = 0; i < count; i++) {                  \
            const TYPE *in = (TYPE *)src_ptr;          \
            for (j = 0; j < sz; j++) {                 \
                *dst_ptr++ = MACRO(*in);               \
                in++;                                  \
            }                                          \
            src_ptr += stride;                         \
        }                                              \
    } else {                                           \
        for (i = 0; i < count; i++) {                  \
            const TYPE *in = (TYPE *)src_ptr;          \
            for (j = 0; j < sz; j++) {                 \
                *dst_ptr++ = (GLfloat)(*in);           \
                in++;                                  \
            }                                          \
            src_ptr += stride;                         \
        }                                              \
    }                                                  \
} while (0)
/**
 * Convert attribute data type to float.
 * If the attribute uses a named buffer object, replace the bo with a newly
 * allocated bo.
 */
static void r700ConvertAttrib(GLcontext *ctx, int count,
                              const struct gl_client_array *input,
                              struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const GLvoid *src_ptr;
    GLboolean mapped_named_bo = GL_FALSE;
    GLfloat *dst_ptr;
    GLuint stride;

    stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;

    /* Convert value for first element only */
    if (input->StrideB == 0)
    {
        count = 1;
    }

    if (input->BufferObj->Name)
    {
        if (!input->BufferObj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
    }
    else
    {
        src_ptr = input->Ptr;
    }

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset,
                         sizeof(GLfloat) * input->Size * count, 32);
    dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

    assert(src_ptr != NULL);

    switch (input->Type)
    {
    case GL_DOUBLE:
        CONVERT(GLdouble, (GLfloat));
        break;
    case GL_UNSIGNED_INT:
        CONVERT(GLuint, UINT_TO_FLOAT);
        break;
    case GL_INT:
        CONVERT(GLint, INT_TO_FLOAT);
        break;
    case GL_UNSIGNED_SHORT:
        CONVERT(GLushort, USHORT_TO_FLOAT);
        break;
    case GL_SHORT:
        CONVERT(GLshort, SHORT_TO_FLOAT);
        break;
    case GL_UNSIGNED_BYTE:
        assert(input->Format != GL_BGRA);
        CONVERT(GLubyte, UBYTE_TO_FLOAT);
        break;
    case GL_BYTE:
        CONVERT(GLbyte, BYTE_TO_FLOAT);
        break;
    default:
        assert(0);
        break;
    }

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }
}
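
/* Copy a named-bo vertex array into a scratch DMA region, padding each
 * element so the stride becomes a multiple of 4 bytes. */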
static void r700AlignDataToDword(GLcontext *ctx,
                                 const struct gl_client_array *input,
                                 int count,
                                 struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const int dst_stride = (input->StrideB + 3) & ~3;
    const int size = getTypeSize(input->Type) * input->Size * count;
    GLboolean mapped_named_bo = GL_FALSE;

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset, size, 32);

    if (!input->BufferObj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
        mapped_named_bo = GL_TRUE;
    }

    {
        GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
        GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
        int i;

        for (i = 0; i < count; ++i)
        {
            _mesa_memcpy(dst_ptr, src_ptr, input->StrideB);
            src_ptr += input->StrideB;
            dst_ptr += dst_stride;
        }
    }

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }

    attr->stride = dst_stride;
}
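
/* Build the hardware vertex streams (AOS) for the current draw: convert
 * or realign arrays the vertex fetcher cannot consume directly, upload
 * user-space arrays through the DMA region, and register named bos with
 * the cs space checker. */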
static void r700SetupStreams2(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
    context_t *context = R700_CONTEXT(ctx);
    GLuint stride;
    int ret;
    int i, index;

    R600_STATECHANGE(context, vtx);

    for (index = 0; index < context->nNumActiveAos; index++)
    {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT ||
#if MESA_BIG_ENDIAN
            getTypeSize(input[i]->Type) != 4 ||
#endif
            stride < 4)
        {
            r700ConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        }
        else
        {
            if (input[i]->BufferObj->Name)
            {
                if (input[i]->StrideB % 4 != 0)
                {
                    assert(((intptr_t) input[i]->Ptr) % input[i]->StrideB == 0);
                    r700AlignDataToDword(ctx, input[i], count, &context->stream_desc[index]);
                    context->stream_desc[index].is_named_bo = GL_FALSE;
                }
                else
                {
                    context->stream_desc[index].stride = input[i]->StrideB;
                    context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
                    context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                    context->stream_desc[index].is_named_bo = GL_TRUE;
                }
            }
            else
            {
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0)
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                }
                else
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo,
                                     &context->stream_desc[index].bo_offset, size, 32);
                assert(context->stream_desc[index].bo->ptr != NULL);
                dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr,
                                               context->stream_desc[index].bo_offset);

                switch (context->stream_desc[index].dwords)
                {
                case 1:
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    context->stream_desc[index].stride = 4;
                    break;
                case 2:
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    context->stream_desc[index].stride = 8;
                    break;
                case 3:
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    context->stream_desc[index].stride = 12;
                    break;
                case 4:
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    context->stream_desc[index].stride = 16;
                    break;
                default:
                    assert(0);
                    break;
                }
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if (context->stream_desc[index].is_named_bo)
        {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs,
                                              context->stream_desc[index].bo,
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    context->radeon.tcl.aos_count = context->nNumActiveAos;
    ret = radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs,
                                        first_elem(&context->radeon.dma.reserved)->bo,
                                        RADEON_GEM_DOMAIN_GTT, 0);
}
static void r700FreeData(GLcontext *ctx)
{
    /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
     * to prevent double unref in radeonReleaseArrays
     * called during context destroy
     */
    context_t *context = R700_CONTEXT(ctx);

    int i;

    for (i = 0; i < context->nNumActiveAos; i++)
    {
        if (!context->stream_desc[i].is_named_bo)
        {
            radeon_bo_unref(context->stream_desc[i].bo);
        }
        context->radeon.tcl.aos[i].bo = NULL;
    }

    if (context->ind_buf.bo != NULL)
    {
        radeon_bo_unref(context->ind_buf.bo);
    }
}
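
/* The draw packets consume only 16- or 32-bit indices (see
 * r700RunRenderPrimitive), so GL_UNSIGNED_BYTE indices (and, on
 * big-endian, GL_UNSIGNED_SHORT ones) are expanded/repacked here into
 * dword-packed 16-bit pairs in a freshly allocated DMA region. */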
static void r700FixupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);
    GLvoid *src_ptr;
    GLuint *out;
    int i;
    GLboolean mapped_named_bo = GL_FALSE;

    if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
        mapped_named_bo = GL_TRUE;
        assert(mesa_ind_buf->obj->Pointer != NULL);
    }
    src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

    if (mesa_ind_buf->type == GL_UNSIGNED_BYTE)
    {
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
        GLubyte *in = (GLubyte *)src_ptr;

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }
    }
    else
    { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
        GLushort *in = (GLushort *)src_ptr;
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }
    }

    context->ind_buf.is_32bit = GL_FALSE;
    context->ind_buf.count = mesa_ind_buf->count;

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
    }
}
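
/* Stage the element-array data for the draw: copy 16/32-bit indices
 * straight into a DMA region, or route byte indices (and, on big-endian,
 * short indices) through r700FixupIndexBuffer. */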
static void r700SetupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);

    if (!mesa_ind_buf) {
        context->ind_buf.bo = NULL;
        return;
    }

#if MESA_BIG_ENDIAN
    if (mesa_ind_buf->type == GL_UNSIGNED_INT)
#else
    if (mesa_ind_buf->type != GL_UNSIGNED_BYTE)
#endif
    {
        const GLvoid *src_ptr;
        GLvoid *dst_ptr;
        GLboolean mapped_named_bo = GL_FALSE;

        if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
            assert(mesa_ind_buf->obj->Pointer != NULL);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

        const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);
        assert(context->ind_buf.bo->ptr != NULL);
        dst_ptr = ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        _mesa_memcpy(dst_ptr, src_ptr, size);

        context->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
        context->ind_buf.count = mesa_ind_buf->count;

        if (mapped_named_bo)
        {
            ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
        }
    }
    else
    {
        r700FixupIndexBuffer(ctx, mesa_ind_buf);
    }
}
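
/* Attempt a hardware draw of the given prims; returns GL_FALSE (so the
 * caller can fall back to the TNL pipeline) if buffer validation fails. */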
static GLboolean r700TryDrawPrims(GLcontext *ctx,
                                  const struct gl_client_array *arrays[],
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims,
                                  const struct _mesa_index_buffer *ib,
                                  GLuint min_index,
                                  GLuint max_index)
{
    context_t *context = R700_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    GLuint i, id = 0;
    struct radeon_renderbuffer *rrb;

    if (ctx->NewState)
        _mesa_update_state( ctx );

    _tnl_UpdateFixedFunctionProgram(ctx);
    r700SetVertexFormat(ctx, arrays, max_index + 1);
    /* shaders need to be updated before buffers are validated */
    r700UpdateShaders2(ctx);
    if (!r600ValidateBuffers(ctx))
        return GL_FALSE;

    /* always emit CB base to prevent
     * lock ups on some chips.
     */
    R600_STATECHANGE(context, cb_target);
    /* mark vtx as dirty since it changes per-draw */
    R600_STATECHANGE(context, vtx);

    r700SetScissor(context);
    r700SetupVertexProgram(ctx);
    r700SetupFragmentProgram(ctx);
    r600UpdateTextureState(ctx);

    GLuint emit_end = r700PredictRenderSize(ctx, nr_prims)
                    + context->radeon.cmdbuf.cs->cdw;

    r700SetupIndexBuffer(ctx, ib);
    r700SetupStreams2(ctx, arrays, max_index + 1);

    radeonEmitState(radeon);

    radeon_debug_add_indent();
    for (i = 0; i < nr_prims; ++i)
    {
        r700RunRenderPrimitive(ctx,
                               prim[i].start,
                               prim[i].start + prim[i].count,
                               prim[i].mode);
    }
    radeon_debug_remove_indent();

    /* Flush render op cached for last several quads. */
    r700WaitForIdleClean(context);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     CB_ACTION_ENA_bit | (1 << (id + 6)));

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    r700FreeData(ctx);

    if (emit_end < context->radeon.cmdbuf.cs->cdw)
        WARN_ONCE("Rendering was %d commands larger than predicted size."
                  " We might overflow command buffer.\n",
                  context->radeon.cmdbuf.cs->cdw - emit_end);

    return GL_TRUE;
}
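
/* Re-entrant draw entry (also passed to vbo_rebase_prims): computes index
 * bounds when needed, rebases prims that do not start at index zero, then
 * tries the hardware path with a TNL fallback. */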
static void r700DrawPrimsRe(GLcontext *ctx,
                            const struct gl_client_array *arrays[],
                            const struct _mesa_prim *prim,
                            GLuint nr_prims,
                            const struct _mesa_index_buffer *ib,
                            GLboolean index_bounds_valid,
                            GLuint min_index,
                            GLuint max_index)
{
    GLboolean retval = GL_FALSE;

    /* This check should get folded into just the places that
     * min/max index are really needed.
     */
    if (!index_bounds_valid) {
        vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
    }

    if (min_index) {
        vbo_rebase_prims( ctx, arrays, prim, nr_prims, ib, min_index, max_index, r700DrawPrimsRe );
        return;
    }

    /* Make an attempt at drawing */
    retval = r700TryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

    /* If failed run tnl pipeline - it should take care of fallbacks */
    if (!retval)
        _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
}
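
/* vbo draw_prims hook: indexed draws take the hardware fast path,
 * non-indexed draws go through the TNL pipeline. */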
static void r700DrawPrims(GLcontext *ctx,
                          const struct gl_client_array *arrays[],
                          const struct _mesa_prim *prim,
                          GLuint nr_prims,
                          const struct _mesa_index_buffer *ib,
                          GLboolean index_bounds_valid,
                          GLuint min_index,
                          GLuint max_index)
{
    context_t *context = R700_CONTEXT(ctx);

    /* For non indexed drawing, use the tnl pipe. */
    if (!ib)
    {
        context->ind_buf.bo = NULL;

        _tnl_vbo_draw_prims(ctx, arrays, prim, nr_prims, ib,
                            index_bounds_valid, min_index, max_index);
        return;
    }

    r700DrawPrimsRe(ctx, arrays, prim, nr_prims, ib, index_bounds_valid, min_index, max_index);
}

void r700InitDraw(GLcontext *ctx)
{
    struct vbo_context *vbo = vbo_context(ctx);

    vbo->draw_prims = r700DrawPrims;
}