/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */
#include "main/glheader.h"
#include "main/state.h"
#include "main/imports.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/context.h"
#include "main/simple_list.h"
#include "main/api_arrayelt.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"

#include "tnl/t_vp_build.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "vbo/vbo_context.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r700_vertprog.h"
#include "r700_fragprog.h"
#include "r700_state.h"

#include "radeon_buffer_objects.h"
#include "radeon_common_context.h"
void r700WaitForIdle(context_t *context);
void r700WaitForIdleClean(context_t *context);
static unsigned int r700PrimitiveType(int prim);
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type);
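
/* Stall the CP until the 3D engine reports idle, by writing the
 * WAIT_UNTIL config register through the command stream. */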
void r700WaitForIdle(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(3);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit);

    END_BATCH();
    COMMIT_BATCH();
}
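
/* Like r700WaitForIdle, but additionally flushes and invalidates the
 * CB/DB caches first, then waits for the "idle and clean" condition. */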
void r700WaitForIdleClean(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(5);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
    R600_OUT_BATCH(CACHE_FLUSH_AND_INV_EVENT);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);

    END_BATCH();
    COMMIT_BATCH();
}
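
/* Initialize the 3D engine for a new command stream.  Chips older than
 * RV770 need an extra START_3D_CMDBUF packet; the CONTEXT_CONTROL words
 * (0x80000000, 0x80000000) appear to enable loading of all register
 * state ranges. */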
void r700Start3D(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
    {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_START_3D_CMDBUF, 0));
        R600_OUT_BATCH(0);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_CONTEXT_CONTROL, 1));
    R600_OUT_BATCH(0x80000000);
    R600_OUT_BATCH(0x80000000);
    END_BATCH();

    COMMIT_BATCH();
}
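
/* Flush the caches selected by sync_type over the range covered by pbo;
 * the relocation below lets the kernel patch in the buffer's base
 * address.  Note that cp_coher_size is expressed in 256-byte units. */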
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    uint32_t cp_coher_size;

    if (!pbo)
        return GL_FALSE;

    if (pbo->size == 0xffffffff)
        cp_coher_size = 0xffffffff;
    else
        cp_coher_size = ((pbo->size + 255) >> 8);

    BEGIN_BATCH_NO_AUTOSTATE(5 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
    R600_OUT_BATCH(sync_type);
    R600_OUT_BATCH(cp_coher_size);
    R600_OUT_BATCH(0);  /* CP_COHER_BASE, patched by the relocation below */
    R600_OUT_BATCH(10); /* poll interval */
    R600_OUT_BATCH_RELOC(0,
                         pbo,
                         0,
                         read_domain, write_domain, 0);
    END_BATCH();
    COMMIT_BATCH();

    return GL_TRUE;
}
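
/* Translate a GL primitive mode into the VGT's DI_PT_* encoding;
 * returns -1 for modes the hardware path does not handle. */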
static unsigned int r700PrimitiveType(int prim)
{
    switch (prim & PRIM_MODE_MASK)
    {
    case GL_POINTS:
        return DI_PT_POINTLIST;
    case GL_LINES:
        return DI_PT_LINELIST;
    case GL_LINE_STRIP:
        return DI_PT_LINESTRIP;
    case GL_LINE_LOOP:
        return DI_PT_LINELOOP;
    case GL_TRIANGLES:
        return DI_PT_TRILIST;
    case GL_TRIANGLE_STRIP:
        return DI_PT_TRISTRIP;
    case GL_TRIANGLE_FAN:
        return DI_PT_TRIFAN;
    case GL_QUADS:
        return DI_PT_QUADLIST;
    case GL_QUAD_STRIP:
        return DI_PT_QUADSTRIP;
    case GL_POLYGON:
        return DI_PT_POLYGON;
    default:
        assert(0);
        return -1;
    }
}
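
/* Trim a vertex count so it forms whole primitives: drop the remainder
 * for list types, and drop everything when a strip/fan/loop has too few
 * vertices to produce even one primitive. */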
static int r700NumVerts(int num_verts, int prim)
{
    int verts_off = 0;

    switch (prim & PRIM_MODE_MASK) {
    case GL_POINTS:
        verts_off = 0;
        break;
    case GL_LINES:
        verts_off = num_verts % 2;
        break;
    case GL_LINE_STRIP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_LINE_LOOP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_TRIANGLES:
        verts_off = num_verts % 3;
        break;
    case GL_TRIANGLE_STRIP:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_TRIANGLE_FAN:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_QUADS:
        verts_off = num_verts % 4;
        break;
    case GL_QUAD_STRIP:
        if (num_verts < 4)
            verts_off = num_verts;
        else
            verts_off = num_verts % 2;
        break;
    case GL_POLYGON:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    default:
        assert(0);
        return -1;
    }

    return num_verts - verts_off;
}
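
/* Emit one indexed draw.  The indices were previously uploaded by
 * r700SetupIndexBuffer; this programs the VGT primitive and index type,
 * the base vertex, and a DRAW_INDEX packet whose index-buffer address
 * is patched in through a relocation. */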
static void r700RunRenderPrimitive(struct gl_context *ctx, int start, int end,
                                   int prim, GLint basevertex)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, total_emit;
    int num_indices;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);

    if (GL_TRUE != context->ind_buf.is_32bit)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    /* 16-bit indexes are packed in a 32-bit value */
    SETfield(vgt_index_type,
#if MESA_BIG_ENDIAN
             SQ_ENDIAN_8IN32,
#else
             SQ_ENDIAN_NONE,
#endif
             SWAP_MODE_shift, SWAP_MODE_mask);

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_SRC_SEL_DMA, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    total_emit = 3       /* VGT_PRIMITIVE_TYPE */
               + 2       /* VGT_INDEX_TYPE */
               + 2       /* NUM_INSTANCES */
               + 4       /* VTX_BASE_VTX_LOC + VTX_START_INST_LOC */
               + 5 + 2;  /* DRAW_INDEX */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    /* prim */
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    /* index type */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    /* num instances */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* offset */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 2));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(basevertex); /* VTX_BASE_VTX_LOC */
    R600_OUT_BATCH(0);          /* VTX_START_INST_LOC */
    /* draw packet */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX, 3));
    R600_OUT_BATCH(context->ind_buf.bo_offset);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(vgt_num_indices);
    R600_OUT_BATCH(vgt_draw_initiator);
    R600_OUT_BATCH_RELOC(context->ind_buf.bo_offset,
                         context->ind_buf.bo,
                         context->ind_buf.bo_offset,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}
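
/* Emit one non-indexed draw.  When the primitive starts at vertex 0 the
 * VGT generates indices itself (DRAW_INDEX_AUTO); otherwise the indices
 * are written inline into the command stream (DRAW_INDEX_IMMD), two
 * 16-bit indices per dword when they all fit in 16 bits. */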
static void r700RunRenderPrimitiveImmediate(struct gl_context *ctx, int start, int end, int prim)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, i;
    uint32_t num_indices, total_emit = 0;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    if (num_indices > 0xffff)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }
    else
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    /* 16-bit indexes are packed in a 32-bit value */
    SETfield(vgt_index_type,
#if MESA_BIG_ENDIAN
             SQ_ENDIAN_8IN32,
#else
             SQ_ENDIAN_NONE,
#endif
             SWAP_MODE_shift, SWAP_MODE_mask);

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    if (start == 0)
    {
        SETfield(vgt_draw_initiator, DI_SRC_SEL_AUTO_INDEX, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            total_emit += num_indices;
        }
        else
        {
            total_emit += (num_indices + 1) / 2;
        }
        SETfield(vgt_draw_initiator, DI_SRC_SEL_IMMEDIATE, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }

    total_emit += 3   /* VGT_PRIMITIVE_TYPE */
                + 2   /* VGT_INDEX_TYPE */
                + 2   /* NUM_INSTANCES */
                + 4   /* VTX_BASE_VTX_LOC + VTX_START_INST_LOC */
                + 3;  /* DRAW */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    /* prim */
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    /* index type */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    /* num instances */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* offset */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 2));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0); /* VTX_BASE_VTX_LOC */
    R600_OUT_BATCH(0); /* VTX_START_INST_LOC */
    /* draw packet */
    if (start == 0)
    {
        R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
        R600_OUT_BATCH(vgt_num_indices);
        R600_OUT_BATCH(vgt_draw_initiator);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (num_indices + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i++)
            {
                R600_OUT_BATCH(i);
            }
        }
        else
        {
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (((num_indices + 1) / 2) + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i += 2)
            {
                if ((i + 1) == (start + num_indices))
                {
                    R600_OUT_BATCH(i);
                }
                else
                {
                    R600_OUT_BATCH(((i + 1) << 16) | (i));
                }
            }
        }
    }

    END_BATCH();
    COMMIT_BATCH();
}
/* start 3d, idle, cb/db flush */
#define PRE_EMIT_STATE_BUFSZ (5 + 5 + 14)
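
/* Conservatively estimate the number of dwords the coming draw will
 * emit, so the command buffer can be flushed up front rather than in
 * the middle of a primitive.  The per-primitive constants mirror the
 * packets emitted by the two RunRenderPrimitive paths above. */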
static GLuint r700PredictRenderSize(struct gl_context *ctx,
                                    const struct _mesa_prim *prim,
                                    const struct _mesa_index_buffer *ib,
                                    GLuint nr_prims)
{
    context_t *context = R700_CONTEXT(ctx);
    GLboolean flushed;
    GLuint dwords, i;
    GLuint state_size;

    dwords = PRE_EMIT_STATE_BUFSZ;
    if (ib)
        dwords += nr_prims * 18;
    else {
        for (i = 0; i < nr_prims; ++i)
        {
            if (prim[i].start == 0)
                dwords += 14;
            else if (prim[i].count > 0xffff)
                dwords += prim[i].count + 14;
            else
                dwords += ((prim[i].count + 1) / 2) + 14;
        }
    }

    state_size = radeonCountStateEmitSize(&context->radeon);
    flushed = rcommonEnsureCmdBufSpace(&context->radeon,
                                       dwords + state_size,
                                       __FUNCTION__);
    if (flushed)
        dwords += radeonCountStateEmitSize(&context->radeon);
    else
        dwords += state_size;

    radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);
    return dwords;
}
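
/* Helper for r700ConvertAttrib: copies "count" elements of the given
 * source TYPE to floats, applying MACRO per component (a normalizing
 * conversion or a plain cast).  It deliberately picks up src_ptr,
 * dst_ptr, stride and count from the enclosing scope. */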
#define CONVERT( TYPE, MACRO ) do {                             \
        GLuint i, j, sz = input->Size;                          \
        if (input->Normalized) {                                \
                for (i = 0; i < count; i++) {                   \
                        const TYPE *in = (TYPE *)src_ptr;       \
                        for (j = 0; j < sz; j++) {              \
                                *dst_ptr++ = MACRO(*in); in++;  \
                        }                                       \
                        src_ptr += stride;                      \
                }                                               \
        } else {                                                \
                for (i = 0; i < count; i++) {                   \
                        const TYPE *in = (TYPE *)src_ptr;       \
                        for (j = 0; j < sz; j++) {              \
                                *dst_ptr++ = (GLfloat)(*in); in++; \
                        }                                       \
                        src_ptr += stride;                      \
                }                                               \
        }                                                       \
} while (0)
/**
 * Convert attribute data type to float.
 * If the attribute uses a named buffer object, replace the bo with a
 * newly allocated bo.
 */
static void r700ConvertAttrib(struct gl_context *ctx, int count,
                              const struct gl_client_array *input,
                              struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const GLvoid *src_ptr;
    GLboolean mapped_named_bo = GL_FALSE;
    GLfloat *dst_ptr;
    GLuint stride;

    stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;

    /* Convert value for first element only */
    if (input->StrideB == 0)
    {
        count = 1;
    }

    if (input->BufferObj->Name)
    {
        if (!input->BufferObj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
    }
    else
    {
        src_ptr = input->Ptr;
    }

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset,
                         sizeof(GLfloat) * input->Size * count, 32);

    radeon_bo_map(attr->bo, 1);

    dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

    assert(src_ptr != NULL);

    switch (input->Type)
    {
    case GL_DOUBLE:
        CONVERT(GLdouble, (GLfloat));
        break;
    case GL_UNSIGNED_INT:
        CONVERT(GLuint, UINT_TO_FLOAT);
        break;
    case GL_INT:
        CONVERT(GLint, INT_TO_FLOAT);
        break;
    case GL_UNSIGNED_SHORT:
        CONVERT(GLushort, USHORT_TO_FLOAT);
        break;
    case GL_SHORT:
        CONVERT(GLshort, SHORT_TO_FLOAT);
        break;
    case GL_UNSIGNED_BYTE:
        assert(input->Format != GL_BGRA);
        CONVERT(GLubyte, UBYTE_TO_FLOAT);
        break;
    case GL_BYTE:
        CONVERT(GLbyte, BYTE_TO_FLOAT);
        break;
    default:
        assert(0);
        break;
    }

    radeon_bo_unmap(attr->bo);

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }
}
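
/* The vertex fetcher wants dword-aligned strides; copy elements into a
 * scratch bo, padding each element's stride up to the next multiple of
 * four bytes. */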
static void r700AlignDataToDword(struct gl_context *ctx,
                                 const struct gl_client_array *input,
                                 int count,
                                 struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const int dst_stride = (input->StrideB + 3) & ~3;
    /* allocate with the padded stride so the copy below cannot overrun */
    const int size = dst_stride * count;
    GLboolean mapped_named_bo = GL_FALSE;

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset, size, 32);

    radeon_bo_map(attr->bo, 1);

    if (!input->BufferObj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
        mapped_named_bo = GL_TRUE;
    }

    {
        GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
        GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
        int i;

        for (i = 0; i < count; ++i)
        {
            memcpy(dst_ptr, src_ptr, input->StrideB);
            src_ptr += input->StrideB;
            dst_ptr += dst_stride;
        }
    }

    radeon_bo_unmap(attr->bo);
    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }

    attr->stride = dst_stride;
}
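
/* Build the radeon_aos descriptors for the active vertex attributes:
 * types the hardware cannot fetch directly are converted to float,
 * named buffer objects are referenced in place, and user-space arrays
 * are uploaded into freshly allocated DMA regions. */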
static void r700SetupStreams(struct gl_context *ctx, const struct gl_client_array *input[], int count)
{
    context_t *context = R700_CONTEXT(ctx);
    GLuint stride;
    int ret;
    int i, index;

    R600_STATECHANGE(context, vtx);

    for (index = 0; index < context->nNumActiveAos; index++)
    {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT
#if MESA_BIG_ENDIAN
            || getTypeSize(input[i]->Type) != 4
#endif
           )
        {
            r700ConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        }
        else
        {
            if (input[i]->BufferObj->Name)
            {
                context->stream_desc[index].stride = input[i]->StrideB;
                context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
                context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                context->stream_desc[index].is_named_bo = GL_TRUE;
            }
            else
            {
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0)
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                }
                else
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo,
                                     &context->stream_desc[index].bo_offset, size, 32);

                radeon_bo_map(context->stream_desc[index].bo, 1);
                assert(context->stream_desc[index].bo->ptr != NULL);

                dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr,
                                               context->stream_desc[index].bo_offset);

                switch (context->stream_desc[index].dwords)
                {
                case 1:
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 2:
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 3:
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 4:
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                default:
                    assert(0);
                    break;
                }

                radeon_bo_unmap(context->stream_desc[index].bo);
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if (context->stream_desc[index].is_named_bo)
        {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs,
                                              context->stream_desc[index].bo,
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    ret = radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs,
                                        first_elem(&context->radeon.dma.reserved)->bo,
                                        RADEON_GEM_DOMAIN_GTT, 0);
}
static void r700FreeData(struct gl_context *ctx)
{
    /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
     * to prevent double unref in radeonReleaseArrays
     * called during context destroy
     */
    context_t *context = R700_CONTEXT(ctx);
    int i;

    for (i = 0; i < context->nNumActiveAos; i++)
    {
        if (!context->stream_desc[i].is_named_bo)
        {
            radeon_bo_unref(context->stream_desc[i].bo);
        }
        context->radeon.tcl.aos[i].bo = NULL;
    }

    if (context->ind_buf.bo != NULL)
    {
        radeon_bo_unref(context->ind_buf.bo);
    }
}
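
/* The hardware has no native 8-bit index support, so unsigned byte
 * indices are widened to 16 bits and packed two per dword (the same
 * repacking is applied to 16-bit indices on big-endian builds). */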
static void r700FixupIndexBuffer(struct gl_context *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);
    GLvoid *src_ptr;
    GLuint *out;
    int i;
    GLboolean mapped_named_bo = GL_FALSE;

    if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
        mapped_named_bo = GL_TRUE;
        assert(mesa_ind_buf->obj->Pointer != NULL);
    }
    src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

    if (mesa_ind_buf->type == GL_UNSIGNED_BYTE)
    {
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
        GLubyte *in = (GLubyte *)src_ptr;

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }

        radeon_bo_unmap(context->ind_buf.bo);
#if MESA_BIG_ENDIAN
    }
    else
    { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
        GLushort *in = (GLushort *)src_ptr;
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }

        radeon_bo_unmap(context->ind_buf.bo);
#endif
    }

    context->ind_buf.is_32bit = GL_FALSE;
    context->ind_buf.count = mesa_ind_buf->count;

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
    }
}
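
/* Upload the index buffer into a GPU-readable DMA region, delegating to
 * r700FixupIndexBuffer for index types that need repacking. */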
static void r700SetupIndexBuffer(struct gl_context *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);

    if (!mesa_ind_buf) {
        context->ind_buf.bo = NULL;
        return;
    }

#if MESA_BIG_ENDIAN
    if (mesa_ind_buf->type == GL_UNSIGNED_INT)
#else
    if (mesa_ind_buf->type != GL_UNSIGNED_BYTE)
#endif
    {
        const GLvoid *src_ptr;
        GLvoid *dst_ptr;
        GLboolean mapped_named_bo = GL_FALSE;

        if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
            assert(mesa_ind_buf->obj->Pointer != NULL);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

        const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);
        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        dst_ptr = ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        memcpy(dst_ptr, src_ptr, size);

        radeon_bo_unmap(context->ind_buf.bo);
        context->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
        context->ind_buf.count = mesa_ind_buf->count;

        if (mapped_named_bo)
        {
            ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
        }
    }
    else
    {
        r700FixupIndexBuffer(ctx, mesa_ind_buf);
    }
}
static GLboolean check_fallbacks(struct gl_context *ctx)
{
    if (ctx->RenderMode != GL_RENDER)
        return GL_TRUE;

    return GL_FALSE;
}
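
/* Try to render the primitives in hardware.  Returns GL_FALSE when a
 * fallback condition or failed buffer validation is detected, so the
 * caller can route the draw through the software TNL pipeline instead. */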
static GLboolean r700TryDrawPrims(struct gl_context *ctx,
                                  const struct gl_client_array *arrays[],
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims,
                                  const struct _mesa_index_buffer *ib,
                                  GLuint min_index,
                                  GLuint max_index)
{
    context_t *context = R700_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    GLuint i, id = 0;
    struct radeon_renderbuffer *rrb;

    if (ctx->NewState)
        _mesa_update_state( ctx );

    if (check_fallbacks(ctx))
        return GL_FALSE;

    _tnl_UpdateFixedFunctionProgram(ctx);
    r700SetVertexFormat(ctx, arrays, max_index + 1);
    /* shaders need to be updated before buffers are validated */
    r700UpdateShaders(ctx);
    if (!r600ValidateBuffers(ctx))
        return GL_FALSE;

    /* always emit CB base to prevent
     * lock ups on some chips.
     */
    R600_STATECHANGE(context, cb_target);
    /* mark vtx as dirty since it changes per-draw */
    R600_STATECHANGE(context, vtx);

    r700SetScissor(context);
    r700SetupVertexProgram(ctx);
    r700SetupFragmentProgram(ctx);
    r700UpdateShaderStates(ctx);

    GLuint emit_end = r700PredictRenderSize(ctx, prim, ib, nr_prims)
                    + context->radeon.cmdbuf.cs->cdw;

    r700SetupIndexBuffer(ctx, ib);
    r700SetupStreams(ctx, arrays, max_index + 1);

    radeonEmitState(radeon);

    radeon_debug_add_indent();
    for (i = 0; i < nr_prims; ++i)
    {
        if (context->ind_buf.bo)
            r700RunRenderPrimitive(ctx,
                                   prim[i].start,
                                   prim[i].start + prim[i].count,
                                   prim[i].mode,
                                   prim[i].basevertex);
        else
            r700RunRenderPrimitiveImmediate(ctx,
                                            prim[i].start,
                                            prim[i].start + prim[i].count,
                                            prim[i].mode);
    }
    radeon_debug_remove_indent();

    /* Flush render op cached for last several quads. */
    /* XXX drm should handle this in fence submit */
    r700WaitForIdleClean(context);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     CB_ACTION_ENA_bit | (1 << (id + 6)));

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    r700FreeData(ctx);

    if (emit_end < context->radeon.cmdbuf.cs->cdw)
    {
        WARN_ONCE("Rendering was %d commands larger than predicted size."
                  " We might overflow command buffer.\n",
                  context->radeon.cmdbuf.cs->cdw - emit_end);
    }

    return GL_TRUE;
}
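
/* vbo draw_prims entry point: computes (and possibly rebases) the index
 * bounds when needed, attempts the hardware path, and falls back to the
 * swrast/TNL pipeline if that attempt fails. */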
static void r700DrawPrims(struct gl_context *ctx,
                          const struct gl_client_array *arrays[],
                          const struct _mesa_prim *prim,
                          GLuint nr_prims,
                          const struct _mesa_index_buffer *ib,
                          GLboolean index_bounds_valid,
                          GLuint min_index,
                          GLuint max_index)
{
    GLboolean retval = GL_FALSE;

    context_t *context = R700_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    radeon_prepare_render(radeon);

    /* This check should get folded into just the places that
     * min/max index are really needed.
     */
    if (!vbo_all_varyings_in_vbos(arrays)) {
        if (!index_bounds_valid)
            vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
        /* do we want to rebase, minimizes the
         * amount of data to upload? */
        if (min_index) {
            vbo_rebase_prims( ctx, arrays, prim, nr_prims, ib, min_index, max_index, r700DrawPrims );
            return;
        }
    }

    /* Make an attempt at drawing */
    retval = r700TryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

    /* If failed run tnl pipeline - it should take care of fallbacks */
    if (!retval) {
        _swsetup_Wakeup(ctx);
        _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
    }
}
void r700InitDraw(struct gl_context *ctx)
{
    struct vbo_context *vbo = vbo_context(ctx);

    vbo->draw_prims = r700DrawPrims;
}