/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */
#include "main/glheader.h"
#include "main/state.h"
#include "main/imports.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/context.h"
#include "main/simple_list.h"
#include "main/api_arrayelt.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"

#include "tnl/t_vp_build.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "tnl/t_pipeline.h"
#include "vbo/vbo_context.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r700_vertprog.h"
#include "r700_fragprog.h"
#include "r700_state.h"

#include "radeon_buffer_objects.h"
#include "radeon_common_context.h"
void r700WaitForIdle(context_t *context);
void r700WaitForIdleClean(context_t *context);
static unsigned int r700PrimitiveType(int prim);
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type);
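/* Stall the CP until the 3D engine reports idle by programming the
 * WAIT_3D_IDLE bit of the WAIT_UNTIL config register. */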
void r700WaitForIdle(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(3);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit);

    END_BATCH();
    COMMIT_BATCH();
}

void r700WaitForIdleClean(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(5);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
    R600_OUT_BATCH(CACHE_FLUSH_AND_INV_EVENT);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);

    END_BATCH();
    COMMIT_BATCH();
}

void r700Start3D(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
    {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_START_3D_CMDBUF, 0));
        R600_OUT_BATCH(0);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_CONTEXT_CONTROL, 1));
    R600_OUT_BATCH(0x80000000);
    R600_OUT_BATCH(0x80000000);
    END_BATCH();

    COMMIT_BATCH();

    r700WaitForIdleClean(context);
}

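/* Flush/invalidate the caches selected by sync_type for the given bo
 * via a SURFACE_SYNC packet; the coherency size is counted in
 * 256-byte blocks, so the bo size is rounded up before the shift. */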
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    uint32_t cp_coher_size;

    if (!pbo)
        return GL_FALSE;

    if (pbo->size == 0xffffffff)
        cp_coher_size = 0xffffffff;
    else
        cp_coher_size = ((pbo->size + 255) >> 8);

    BEGIN_BATCH_NO_AUTOSTATE(5 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
    R600_OUT_BATCH(sync_type);
    R600_OUT_BATCH(cp_coher_size);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(10);
    R600_OUT_BATCH_RELOC(0,
                         pbo,
                         0,
                         read_domain, write_domain, 0);
    END_BATCH();
    COMMIT_BATCH();

    return GL_TRUE;
}

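/* Translate a GL primitive mode into the VGT DI_PT_* draw type the
 * hardware expects; unknown modes assert and return -1. */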
static unsigned int r700PrimitiveType(int prim)
{
    switch (prim & PRIM_MODE_MASK)
    {
    case GL_POINTS:
        return DI_PT_POINTLIST;
    case GL_LINES:
        return DI_PT_LINELIST;
    case GL_LINE_STRIP:
        return DI_PT_LINESTRIP;
    case GL_LINE_LOOP:
        return DI_PT_LINELOOP;
    case GL_TRIANGLES:
        return DI_PT_TRILIST;
    case GL_TRIANGLE_STRIP:
        return DI_PT_TRISTRIP;
    case GL_TRIANGLE_FAN:
        return DI_PT_TRIFAN;
    case GL_QUADS:
        return DI_PT_QUADLIST;
    case GL_QUAD_STRIP:
        return DI_PT_QUADSTRIP;
    case GL_POLYGON:
        return DI_PT_POLYGON;
    default:
        assert(0);
        return -1;
    }
}

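/* Clamp num_verts down to a whole number of primitives for the given
 * mode, e.g. a multiple of 3 for GL_TRIANGLES; strips, fans and loops
 * with too few vertices collapse to zero. */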
static int r700NumVerts(int num_verts, int prim)
{
    int verts_off = 0;

    switch (prim & PRIM_MODE_MASK) {
    case GL_POINTS:
        verts_off = 0;
        break;
    case GL_LINES:
        verts_off = num_verts % 2;
        break;
    case GL_LINE_STRIP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_LINE_LOOP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_TRIANGLES:
        verts_off = num_verts % 3;
        break;
    case GL_TRIANGLE_STRIP:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_TRIANGLE_FAN:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_QUADS:
        verts_off = num_verts % 4;
        break;
    case GL_QUAD_STRIP:
        if (num_verts < 4)
            verts_off = num_verts;
        else
            verts_off = num_verts % 2;
        break;
    case GL_POLYGON:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    default:
        assert(0);
        return -1;
    }

    return num_verts - verts_off;
}

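/* Indexed draw path: program the primitive type, index size and
 * instance count, then point the CP at the uploaded index buffer with
 * a DRAW_INDEX packet and relocation. */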
static void r700RunRenderPrimitive(GLcontext * ctx, int start, int end, int prim)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, total_emit;
    int num_indices;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);

    if (GL_TRUE != context->ind_buf.is_32bit)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_SRC_SEL_DMA, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    total_emit =   3 /* VGT_PRIMITIVE_TYPE */
                 + 2 /* VGT_INDEX_TYPE */
                 + 2 /* NUM_INSTANCES */
                 + 5 + 2; /* DRAW_INDEX */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    /* prim */
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    /* index type */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    /* num instances */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* draw packet */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX, 3));
    R600_OUT_BATCH(context->ind_buf.bo_offset);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(vgt_num_indices);
    R600_OUT_BATCH(vgt_draw_initiator);
    R600_OUT_BATCH_RELOC(context->ind_buf.bo_offset,
                         context->ind_buf.bo,
                         context->ind_buf.bo_offset,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

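/* Non-indexed draw path: start == 0 uses DRAW_INDEX_AUTO, otherwise
 * the indices are emitted inline with DRAW_INDEX_IMMD, packed two
 * 16-bit indices per dword when they fit. */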
static void r700RunRenderPrimitiveImmediate(GLcontext * ctx, int start, int end, int prim)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, i;
    uint32_t num_indices, total_emit = 0;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    if (num_indices > 0xffff)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }
    else
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    if (start == 0)
    {
        SETfield(vgt_draw_initiator, DI_SRC_SEL_AUTO_INDEX, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            total_emit += num_indices;
        }
        else
        {
            total_emit += (num_indices + 1) / 2;
        }
        SETfield(vgt_draw_initiator, DI_SRC_SEL_IMMEDIATE, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }

    total_emit +=   3 /* VGT_PRIMITIVE_TYPE */
                  + 2 /* VGT_INDEX_TYPE */
                  + 2 /* NUM_INSTANCES */
                  + 3; /* DRAW */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    /* prim */
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    /* index type */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    /* num instances */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* draw packet */
    if (start == 0)
    {
        R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
        R600_OUT_BATCH(vgt_num_indices);
        R600_OUT_BATCH(vgt_draw_initiator);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (num_indices + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i++)
            {
                R600_OUT_BATCH(i);
            }
        }
        else
        {
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (((num_indices + 1) / 2) + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i += 2)
            {
                if ((i + 1) == (start + num_indices))
                {
                    R600_OUT_BATCH(i);
                }
                else
                {
                    R600_OUT_BATCH(((i + 1) << 16) | (i));
                }
            }
        }
    }

    END_BATCH();
    COMMIT_BATCH();
}

/* start 3d, idle, cb/db flush */
#define PRE_EMIT_STATE_BUFSZ 10 + 5 + 14

static GLuint r700PredictRenderSize(GLcontext* ctx,
                                    const struct _mesa_prim *prim,
                                    const struct _mesa_index_buffer *ib,
                                    GLuint nr_prims)
{
    context_t *context = R700_CONTEXT(ctx);
    GLboolean flushed;
    GLuint dwords, i;
    GLuint state_size;

    dwords = PRE_EMIT_STATE_BUFSZ;
    if (ib)
        dwords += nr_prims * 14;
    else {
        for (i = 0; i < nr_prims; ++i)
        {
            if (prim[i].start == 0)
                dwords += 10;
            else if (prim[i].count > 0xffff)
                dwords += prim[i].count + 10;
            else
                dwords += ((prim[i].count + 1) / 2) + 10;
        }
    }

    state_size = radeonCountStateEmitSize(&context->radeon);
    flushed = rcommonEnsureCmdBufSpace(&context->radeon,
                                       dwords + state_size,
                                       __FUNCTION__);
    if (flushed)
        dwords += radeonCountStateEmitSize(&context->radeon);
    else
        dwords += state_size;

    radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);
    return dwords;
}

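/* Expand a conversion loop for one source TYPE: walk `count` elements
 * through src_ptr/stride and emit GLfloats, applying MACRO when the
 * array is normalized and a plain cast otherwise. */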
#define CONVERT( TYPE, MACRO ) do {                 \
        GLuint i, j, sz;                            \
        sz = input->Size;                           \
        if (input->Normalized) {                    \
            for (i = 0; i < count; i++) {           \
                const TYPE *in = (TYPE *)src_ptr;   \
                for (j = 0; j < sz; j++) {          \
                    *dst_ptr++ = MACRO(*in);        \
                    in++;                           \
                }                                   \
                src_ptr += stride;                  \
            }                                       \
        } else {                                    \
            for (i = 0; i < count; i++) {           \
                const TYPE *in = (TYPE *)src_ptr;   \
                for (j = 0; j < sz; j++) {          \
                    *dst_ptr++ = (GLfloat)(*in);    \
                    in++;                           \
                }                                   \
                src_ptr += stride;                  \
            }                                       \
        }                                           \
} while (0)

/**
 * Convert attribute data type to float.
 * If the attribute uses a named buffer object, replace the bo with a
 * newly allocated bo holding the converted data.
 */
static void r700ConvertAttrib(GLcontext *ctx, int count,
                              const struct gl_client_array *input,
                              struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const GLvoid *src_ptr;
    GLboolean mapped_named_bo = GL_FALSE;
    GLfloat *dst_ptr;
    GLuint stride;

    stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;

    /* Convert value for first element only */
    if (input->StrideB == 0)
    {
        count = 1;
    }

    if (input->BufferObj->Name)
    {
        if (!input->BufferObj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
    }
    else
    {
        src_ptr = input->Ptr;
    }

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset,
                         sizeof(GLfloat) * input->Size * count, 32);

    radeon_bo_map(attr->bo, 1);

    dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

    assert(src_ptr != NULL);

    switch (input->Type)
    {
    case GL_DOUBLE:
        CONVERT(GLdouble, (GLfloat));
        break;
    case GL_UNSIGNED_INT:
        CONVERT(GLuint, UINT_TO_FLOAT);
        break;
    case GL_INT:
        CONVERT(GLint, INT_TO_FLOAT);
        break;
    case GL_UNSIGNED_SHORT:
        CONVERT(GLushort, USHORT_TO_FLOAT);
        break;
    case GL_SHORT:
        CONVERT(GLshort, SHORT_TO_FLOAT);
        break;
    case GL_UNSIGNED_BYTE:
        assert(input->Format != GL_BGRA);
        CONVERT(GLubyte, UBYTE_TO_FLOAT);
        break;
    case GL_BYTE:
        CONVERT(GLbyte, BYTE_TO_FLOAT);
        break;
    default:
        assert(0);
        break;
    }

    radeon_bo_unmap(attr->bo);

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }
}

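/* Repack an attribute whose stride is not dword aligned into a DMA
 * region, padding each element out to the next 4-byte boundary. */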
static void r700AlignDataToDword(GLcontext *ctx,
                                 const struct gl_client_array *input,
                                 int count,
                                 struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const int dst_stride = (input->StrideB + 3) & ~3;
    const int size = getTypeSize(input->Type) * input->Size * count;
    GLboolean mapped_named_bo = GL_FALSE;

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset, size, 32);

    radeon_bo_map(attr->bo, 1);

    if (!input->BufferObj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
        mapped_named_bo = GL_TRUE;
    }

    {
        GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
        GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
        int i;

        for (i = 0; i < count; ++i)
        {
            _mesa_memcpy(dst_ptr, src_ptr, input->StrideB);
            src_ptr += input->StrideB;
            dst_ptr += dst_stride;
        }
    }

    radeon_bo_unmap(attr->bo);

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }

    attr->stride = dst_stride;
}

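/* Build one radeon_aos stream per active attribute: convert types the
 * vertex fetcher cannot read, realign named-bo data when needed,
 * reference dword-clean named bos in place, and copy user-space
 * arrays into fresh DMA regions. */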
static void r700SetupStreams(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
    context_t *context = R700_CONTEXT(ctx);
    GLuint stride;
    int ret;
    int i, index;

    R600_STATECHANGE(context, vtx);

    for (index = 0; index < context->nNumActiveAos; index++)
    {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT ||
#if MESA_BIG_ENDIAN
            getTypeSize(input[i]->Type) != 4 ||
#endif
            stride < 4)
        {
            r700ConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        }
        else
        {
            if (input[i]->BufferObj->Name)
            {
                if (input[i]->StrideB % 4)
                {
                    assert(((intptr_t) input[i]->Ptr) % input[i]->StrideB == 0);
                    r700AlignDataToDword(ctx, input[i], count, &context->stream_desc[index]);
                    context->stream_desc[index].is_named_bo = GL_FALSE;
                }
                else
                {
                    context->stream_desc[index].stride = input[i]->StrideB;
                    context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
                    context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                    context->stream_desc[index].is_named_bo = GL_TRUE;
                }
            }
            else
            {
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0)
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                }
                else
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo,
                                     &context->stream_desc[index].bo_offset, size, 32);

                radeon_bo_map(context->stream_desc[index].bo, 1);
                assert(context->stream_desc[index].bo->ptr != NULL);

                dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr,
                                               context->stream_desc[index].bo_offset);

                switch (context->stream_desc[index].dwords)
                {
                case 1:
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 2:
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 3:
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 4:
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                default:
                    assert(0);
                    break;
                }

                radeon_bo_unmap(context->stream_desc[index].bo);
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if (context->stream_desc[index].is_named_bo)
        {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs,
                                              context->stream_desc[index].bo,
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    ret = radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs,
                                        first_elem(&context->radeon.dma.reserved)->bo,
                                        RADEON_GEM_DOMAIN_GTT, 0);
}

static void r700FreeData(GLcontext *ctx)
{
    /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
     * to prevent double unref in radeonReleaseArrays
     * called during context destroy
     */
    context_t *context = R700_CONTEXT(ctx);

    int i;

    for (i = 0; i < context->nNumActiveAos; i++)
    {
        if (!context->stream_desc[i].is_named_bo)
        {
            radeon_bo_unref(context->stream_desc[i].bo);
        }
        context->radeon.tcl.aos[i].bo = NULL;
    }

    if (context->ind_buf.bo != NULL)
    {
        radeon_bo_unref(context->ind_buf.bo);
    }
}

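/* The hardware cannot fetch 8-bit indices (nor, on big-endian hosts,
 * raw 16-bit ones), so repack them here as two 16-bit indices per
 * dword in a fresh DMA region. */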
static void r700FixupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);
    GLvoid *src_ptr;
    GLuint *out;
    int i;
    GLboolean mapped_named_bo = GL_FALSE;

    if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
        mapped_named_bo = GL_TRUE;
        assert(mesa_ind_buf->obj->Pointer != NULL);
    }
    src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

    if (mesa_ind_buf->type == GL_UNSIGNED_BYTE)
    {
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
        GLubyte *in = (GLubyte *)src_ptr;

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }

        radeon_bo_unmap(context->ind_buf.bo);
#if MESA_BIG_ENDIAN
    }
    else
    { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
        GLushort *in = (GLushort *)src_ptr;
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }

        radeon_bo_unmap(context->ind_buf.bo);
#endif
    }

    context->ind_buf.is_32bit = GL_FALSE;
    context->ind_buf.count = mesa_ind_buf->count;

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
    }
}

static void r700SetupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);

    if (!mesa_ind_buf) {
        context->ind_buf.bo = NULL;
        return;
    }

#if MESA_BIG_ENDIAN
    if (mesa_ind_buf->type == GL_UNSIGNED_INT)
#else
    if (mesa_ind_buf->type != GL_UNSIGNED_BYTE)
#endif
    {
        const GLvoid *src_ptr;
        GLvoid *dst_ptr;
        GLboolean mapped_named_bo = GL_FALSE;

        if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
            assert(mesa_ind_buf->obj->Pointer != NULL);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

        const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);
        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        dst_ptr = ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        _mesa_memcpy(dst_ptr, src_ptr, size);

        radeon_bo_unmap(context->ind_buf.bo);
        context->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
        context->ind_buf.count = mesa_ind_buf->count;

        if (mapped_named_bo)
        {
            ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
        }
    }
    else
    {
        r700FixupIndexBuffer(ctx, mesa_ind_buf);
    }
}

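/* Try the hardware draw path: update derived GL state, validate
 * buffers, predict command-buffer usage, emit state and one draw per
 * primitive, then sync the color and depth surfaces.  Returns
 * GL_FALSE if buffer validation fails so the caller can fall back to
 * software TNL. */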
static GLboolean r700TryDrawPrims(GLcontext *ctx,
                                  const struct gl_client_array *arrays[],
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims,
                                  const struct _mesa_index_buffer *ib,
                                  GLuint min_index,
                                  GLuint max_index)
{
    context_t *context = R700_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    GLuint i, id = 0;
    struct radeon_renderbuffer *rrb;

    if (ctx->NewState)
        _mesa_update_state(ctx);

    _tnl_UpdateFixedFunctionProgram(ctx);
    r700SetVertexFormat(ctx, arrays, max_index + 1);
    /* shaders need to be updated before buffers are validated */
    r700UpdateShaders(ctx);
    if (!r600ValidateBuffers(ctx))
        return GL_FALSE;

    /* always emit CB base to prevent
     * lock ups on some chips.
     */
    R600_STATECHANGE(context, cb_target);
    /* mark vtx as dirty since it changes per-draw */
    R600_STATECHANGE(context, vtx);

    r700SetScissor(context);
    r700SetupVertexProgram(ctx);
    r700SetupFragmentProgram(ctx);
    r700UpdateShaderStates(ctx);

    GLuint emit_end = r700PredictRenderSize(ctx, prim, ib, nr_prims)
                      + context->radeon.cmdbuf.cs->cdw;

    r700SetupIndexBuffer(ctx, ib);
    r700SetupStreams(ctx, arrays, max_index + 1);

    radeonEmitState(radeon);

    radeon_debug_add_indent();
    for (i = 0; i < nr_prims; ++i)
    {
        if (context->ind_buf.bo)
            r700RunRenderPrimitive(ctx,
                                   prim[i].start,
                                   prim[i].start + prim[i].count,
                                   prim[i].mode);
        else
            r700RunRenderPrimitiveImmediate(ctx,
                                            prim[i].start,
                                            prim[i].start + prim[i].count,
                                            prim[i].mode);
    }
    radeon_debug_remove_indent();

    /* Flush render op cached for last several quads. */
    r700WaitForIdleClean(context);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     CB_ACTION_ENA_bit | (1 << (id + 6)));

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    r700FreeData(ctx);

    if (emit_end < context->radeon.cmdbuf.cs->cdw)
    {
        WARN_ONCE("Rendering was %d commands larger than predicted size."
                  " We might overflow command buffer.\n",
                  context->radeon.cmdbuf.cs->cdw - emit_end);
    }

    return GL_TRUE;
}

static void r700DrawPrims(GLcontext *ctx,
                          const struct gl_client_array *arrays[],
                          const struct _mesa_prim *prim,
                          GLuint nr_prims,
                          const struct _mesa_index_buffer *ib,
                          GLboolean index_bounds_valid,
                          GLuint min_index,
                          GLuint max_index)
{
    GLboolean retval = GL_FALSE;

    /* This check should get folded into just the places that
     * min/max index are really needed.
     */
    if (!index_bounds_valid) {
        vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
    }

    if (min_index) {
        vbo_rebase_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index, r700DrawPrims);
        return;
    }

    /* Make an attempt at drawing */
    retval = r700TryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

    /* If failed run tnl pipeline - it should take care of fallbacks */
    if (!retval)
        _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
}

void r700InitDraw(GLcontext *ctx)
{
    struct vbo_context *vbo = vbo_context(ctx);

    vbo->draw_prims = r700DrawPrims;
}