r600: fix up ordering of functions in draw prims path
[mesa.git] / src / mesa / drivers / dri / r600 / r700_render.c
/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */
#include "main/glheader.h"
#include "main/state.h"
#include "main/imports.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/context.h"
#include "main/dd.h"
#include "main/simple_list.h"
#include "main/api_arrayelt.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "vbo/vbo.h"

#include "tnl/tnl.h"
#include "tnl/t_vp_build.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "tnl/t_pipeline.h"
#include "vbo/vbo_context.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r600_tex.h"

#include "r700_vertprog.h"
#include "r700_fragprog.h"
#include "r700_state.h"

#include "radeon_buffer_objects.h"
#include "radeon_common_context.h"

void r700WaitForIdle(context_t *context);
void r700WaitForIdleClean(context_t *context);
GLboolean r700SendTextureState(context_t *context);
static unsigned int r700PrimitiveType(int prim);
void r600UpdateTextureState(GLcontext * ctx);
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type);

void r700WaitForIdle(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(3);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit);

    END_BATCH();
    COMMIT_BATCH();
}

void r700WaitForIdleClean(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(5);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
    R600_OUT_BATCH(CACHE_FLUSH_AND_INV_EVENT);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);

    END_BATCH();
    COMMIT_BATCH();
}

void r700Start3D(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
    {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_START_3D_CMDBUF, 0));
        R600_OUT_BATCH(0);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_CONTEXT_CONTROL, 1));
    R600_OUT_BATCH(0x80000000);
    R600_OUT_BATCH(0x80000000);
    END_BATCH();

    COMMIT_BATCH();

    r700WaitForIdleClean(context);
}

GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type)
{
    BATCH_LOCALS(&context->radeon);
    uint32_t cp_coher_size;

    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);

    if (!pbo)
        return GL_FALSE;

    /* CP_COHER_SIZE is in units of 256 bytes */
    if (pbo->size == 0xffffffff)
        cp_coher_size = 0xffffffff;
    else
        cp_coher_size = ((pbo->size + 255) >> 8);

    BEGIN_BATCH_NO_AUTOSTATE(5 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
    R600_OUT_BATCH(sync_type);
    R600_OUT_BATCH(cp_coher_size);
    R600_OUT_BATCH(0);  /* CP_COHER_BASE, patched by the reloc below */
    R600_OUT_BATCH(10); /* poll interval */
    R600_OUT_BATCH_RELOC(0,
                         pbo,
                         0,
                         read_domain, write_domain, 0);
    END_BATCH();
    COMMIT_BATCH();

    return GL_TRUE;
}
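
/* Worked example (illustrative only): for a 1000-byte buffer object,
 * cp_coher_size = (1000 + 255) >> 8 = 4, i.e. the sync covers four
 * 256-byte units starting at the relocated CP_COHER_BASE.
 */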

static unsigned int r700PrimitiveType(int prim)
{
    switch (prim & PRIM_MODE_MASK)
    {
    case GL_POINTS:
        return DI_PT_POINTLIST;
    case GL_LINES:
        return DI_PT_LINELIST;
    case GL_LINE_STRIP:
        return DI_PT_LINESTRIP;
    case GL_LINE_LOOP:
        return DI_PT_LINELOOP;
    case GL_TRIANGLES:
        return DI_PT_TRILIST;
    case GL_TRIANGLE_STRIP:
        return DI_PT_TRISTRIP;
    case GL_TRIANGLE_FAN:
        return DI_PT_TRIFAN;
    case GL_QUADS:
        return DI_PT_QUADLIST;
    case GL_QUAD_STRIP:
        return DI_PT_QUADSTRIP;
    case GL_POLYGON:
        return DI_PT_POLYGON;
    default:
        assert(0);
        return -1;
    }
}

static int r700NumVerts(int num_verts, int prim)
{
    int verts_off = 0;

    switch (prim & PRIM_MODE_MASK) {
    case GL_POINTS:
        verts_off = 0;
        break;
    case GL_LINES:
        verts_off = num_verts % 2;
        break;
    case GL_LINE_STRIP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_LINE_LOOP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_TRIANGLES:
        verts_off = num_verts % 3;
        break;
    case GL_TRIANGLE_STRIP:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_TRIANGLE_FAN:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_QUADS:
        verts_off = num_verts % 4;
        break;
    case GL_QUAD_STRIP:
        if (num_verts < 4)
            verts_off = num_verts;
        else
            verts_off = num_verts % 2;
        break;
    case GL_POLYGON:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    default:
        assert(0);
        return -1;
    }

    return num_verts - verts_off;
}
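
/* Worked examples (illustrative only): GL_TRIANGLES with 8 vertices drops
 * 8 % 3 = 2 and draws 6; GL_QUAD_STRIP with 7 vertices drops 7 % 2 = 1 and
 * draws 6; GL_TRIANGLE_STRIP with only 2 vertices drops both and draws
 * nothing.
 */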

static void r700RunRenderPrimitive(GLcontext * ctx, int start, int end, int prim)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, i, total_emit;
    int num_indices;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;
    TNLcontext *tnl = TNL_CONTEXT(ctx);
    struct vertex_buffer *vb = &tnl->vb;

    /* Use a DMA index buffer only when one was set up and it does not
     * live in host memory.
     */
    GLboolean bUseDrawIndex = (NULL != context->ind_buf.bo) &&
                              (GL_TRUE != context->ind_buf.bHostIb);

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    if (GL_TRUE == bUseDrawIndex)
    {
        total_emit = 3   /* VGT_PRIMITIVE_TYPE */
                   + 2   /* VGT_INDEX_TYPE */
                   + 2   /* NUM_INSTANCES */
                   + 5 + 2; /* DRAW_INDEX */
    }
    else
    {
        total_emit = 3   /* VGT_PRIMITIVE_TYPE */
                   + 2   /* VGT_INDEX_TYPE */
                   + 2   /* NUM_INSTANCES */
                   + num_indices + 3; /* DRAW_INDEX_IMMD */
    }

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    /* prim */
    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmVGT_PRIMITIVE_TYPE - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(vgt_primitive_type);

    /* index type */
    SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);

    if (GL_TRUE == bUseDrawIndex)
    {
        if (GL_TRUE != context->ind_buf.is_32bit)
        {
            SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
        }
    }

    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);

    /* num instances */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);

    /* draw packet */
    vgt_num_indices = num_indices;

    if (GL_TRUE == bUseDrawIndex)
    {
        SETfield(vgt_draw_initiator, DI_SRC_SEL_DMA, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }
    else
    {
        SETfield(vgt_draw_initiator, DI_SRC_SEL_IMMEDIATE, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }

    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    if (GL_TRUE == bUseDrawIndex)
    {
        R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX, 3));
        R600_OUT_BATCH(context->ind_buf.bo_offset);
        R600_OUT_BATCH(0);
        R600_OUT_BATCH(vgt_num_indices);
        R600_OUT_BATCH(vgt_draw_initiator);
        R600_OUT_BATCH_RELOC(context->ind_buf.bo_offset,
                             context->ind_buf.bo,
                             context->ind_buf.bo_offset,
                             RADEON_GEM_DOMAIN_GTT, 0, 0);
    }
    else
    {
        R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (num_indices + 1)));
        R600_OUT_BATCH(vgt_num_indices);
        R600_OUT_BATCH(vgt_draw_initiator);
    }

    /* Immediate draws pull the indices straight from the command stream:
     * either from the tnl Elts array or, for a host-side index buffer,
     * from the buffer contents.
     */
    if (NULL == context->ind_buf.bo)
    {
        for (i = start; i < (start + num_indices); i++) {
            if (vb->Elts)
            {
                R600_OUT_BATCH(vb->Elts[i]);
            }
            else
                R600_OUT_BATCH(i);
        }
    }
    else
    {
        if (GL_TRUE == context->ind_buf.bHostIb)
        {
            if (GL_TRUE != context->ind_buf.is_32bit)
            {
                GLushort *pIndex = (GLushort*)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);
                pIndex += start;
                for (i = 0; i < num_indices; i++)
                {
                    R600_OUT_BATCH(*pIndex);
                    pIndex++;
                }
            }
            else
            {
                GLuint *pIndex = (GLuint*)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);
                pIndex += start;

                for (i = 0; i < num_indices; i++)
                {
                    R600_OUT_BATCH(*pIndex);
                    pIndex++;
                }
            }
        }
    }

    END_BATCH();
    COMMIT_BATCH();
}
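
/* A minimal sketch of the stream the function above emits for an immediate
 * (non-DMA) draw of 3 GL_TRIANGLES indices; field values are illustrative,
 * not dumped from hardware:
 *
 *   SET_CONFIG_REG   VGT_PRIMITIVE_TYPE = DI_PT_TRILIST
 *   INDEX_TYPE       DI_INDEX_SIZE_32_BIT
 *   NUM_INSTANCES    1
 *   DRAW_INDEX_IMMD  count = 3, DI_SRC_SEL_IMMEDIATE, then indices 0 1 2
 */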

/* start 3d, idle, cb/db flush */
#define PRE_EMIT_STATE_BUFSZ (10 + 5 + 14)

static GLuint r700PredictRenderSize(GLcontext* ctx, GLuint nr_prims)
{
    context_t *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    GLboolean flushed;
    GLuint dwords, i;
    GLuint state_size;

    /* Precalculate the aos count so state prediction works. */
    context->radeon.tcl.aos_count = _mesa_bitcount(vp->mesa_program->Base.InputsRead);

    dwords = PRE_EMIT_STATE_BUFSZ;
    if (nr_prims)
        dwords += nr_prims * 14;
    else {
        TNLcontext *tnl = TNL_CONTEXT(ctx);
        struct vertex_buffer *vb = &tnl->vb;

        for (i = 0; i < vb->PrimitiveCount; i++)
            dwords += vb->Primitive[i].count + 10;
    }
    state_size = radeonCountStateEmitSize(&context->radeon);
    flushed = rcommonEnsureCmdBufSpace(&context->radeon,
                                       dwords + state_size, __func__);

    /* If we flushed, the dirty state changed and has to be re-counted. */
    if (flushed)
        dwords += radeonCountStateEmitSize(&context->radeon);
    else
        dwords += state_size;

    radeon_print(RADEON_RENDER, RADEON_VERBOSE,
                 "%s: total prediction size is %d.\n", __func__, dwords);
    return dwords;
}
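
/* Worked example (illustrative only): two indexed prims predict
 * PRE_EMIT_STATE_BUFSZ (29) + 2 * 14 = 57 dwords, plus whatever
 * radeonCountStateEmitSize() reports for the dirty state atoms.
 */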

static GLboolean r700RunRender(GLcontext * ctx,
                               struct tnl_pipeline_stage *stage)
{
    context_t *context = R700_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    unsigned int i, id = 0;
    TNLcontext *tnl = TNL_CONTEXT(ctx);
    struct vertex_buffer *vb = &tnl->vb;
    struct radeon_renderbuffer *rrb;
    GLuint emit_end;

    radeon_print(RADEON_RENDER, RADEON_NORMAL, "%s: cs begin at %d\n",
                 __func__, context->radeon.cmdbuf.cs->cdw);

    /* always emit CB base to prevent
     * lock ups on some chips.
     */
    R600_STATECHANGE(context, cb_target);
    /* mark vtx as dirty since it changes per-draw */
    R600_STATECHANGE(context, vtx);

    r700SetScissor(context);
    r700SetupVertexProgram(ctx);
    r700SetupFragmentProgram(ctx);
    r600UpdateTextureState(ctx);

    emit_end = r700PredictRenderSize(ctx, 0)
             + context->radeon.cmdbuf.cs->cdw;
    r700SetupStreams(ctx);

    radeonEmitState(radeon);

    radeon_debug_add_indent();
    for (i = 0; i < vb->PrimitiveCount; i++) {
        GLuint prim = _tnl_translate_prim(&vb->Primitive[i]);
        GLuint start = vb->Primitive[i].start;
        GLuint end = vb->Primitive[i].start + vb->Primitive[i].count;
        r700RunRenderPrimitive(ctx, start, end, prim);
    }
    radeon_debug_remove_indent();

    /* Flush the render ops cached for the last several quads. */
    r700WaitForIdleClean(context);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     CB_ACTION_ENA_bit | (1 << (id + 6)));

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    radeonReleaseArrays(ctx, ~0);

    radeon_print(RADEON_RENDER, RADEON_TRACE, "%s: cs end at %d\n",
                 __func__, context->radeon.cmdbuf.cs->cdw);

    if (emit_end < context->radeon.cmdbuf.cs->cdw)
        WARN_ONCE("Rendering was %d commands larger than predicted size."
                  " We might overflow command buffer.\n",
                  context->radeon.cmdbuf.cs->cdw - emit_end);

    return GL_FALSE;
}

static GLboolean r700RunNonTCLRender(GLcontext * ctx,
                                     struct tnl_pipeline_stage *stage)
{
    GLboolean bRet = GL_TRUE;

    return bRet;
}
static GLboolean r700RunTCLRender(GLcontext * ctx,
                                  struct tnl_pipeline_stage *stage)
{
    GLboolean bRet = GL_FALSE;

    /* TODO : sw fallback */

    /* Need shader bo's setup before bo check */
    r700UpdateShaders(ctx);

    /* Ensure all enabled and complete textures are uploaded along with
     * any buffers being used.
     */
    if (!r600ValidateBuffers(ctx))
    {
        return GL_TRUE;
    }

    bRet = r700RunRender(ctx, stage);

    /* Returning GL_FALSE stops _tnl_run_pipeline from running any further
     * pipeline stages.  The render here DOES finish the whole pipe, so
     * GL_FALSE is returned to signal success.
     */
    return bRet;
}

const struct tnl_pipeline_stage _r700_render_stage = {
    "r700 Hardware Rasterization",
    NULL,
    NULL,
    NULL,
    NULL,
    r700RunNonTCLRender
};

const struct tnl_pipeline_stage _r700_tcl_stage = {
    "r700 Hardware Transform, Clipping and Lighting",
    NULL,
    NULL,
    NULL,
    NULL,
    r700RunTCLRender
};

const struct tnl_pipeline_stage *r700_pipeline[] =
{
    &_r700_tcl_stage,
    &_tnl_vertex_transform_stage,
    &_tnl_normal_transform_stage,
    &_tnl_lighting_stage,
    &_tnl_fog_coordinate_stage,
    &_tnl_texgen_stage,
    &_tnl_texture_transform_stage,
    &_tnl_vertex_program_stage,

    &_r700_render_stage,
    &_tnl_render_stage,
    NULL,
};

#define CONVERT( TYPE, MACRO ) do {             \
    GLuint i, j, sz;                            \
    sz = input->Size;                           \
    if (input->Normalized) {                    \
        for (i = 0; i < count; i++) {           \
            const TYPE *in = (TYPE *)src_ptr;   \
            for (j = 0; j < sz; j++) {          \
                *dst_ptr++ = MACRO(*in);        \
                in++;                           \
            }                                   \
            src_ptr += stride;                  \
        }                                       \
    } else {                                    \
        for (i = 0; i < count; i++) {           \
            const TYPE *in = (TYPE *)src_ptr;   \
            for (j = 0; j < sz; j++) {          \
                *dst_ptr++ = (GLfloat)(*in);    \
                in++;                           \
            }                                   \
            src_ptr += stride;                  \
        }                                       \
    }                                           \
} while (0)
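
/* For reference, CONVERT(GLshort, SHORT_TO_FLOAT) walks `count` elements of
 * `input->Size` shorts each, expanding every component to a float at
 * dst_ptr; normalized attributes go through SHORT_TO_FLOAT, all others
 * through a plain (GLfloat) cast.
 */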

/**
 * Convert attribute data type to float.
 * If the attribute uses a named buffer object, replace the bo with a newly
 * allocated bo.
 */
static void r700ConvertAttrib(GLcontext *ctx, int count,
                              const struct gl_client_array *input,
                              struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const GLvoid *src_ptr;
    GLboolean mapped_named_bo = GL_FALSE;
    GLfloat *dst_ptr;
    GLuint stride;

    stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;

    /* Convert the value for the first element only */
    if (input->StrideB == 0)
    {
        count = 1;
    }

    if (input->BufferObj->Name)
    {
        if (!input->BufferObj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
    }
    else
    {
        src_ptr = input->Ptr;
    }

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset,
                         sizeof(GLfloat) * input->Size * count, 32);
    dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

    assert(src_ptr != NULL);

    switch (input->Type)
    {
    case GL_DOUBLE:
        CONVERT(GLdouble, (GLfloat));
        break;
    case GL_UNSIGNED_INT:
        CONVERT(GLuint, UINT_TO_FLOAT);
        break;
    case GL_INT:
        CONVERT(GLint, INT_TO_FLOAT);
        break;
    case GL_UNSIGNED_SHORT:
        CONVERT(GLushort, USHORT_TO_FLOAT);
        break;
    case GL_SHORT:
        CONVERT(GLshort, SHORT_TO_FLOAT);
        break;
    case GL_UNSIGNED_BYTE:
        assert(input->Format != GL_BGRA);
        CONVERT(GLubyte, UBYTE_TO_FLOAT);
        break;
    case GL_BYTE:
        CONVERT(GLbyte, BYTE_TO_FLOAT);
        break;
    default:
        assert(0);
        break;
    }

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }
}

static void r700AlignDataToDword(GLcontext *ctx,
                                 const struct gl_client_array *input,
                                 int count,
                                 struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const int dst_stride = (input->StrideB + 3) & ~3;
    const int size = getTypeSize(input->Type) * input->Size * count;
    GLboolean mapped_named_bo = GL_FALSE;

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset, size, 32);

    if (!input->BufferObj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
        mapped_named_bo = GL_TRUE;
    }

    {
        /* Use byte pointers so the stride arithmetic is portable C. */
        GLubyte *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
        GLubyte *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
        int i;

        for (i = 0; i < count; ++i)
        {
            _mesa_memcpy(dst_ptr, src_ptr, input->StrideB);
            src_ptr += input->StrideB;
            dst_ptr += dst_stride;
        }
    }

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }

    attr->stride = dst_stride;
}
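
/* Worked example (illustrative only): a packed 3 x GLushort attribute has
 * StrideB == 6, so dst_stride = (6 + 3) & ~3 = 8 and each element is copied
 * into an 8-byte slot, dword-aligning the stream for the vertex fetcher.
 */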

static void r700SetupStreams2(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
    context_t *context = R700_CONTEXT(ctx);
    GLuint stride;
    int ret;
    int i, index;

    R600_STATECHANGE(context, vtx);

    for (index = 0; index < context->nNumActiveAos; index++)
    {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT ||
#if MESA_BIG_ENDIAN
            getTypeSize(input[i]->Type) != 4 ||
#endif
            stride < 4)
        {
            r700ConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        }
        else
        {
            if (input[i]->BufferObj->Name)
            {
                if (stride % 4 != 0)
                {
                    assert(((intptr_t) input[i]->Ptr) % input[i]->StrideB == 0);
                    r700AlignDataToDword(ctx, input[i], count, &context->stream_desc[index]);
                    context->stream_desc[index].is_named_bo = GL_FALSE;
                }
                else
                {
                    context->stream_desc[index].stride = input[i]->StrideB;
                    context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
                    context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                    context->stream_desc[index].is_named_bo = GL_TRUE;
                }
            }
            else
            {
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0)
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                }
                else
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo,
                                     &context->stream_desc[index].bo_offset, size, 32);
                assert(context->stream_desc[index].bo->ptr != NULL);
                dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr,
                                               context->stream_desc[index].bo_offset);

                switch (context->stream_desc[index].dwords)
                {
                case 1:
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    context->stream_desc[index].stride = 4;
                    break;
                case 2:
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    context->stream_desc[index].stride = 8;
                    break;
                case 3:
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    context->stream_desc[index].stride = 12;
                    break;
                case 4:
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    context->stream_desc[index].stride = 16;
                    break;
                default:
                    assert(0);
                    break;
                }
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if (context->stream_desc[index].is_named_bo)
        {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs,
                                              context->stream_desc[index].bo,
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    context->radeon.tcl.aos_count = context->nNumActiveAos;
    ret = radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs,
                                        first_elem(&context->radeon.dma.reserved)->bo,
                                        RADEON_GEM_DOMAIN_GTT, 0);
}
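
/* Worked example (illustrative only): a tightly packed vec3 float attribute
 * in user memory hits the radeonEmitVec12 case above, so stream_desc.stride
 * becomes 12 bytes and aos->stride = 12 / sizeof(float) = 3 floats per
 * vertex.
 */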

static void r700FreeData(GLcontext *ctx)
{
    /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
     * to prevent a double unref in radeonReleaseArrays
     * called during context destroy.
     */
    context_t *context = R700_CONTEXT(ctx);

    int i;

    for (i = 0; i < context->nNumActiveAos; i++)
    {
        if (!context->stream_desc[i].is_named_bo)
        {
            radeon_bo_unref(context->stream_desc[i].bo);
        }
        context->radeon.tcl.aos[i].bo = NULL;
    }

    if (context->ind_buf.bo != NULL)
    {
        if (context->ind_buf.bHostIb != GL_TRUE)
        {
            radeon_bo_unref(context->ind_buf.bo);
        }
        else
        {
            FREE(context->ind_buf.bo->ptr);
            FREE(context->ind_buf.bo);
            context->ind_buf.bo = NULL;
        }
    }
}

static void r700FixupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);
    GLvoid *src_ptr;
    GLuint *out;
    int i;
    GLboolean mapped_named_bo = GL_FALSE;

    if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
        mapped_named_bo = GL_TRUE;
        assert(mesa_ind_buf->obj->Pointer != NULL);
    }
    src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

    if (mesa_ind_buf->type == GL_UNSIGNED_BYTE)
    {
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
        GLubyte *in = (GLubyte *)src_ptr;

        if (context->ind_buf.bHostIb != GL_TRUE)
        {
            radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                                 &context->ind_buf.bo_offset, size, 4);

            assert(context->ind_buf.bo->ptr != NULL);
            out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);
        }
        else
        {
            context->ind_buf.bo = MALLOC_STRUCT(radeon_bo);
            context->ind_buf.bo->ptr = ALIGN_MALLOC(size, 4);
            context->ind_buf.bo_offset = 0;
            out = (GLuint *)context->ind_buf.bo->ptr;
        }

        /* Widen byte indices to shorts, packing two per dword. */
        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }

#if MESA_BIG_ENDIAN
    }
    else
    { /* mesa_ind_buf->type == GL_UNSIGNED_SHORT */
        GLushort *in = (GLushort *)src_ptr;
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

        if (context->ind_buf.bHostIb != GL_TRUE)
        {
            radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                                 &context->ind_buf.bo_offset, size, 4);

            assert(context->ind_buf.bo->ptr != NULL);
            out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);
        }
        else
        {
            context->ind_buf.bo = MALLOC_STRUCT(radeon_bo);
            context->ind_buf.bo->ptr = ALIGN_MALLOC(size, 4);
            context->ind_buf.bo_offset = 0;
            out = (GLuint *)context->ind_buf.bo->ptr;
        }

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }
#endif
    }

    context->ind_buf.is_32bit = GL_FALSE;
    context->ind_buf.count = mesa_ind_buf->count;

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
    }
}
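
/* Worked example (illustrative only, little endian): GL_UNSIGNED_BYTE
 * indices {7, 9, 11} become the dwords 0x00090007 and 0x0000000B, i.e.
 * three 16-bit indices padded out to a whole number of dwords.
 */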

static void r700SetupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);

    if (!mesa_ind_buf) {
        context->ind_buf.bo = NULL;
        return;
    }

    context->ind_buf.bHostIb = GL_FALSE;

#if MESA_BIG_ENDIAN
    if (mesa_ind_buf->type == GL_UNSIGNED_INT)
#else
    if (mesa_ind_buf->type != GL_UNSIGNED_BYTE)
#endif
    {
        const GLvoid *src_ptr;
        GLvoid *dst_ptr;
        GLboolean mapped_named_bo = GL_FALSE;
        const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);

        if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
            assert(mesa_ind_buf->obj->Pointer != NULL);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

        if (context->ind_buf.bHostIb != GL_TRUE)
        {
            radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                                 &context->ind_buf.bo_offset, size, 4);
            assert(context->ind_buf.bo->ptr != NULL);
            dst_ptr = ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);
        }
        else
        {
            context->ind_buf.bo = MALLOC_STRUCT(radeon_bo);
            context->ind_buf.bo->ptr = ALIGN_MALLOC(size, 4);
            context->ind_buf.bo_offset = 0;
            dst_ptr = context->ind_buf.bo->ptr;
        }

        _mesa_memcpy(dst_ptr, src_ptr, size);

        context->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
        context->ind_buf.count = mesa_ind_buf->count;

        if (mapped_named_bo)
        {
            ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
        }
    }
    else
    {
        r700FixupIndexBuffer(ctx, mesa_ind_buf);
    }
}

static GLboolean r700TryDrawPrims(GLcontext *ctx,
                                  const struct gl_client_array *arrays[],
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims,
                                  const struct _mesa_index_buffer *ib,
                                  GLuint min_index,
                                  GLuint max_index )
{
    context_t *context = R700_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    GLuint i, id = 0;
    GLuint emit_end;
    struct radeon_renderbuffer *rrb;

    if (ctx->NewState)
    {
        _mesa_update_state( ctx );
    }

    _tnl_UpdateFixedFunctionProgram(ctx);
    r700SetVertexFormat(ctx, arrays, max_index + 1);
    r700SetupIndexBuffer(ctx, ib);
    /* shaders need to be updated before buffers are validated */
    r700UpdateShaders2(ctx);
    if (!r600ValidateBuffers(ctx))
        return GL_FALSE;

    /* always emit CB base to prevent
     * lock ups on some chips.
     */
    R600_STATECHANGE(context, cb_target);
    /* mark vtx as dirty since it changes per-draw */
    R600_STATECHANGE(context, vtx);

    r700SetScissor(context);
    r700SetupVertexProgram(ctx);
    r700SetupFragmentProgram(ctx);
    r600UpdateTextureState(ctx);

    emit_end = r700PredictRenderSize(ctx, nr_prims)
             + context->radeon.cmdbuf.cs->cdw;

    r700SetupStreams2(ctx, arrays, max_index + 1);

    radeonEmitState(radeon);

    radeon_debug_add_indent();
    for (i = 0; i < nr_prims; ++i)
    {
        r700RunRenderPrimitive(ctx,
                               prim[i].start,
                               prim[i].start + prim[i].count,
                               prim[i].mode);
    }
    radeon_debug_remove_indent();

    /* Flush the render ops cached for the last several quads. */
    r700WaitForIdleClean(context);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     CB_ACTION_ENA_bit | (1 << (id + 6)));

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    r700FreeData(ctx);

    if (emit_end < context->radeon.cmdbuf.cs->cdw)
    {
        WARN_ONCE("Rendering was %d commands larger than predicted size."
                  " We might overflow command buffer.\n",
                  context->radeon.cmdbuf.cs->cdw - emit_end);
    }

    return GL_TRUE;
}

static void r700DrawPrimsRe(GLcontext *ctx,
                            const struct gl_client_array *arrays[],
                            const struct _mesa_prim *prim,
                            GLuint nr_prims,
                            const struct _mesa_index_buffer *ib,
                            GLboolean index_bounds_valid,
                            GLuint min_index,
                            GLuint max_index)
{
    GLboolean retval = GL_FALSE;

    /* This check should get folded into just the places that
     * min/max index are really needed.
     */
    if (!index_bounds_valid) {
        vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
    }

    if (min_index) {
        vbo_rebase_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index, r700DrawPrimsRe);
        return;
    }

    /* Make an attempt at drawing */
    retval = r700TryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

    /* If that failed, run the tnl pipeline; it should take care of fallbacks. */
    if (!retval)
        _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
}
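
/* Worked example (illustrative only): if an app draws with indices
 * 100..130, min_index is 100, so vbo_rebase_prims() shifts the arrays and
 * indices down to 0..30 and re-enters r700DrawPrimsRe with min_index == 0.
 */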

static void r700DrawPrims(GLcontext *ctx,
                          const struct gl_client_array *arrays[],
                          const struct _mesa_prim *prim,
                          GLuint nr_prims,
                          const struct _mesa_index_buffer *ib,
                          GLboolean index_bounds_valid,
                          GLuint min_index,
                          GLuint max_index)
{
    context_t *context = R700_CONTEXT(ctx);

    /* For non-indexed drawing, use the tnl pipe. */
    if (!ib)
    {
        context->ind_buf.bo = NULL;

        _tnl_vbo_draw_prims(ctx, arrays, prim, nr_prims, ib,
                            index_bounds_valid, min_index, max_index);
        return;
    }

    r700DrawPrimsRe(ctx, arrays, prim, nr_prims, ib, index_bounds_valid, min_index, max_index);
}

void r700InitDraw(GLcontext *ctx)
{
    struct vbo_context *vbo = vbo_context(ctx);

    vbo->draw_prims = r700DrawPrims;
}
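
/* A minimal sketch of how this hook is reached, assuming the usual Mesa vbo
 * dispatch of this era: glDrawElements() lands in the vbo module's exec
 * code, which calls vbo->draw_prims, i.e. r700DrawPrims once r700InitDraw()
 * has run during context creation.
 */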