/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/glheader.h"
#include "main/state.h"
#include "main/imports.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/context.h"
#include "main/dd.h"
#include "main/simple_list.h"
#include "main/api_arrayelt.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "vbo/vbo.h"

#include "tnl/tnl.h"
#include "tnl/t_vp_build.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "tnl/t_pipeline.h"
#include "vbo/vbo_context.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r600_tex.h"

#include "r700_vertprog.h"
#include "r700_fragprog.h"
#include "r700_state.h"

#include "radeon_buffer_objects.h"
#include "radeon_common_context.h"

void r700WaitForIdle(context_t *context);
void r700WaitForIdleClean(context_t *context);
/* returns a DI_PT_* value, or -1 for an unknown primitive, so it must be signed */
static int r700PrimitiveType(int prim);
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type);

void r700WaitForIdle(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(3);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit);

    END_BATCH();
    COMMIT_BATCH();
}

void r700WaitForIdleClean(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(5);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
    R600_OUT_BATCH(CACHE_FLUSH_AND_INV_EVENT);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);

    END_BATCH();
    COMMIT_BATCH();
}

void r700Start3D(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
    {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_START_3D_CMDBUF, 0));
        R600_OUT_BATCH(0);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_CONTEXT_CONTROL, 1));
    R600_OUT_BATCH(0x80000000); /* load control: enable context register loads */
    R600_OUT_BATCH(0x80000000); /* shadow enable */
    END_BATCH();

    COMMIT_BATCH();

    r700WaitForIdleClean(context);
}

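/*
 * Emit a SURFACE_SYNC packet to flush/invalidate the caches selected by
 * sync_type over the address range covered by pbo.  Returns GL_FALSE if no
 * buffer object was supplied.
 */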
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type)
{
    BATCH_LOCALS(&context->radeon);
    uint32_t cp_coher_size;
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);

    if (!pbo)
        return GL_FALSE;

    if (pbo->size == 0xffffffff)
        cp_coher_size = 0xffffffff;
    else
        cp_coher_size = ((pbo->size + 255) >> 8); /* size in 256-byte units */

    BEGIN_BATCH_NO_AUTOSTATE(5 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
    R600_OUT_BATCH(sync_type);      /* CP_COHER_CNTL */
    R600_OUT_BATCH(cp_coher_size);  /* CP_COHER_SIZE */
    R600_OUT_BATCH(0);              /* CP_COHER_BASE, patched by the reloc below */
    R600_OUT_BATCH(10);             /* POLL_INTERVAL */
    R600_OUT_BATCH_RELOC(0,
                         pbo,
                         0,
                         read_domain, write_domain, 0);
    END_BATCH();
    COMMIT_BATCH();

    return GL_TRUE;
}

static int r700PrimitiveType(int prim)
{
    switch (prim & PRIM_MODE_MASK)
    {
    case GL_POINTS:
        return DI_PT_POINTLIST;
    case GL_LINES:
        return DI_PT_LINELIST;
    case GL_LINE_STRIP:
        return DI_PT_LINESTRIP;
    case GL_LINE_LOOP:
        return DI_PT_LINELOOP;
    case GL_TRIANGLES:
        return DI_PT_TRILIST;
    case GL_TRIANGLE_STRIP:
        return DI_PT_TRISTRIP;
    case GL_TRIANGLE_FAN:
        return DI_PT_TRIFAN;
    case GL_QUADS:
        return DI_PT_QUADLIST;
    case GL_QUAD_STRIP:
        return DI_PT_QUADSTRIP;
    case GL_POLYGON:
        return DI_PT_POLYGON;
    default:
        assert(0);
        return -1;
    }
}

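/*
 * Clamp num_verts to a whole number of primitives: the hardware draws only
 * complete primitives, so trailing vertices that cannot form one are dropped
 * (e.g. the odd vertex of a GL_LINES draw).
 */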
static int r700NumVerts(int num_verts, int prim)
{
    int verts_off = 0;

    switch (prim & PRIM_MODE_MASK) {
    case GL_POINTS:
        verts_off = 0;
        break;
    case GL_LINES:
        verts_off = num_verts % 2;
        break;
    case GL_LINE_STRIP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_LINE_LOOP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_TRIANGLES:
        verts_off = num_verts % 3;
        break;
    case GL_TRIANGLE_STRIP:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_TRIANGLE_FAN:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_QUADS:
        verts_off = num_verts % 4;
        break;
    case GL_QUAD_STRIP:
        if (num_verts < 4)
            verts_off = num_verts;
        else
            verts_off = num_verts % 2;
        break;
    case GL_POLYGON:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    default:
        assert(0);
        return -1;
    }

    return num_verts - verts_off;
}

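/*
 * Indexed draw path: the indices have already been uploaded to
 * context->ind_buf by r700SetupIndexBuffer(), so the draw packet fetches
 * them through DMA (DI_SRC_SEL_DMA).
 */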
static void r700RunRenderPrimitive(GLcontext * ctx, int start, int end, int prim)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, total_emit;
    int num_indices;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);

    if (GL_TRUE != context->ind_buf.is_32bit)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_SRC_SEL_DMA, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    total_emit = 3      /* VGT_PRIMITIVE_TYPE */
               + 2      /* VGT_INDEX_TYPE */
               + 2      /* NUM_INSTANCES */
               + 5 + 2; /* DRAW_INDEX */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    /* prim */
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    /* index type */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    /* num instances */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* draw packet */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX, 3));
    R600_OUT_BATCH(context->ind_buf.bo_offset); /* index base lo, patched by the reloc */
    R600_OUT_BATCH(0);                          /* index base hi */
    R600_OUT_BATCH(vgt_num_indices);
    R600_OUT_BATCH(vgt_draw_initiator);
    R600_OUT_BATCH_RELOC(context->ind_buf.bo_offset,
                         context->ind_buf.bo,
                         context->ind_buf.bo_offset,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

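/*
 * Non-indexed draw path: for draws starting at vertex 0 the indices are
 * auto-generated (DI_SRC_SEL_AUTO_INDEX); otherwise they are embedded
 * directly in the command stream (DI_SRC_SEL_IMMEDIATE), packed two 16-bit
 * indices per dword when they all fit in 16 bits.
 */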
static void r700RunRenderPrimitiveImmediate(GLcontext * ctx, int start, int end, int prim)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, i;
    int num_indices;
    uint32_t total_emit = 0;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    if (num_indices > 0xffff)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }
    else
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    if (start == 0)
    {
        SETfield(vgt_draw_initiator, DI_SRC_SEL_AUTO_INDEX, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            total_emit += num_indices;           /* one dword per 32-bit index */
        }
        else
        {
            total_emit += (num_indices + 1) / 2; /* two 16-bit indices per dword */
        }
        SETfield(vgt_draw_initiator, DI_SRC_SEL_IMMEDIATE, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }

    total_emit += 3  /* VGT_PRIMITIVE_TYPE */
                + 2  /* VGT_INDEX_TYPE */
                + 2  /* NUM_INSTANCES */
                + 3; /* DRAW */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    /* prim */
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    /* index type */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    /* num instances */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* draw packet */
    if (start == 0)
    {
        R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
        R600_OUT_BATCH(vgt_num_indices);
        R600_OUT_BATCH(vgt_draw_initiator);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (num_indices + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i++)
            {
                R600_OUT_BATCH(i);
            }
        }
        else
        {
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (((num_indices + 1) / 2) + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i += 2)
            {
                if ((i + 1) == (start + num_indices))
                {
                    /* odd trailing index occupies the low half of the dword */
                    R600_OUT_BATCH(i);
                }
                else
                {
                    R600_OUT_BATCH(((i + 1) << 16) | (i));
                }
            }
        }
    }

    END_BATCH();
    COMMIT_BATCH();
}

/* start 3d, idle, cb/db flush */
#define PRE_EMIT_STATE_BUFSZ (10 + 5 + 14)

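/*
 * Worst-case dword estimate for the draw packets this request will emit:
 * an indexed prim costs 14 dwords (3 + 2 + 2 + 5 + 2, see
 * r700RunRenderPrimitive); an immediate prim costs 10 dwords of setup plus
 * its inlined index payload.
 */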
static GLuint r700PredictRenderSize(GLcontext* ctx,
                                    const struct _mesa_prim *prim,
                                    const struct _mesa_index_buffer *ib,
                                    GLuint nr_prims)
{
    context_t *context = R700_CONTEXT(ctx);
    GLboolean flushed;
    GLuint dwords, i;
    GLuint state_size;

    dwords = PRE_EMIT_STATE_BUFSZ;
    if (ib)
        dwords += nr_prims * 14;
    else {
        for (i = 0; i < nr_prims; ++i)
        {
            if (prim[i].start == 0)
                dwords += 10;
            else if (prim[i].count > 0xffff)
                dwords += prim[i].count + 10;
            else
                dwords += ((prim[i].count + 1) / 2) + 10;
        }
    }

    state_size = radeonCountStateEmitSize(&context->radeon);
    flushed = rcommonEnsureCmdBufSpace(&context->radeon,
                                       dwords + state_size,
                                       __FUNCTION__);
    if (flushed)
        dwords += radeonCountStateEmitSize(&context->radeon);
    else
        dwords += state_size;

    radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);
    return dwords;
}

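/*
 * Expand one element-type case of the attribute conversion below.  Expects
 * count, src_ptr, stride and dst_ptr to be in scope; MACRO converts a single
 * source element to float (a plain cast is used for non-normalized data).
 */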
#define CONVERT( TYPE, MACRO ) do {				\
	GLuint i, j, sz;					\
	sz = input->Size;					\
	if (input->Normalized) {				\
		for (i = 0; i < count; i++) {			\
			const TYPE *in = (TYPE *)src_ptr;	\
			for (j = 0; j < sz; j++) {		\
				*dst_ptr++ = MACRO(*in);	\
				in++;				\
			}					\
			src_ptr += stride;			\
		}						\
	} else {						\
		for (i = 0; i < count; i++) {			\
			const TYPE *in = (TYPE *)src_ptr;	\
			for (j = 0; j < sz; j++) {		\
				*dst_ptr++ = (GLfloat)(*in);	\
				in++;				\
			}					\
			src_ptr += stride;			\
		}						\
	}							\
} while (0)

/**
 * Convert attribute data to floats.
 * If the attribute uses a named buffer object, replace the bo with a newly
 * allocated bo holding the converted data.
 */
static void r700ConvertAttrib(GLcontext *ctx, int count,
                              const struct gl_client_array *input,
                              struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const GLubyte *src_ptr; /* byte pointer, so the CONVERT macro can step it */
    GLboolean mapped_named_bo = GL_FALSE;
    GLfloat *dst_ptr;
    GLuint stride;

    stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;

    /* A zero stride means a constant attribute: convert the first element only */
    if (input->StrideB == 0)
    {
        count = 1;
    }

    if (input->BufferObj->Name)
    {
        if (!input->BufferObj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
    }
    else
    {
        src_ptr = input->Ptr;
    }

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset,
                         sizeof(GLfloat) * input->Size * count, 32);

    radeon_bo_map(attr->bo, 1);

    dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

    assert(src_ptr != NULL);

    switch (input->Type)
    {
    case GL_DOUBLE:
        CONVERT(GLdouble, (GLfloat));
        break;
    case GL_UNSIGNED_INT:
        CONVERT(GLuint, UINT_TO_FLOAT);
        break;
    case GL_INT:
        CONVERT(GLint, INT_TO_FLOAT);
        break;
    case GL_UNSIGNED_SHORT:
        CONVERT(GLushort, USHORT_TO_FLOAT);
        break;
    case GL_SHORT:
        CONVERT(GLshort, SHORT_TO_FLOAT);
        break;
    case GL_UNSIGNED_BYTE:
        assert(input->Format != GL_BGRA);
        CONVERT(GLubyte, UBYTE_TO_FLOAT);
        break;
    case GL_BYTE:
        CONVERT(GLbyte, BYTE_TO_FLOAT);
        break;
    default:
        assert(0);
        break;
    }

    radeon_bo_unmap(attr->bo);

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }
}

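/*
 * Copy loosely packed vertex data into a DMA buffer, padding the per-vertex
 * stride up to the next multiple of four bytes.  Used by r700SetupStreams
 * for named buffer objects whose stride is not already dword aligned.
 */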
static void r700AlignDataToDword(GLcontext *ctx,
                                 const struct gl_client_array *input,
                                 int count,
                                 struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const int dst_stride = (input->StrideB + 3) & ~3;
    /* the destination uses the padded stride, so size it accordingly */
    const int size = dst_stride * count;
    GLboolean mapped_named_bo = GL_FALSE;

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset, size, 32);

    radeon_bo_map(attr->bo, 1);

    if (!input->BufferObj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
        mapped_named_bo = GL_TRUE;
    }

    {
        GLubyte *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
        GLubyte *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
        int i;

        for (i = 0; i < count; ++i)
        {
            _mesa_memcpy(dst_ptr, src_ptr, input->StrideB);
            src_ptr += input->StrideB;
            dst_ptr += dst_stride;
        }
    }

    radeon_bo_unmap(attr->bo);
    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }

    attr->stride = dst_stride;
}

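/*
 * Upload vertex arrays into GTT-visible buffers.  Each input takes one of
 * three paths: conversion to float (for types the fetcher cannot read
 * directly), re-copying a named buffer object whose stride is not dword
 * aligned, or a straight DMA copy of user-space arrays.  Named buffer
 * objects with a usable layout are referenced in place.
 */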
static void r700SetupStreams(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
    context_t *context = R700_CONTEXT(ctx);
    GLuint stride;
    int ret;
    int i, index;

    R600_STATECHANGE(context, vtx);

    for (index = 0; index < context->nNumActiveAos; index++)
    {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT ||
#if MESA_BIG_ENDIAN
            getTypeSize(input[i]->Type) != 4 ||
#endif
            stride < 4)
        {
            r700ConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        }
        else
        {
            if (input[i]->BufferObj->Name)
            {
                if (stride % 4 != 0)
                {
                    assert(((intptr_t) input[i]->Ptr) % input[i]->StrideB == 0);
                    r700AlignDataToDword(ctx, input[i], count, &context->stream_desc[index]);
                    context->stream_desc[index].is_named_bo = GL_FALSE;
                }
                else
                {
                    context->stream_desc[index].stride = input[i]->StrideB;
                    context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
                    context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                    context->stream_desc[index].is_named_bo = GL_TRUE;
                }
            }
            else
            {
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0)
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                }
                else
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo,
                                     &context->stream_desc[index].bo_offset, size, 32);

                radeon_bo_map(context->stream_desc[index].bo, 1);
                assert(context->stream_desc[index].bo->ptr != NULL);

                dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr,
                                               context->stream_desc[index].bo_offset);

                switch (context->stream_desc[index].dwords)
                {
                case 1:
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 2:
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 3:
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 4:
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                default:
                    assert(0);
                    break;
                }
                radeon_bo_unmap(context->stream_desc[index].bo);
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if (context->stream_desc[index].is_named_bo)
        {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs,
                                              context->stream_desc[index].bo,
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    ret = radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs,
                                        first_elem(&context->radeon.dma.reserved)->bo,
                                        RADEON_GEM_DOMAIN_GTT, 0);
    (void)ret; /* silence set-but-unused warning */
}

static void r700FreeData(GLcontext *ctx)
{
    /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
     * to prevent a double unref in radeonReleaseArrays,
     * which is called during context destroy.
     */
    context_t *context = R700_CONTEXT(ctx);

    int i;

    for (i = 0; i < context->nNumActiveAos; i++)
    {
        if (!context->stream_desc[i].is_named_bo)
        {
            radeon_bo_unref(context->stream_desc[i].bo);
        }
        context->radeon.tcl.aos[i].bo = NULL;
    }

    if (context->ind_buf.bo != NULL)
    {
        radeon_bo_unref(context->ind_buf.bo);
    }
}

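/*
 * Repack index data the hardware cannot fetch directly: GL_UNSIGNED_BYTE
 * indices are widened to 16 bits, and on big-endian hosts GL_UNSIGNED_SHORT
 * indices are rewritten as well, packing each pair of indices into one
 * little-endian dword.
 */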
static void r700FixupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);
    GLvoid *src_ptr;
    GLuint *out;
    int i;
    GLboolean mapped_named_bo = GL_FALSE;

    if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
        mapped_named_bo = GL_TRUE;
        assert(mesa_ind_buf->obj->Pointer != NULL);
    }
    src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

    if (mesa_ind_buf->type == GL_UNSIGNED_BYTE)
    {
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
        GLubyte *in = (GLubyte *)src_ptr;

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }

        radeon_bo_unmap(context->ind_buf.bo);
#if MESA_BIG_ENDIAN
    }
    else
    { /* mesa_ind_buf->type == GL_UNSIGNED_SHORT */
        GLushort *in = (GLushort *)src_ptr;
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }
        radeon_bo_unmap(context->ind_buf.bo);
#endif
    }

    context->ind_buf.is_32bit = GL_FALSE;
    context->ind_buf.count = mesa_ind_buf->count;

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
    }
}

static void r700SetupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);

    if (!mesa_ind_buf) {
        context->ind_buf.bo = NULL;
        return;
    }

#if MESA_BIG_ENDIAN
    /* only 32-bit indices can be copied through unmodified on big endian */
    if (mesa_ind_buf->type == GL_UNSIGNED_INT)
    {
#else
    /* 16- and 32-bit indices can be copied through unmodified */
    if (mesa_ind_buf->type != GL_UNSIGNED_BYTE)
    {
#endif
        const GLvoid *src_ptr;
        GLvoid *dst_ptr;
        GLboolean mapped_named_bo = GL_FALSE;

        if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
            assert(mesa_ind_buf->obj->Pointer != NULL);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

        const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);
        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        dst_ptr = ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        _mesa_memcpy(dst_ptr, src_ptr, size);

        radeon_bo_unmap(context->ind_buf.bo);
        context->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
        context->ind_buf.count = mesa_ind_buf->count;

        if (mapped_named_bo)
        {
            ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
        }
    }
    else
    {
        r700FixupIndexBuffer(ctx, mesa_ind_buf);
    }
}

static GLboolean r700TryDrawPrims(GLcontext *ctx,
                                  const struct gl_client_array *arrays[],
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims,
                                  const struct _mesa_index_buffer *ib,
                                  GLuint min_index,
                                  GLuint max_index )
{
    context_t *context = R700_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    GLuint i, id = 0;
    struct radeon_renderbuffer *rrb;

    if (ctx->NewState)
        _mesa_update_state( ctx );

    _tnl_UpdateFixedFunctionProgram(ctx);
    r700SetVertexFormat(ctx, arrays, max_index + 1);
    /* shaders need to be updated before buffers are validated */
    r700UpdateShaders(ctx);
    if (!r600ValidateBuffers(ctx))
        return GL_FALSE;

    /* always emit the CB base to prevent
     * lockups on some chips
     */
    R600_STATECHANGE(context, cb_target);
    /* mark vtx as dirty since it changes per-draw */
    R600_STATECHANGE(context, vtx);

    r700SetScissor(context);
    r700SetupVertexProgram(ctx);
    r700SetupFragmentProgram(ctx);
    r700UpdateShaderStates(ctx);

    GLuint emit_end = r700PredictRenderSize(ctx, prim, ib, nr_prims)
                    + context->radeon.cmdbuf.cs->cdw;

    r700SetupIndexBuffer(ctx, ib);
    r700SetupStreams(ctx, arrays, max_index + 1);

    radeonEmitState(radeon);

    radeon_debug_add_indent();
    for (i = 0; i < nr_prims; ++i)
    {
        if (context->ind_buf.bo)
            r700RunRenderPrimitive(ctx,
                                   prim[i].start,
                                   prim[i].start + prim[i].count,
                                   prim[i].mode);
        else
            r700RunRenderPrimitiveImmediate(ctx,
                                            prim[i].start,
                                            prim[i].start + prim[i].count,
                                            prim[i].mode);
    }
    radeon_debug_remove_indent();

    /* Flush render ops cached for the last several quads. */
    r700WaitForIdleClean(context);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     CB_ACTION_ENA_bit | (1 << (id + 6)));

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    r700FreeData(ctx);

    if (emit_end < context->radeon.cmdbuf.cs->cdw)
    {
        WARN_ONCE("Rendering was %d commands larger than predicted size."
                  " We might overflow the command buffer.\n",
                  context->radeon.cmdbuf.cs->cdw - emit_end);
    }

    return GL_TRUE;
}

static void r700DrawPrims(GLcontext *ctx,
                          const struct gl_client_array *arrays[],
                          const struct _mesa_prim *prim,
                          GLuint nr_prims,
                          const struct _mesa_index_buffer *ib,
                          GLboolean index_bounds_valid,
                          GLuint min_index,
                          GLuint max_index)
{
    GLboolean retval = GL_FALSE;

    /* This check should get folded into just the places that
     * min/max index are really needed.
     */
    if (!index_bounds_valid) {
        vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
    }

    if (min_index) {
        vbo_rebase_prims( ctx, arrays, prim, nr_prims, ib, min_index, max_index, r700DrawPrims );
        return;
    }

    /* Make an attempt at drawing */
    retval = r700TryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

    /* If that failed, run the tnl pipeline - it should take care of fallbacks */
    if (!retval)
        _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
}

void r700InitDraw(GLcontext *ctx)
{
    struct vbo_context *vbo = vbo_context(ctx);

    /* to be enabled */
    vbo->draw_prims = r700DrawPrims;
}