r600: Remove unnecessary headers.
[mesa.git] / src / mesa / drivers / dri / r600 / r700_render.c
/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */
#include "main/glheader.h"
#include "main/state.h"
#include "main/imports.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/context.h"
#include "main/dd.h"
#include "main/simple_list.h"
#include "main/api_arrayelt.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "vbo/vbo.h"

#include "tnl/tnl.h"
#include "tnl/t_vp_build.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "vbo/vbo_context.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r600_tex.h"

#include "r700_vertprog.h"
#include "r700_fragprog.h"
#include "r700_state.h"

#include "radeon_buffer_objects.h"
#include "radeon_common_context.h"
void r700WaitForIdle(context_t *context);
void r700WaitForIdleClean(context_t *context);
static unsigned int r700PrimitiveType(int prim);
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type);

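/* Emit a WAIT_UNTIL write that stalls the CP until the 3D engine
 * reports idle.
 */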
void r700WaitForIdle(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(3);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit);

    END_BATCH();
    COMMIT_BATCH();
}

void r700WaitForIdleClean(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(5);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
    R600_OUT_BATCH(CACHE_FLUSH_AND_INV_EVENT);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);

    END_BATCH();
    COMMIT_BATCH();
}

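/* Emit the per-command-buffer preamble: START_3D_CMDBUF on chips older
 * than RV770, then CONTEXT_CONTROL, then a full cache flush and idle
 * wait via r700WaitForIdleClean().
 */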
void r700Start3D(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
    {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_START_3D_CMDBUF, 0));
        R600_OUT_BATCH(0);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_CONTEXT_CONTROL, 1));
    R600_OUT_BATCH(0x80000000);
    R600_OUT_BATCH(0x80000000);
    END_BATCH();

    COMMIT_BATCH();

    r700WaitForIdleClean(context);
}

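/* Emit a SURFACE_SYNC packet for a buffer object.  The coherence size is
 * expressed in 256-byte units (hence the "+ 255 >> 8" below); a bo size
 * of 0xffffffff is passed through as-is.
 */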
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type)
{
    BATCH_LOCALS(&context->radeon);
    uint32_t cp_coher_size;
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);

    if (!pbo)
        return GL_FALSE;

    if (pbo->size == 0xffffffff)
        cp_coher_size = 0xffffffff;
    else
        cp_coher_size = ((pbo->size + 255) >> 8);

    BEGIN_BATCH_NO_AUTOSTATE(5 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
    R600_OUT_BATCH(sync_type);
    R600_OUT_BATCH(cp_coher_size);
    R600_OUT_BATCH(0);  /* CP_COHER_BASE, patched by the reloc below */
    R600_OUT_BATCH(10); /* poll interval */
    R600_OUT_BATCH_RELOC(0,
                         pbo,
                         0,
                         read_domain, write_domain, 0);
    END_BATCH();
    COMMIT_BATCH();

    return GL_TRUE;
}

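/* Map a GL primitive mode onto the hardware DI_PT_* primitive type. */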
static unsigned int r700PrimitiveType(int prim)
{
    switch (prim & PRIM_MODE_MASK)
    {
    case GL_POINTS:
        return DI_PT_POINTLIST;
    case GL_LINES:
        return DI_PT_LINELIST;
    case GL_LINE_STRIP:
        return DI_PT_LINESTRIP;
    case GL_LINE_LOOP:
        return DI_PT_LINELOOP;
    case GL_TRIANGLES:
        return DI_PT_TRILIST;
    case GL_TRIANGLE_STRIP:
        return DI_PT_TRISTRIP;
    case GL_TRIANGLE_FAN:
        return DI_PT_TRIFAN;
    case GL_QUADS:
        return DI_PT_QUADLIST;
    case GL_QUAD_STRIP:
        return DI_PT_QUADSTRIP;
    case GL_POLYGON:
        return DI_PT_POLYGON;
    default:
        assert(0);
        return -1;
    }
}

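/* Trim a vertex count down to a whole number of primitives; any
 * incomplete trailing primitive is dropped.
 */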
static int r700NumVerts(int num_verts, int prim)
{
    int verts_off = 0;

    switch (prim & PRIM_MODE_MASK) {
    case GL_POINTS:
        verts_off = 0;
        break;
    case GL_LINES:
        verts_off = num_verts % 2;
        break;
    case GL_LINE_STRIP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_LINE_LOOP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_TRIANGLES:
        verts_off = num_verts % 3;
        break;
    case GL_TRIANGLE_STRIP:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_TRIANGLE_FAN:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_QUADS:
        verts_off = num_verts % 4;
        break;
    case GL_QUAD_STRIP:
        if (num_verts < 4)
            verts_off = num_verts;
        else
            verts_off = num_verts % 2;
        break;
    case GL_POLYGON:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    default:
        assert(0);
        return -1;
    }

    return num_verts - verts_off;
}

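/* Emit an indexed draw: the CP fetches indices via DMA from the index
 * buffer staged by r700SetupIndexBuffer().
 */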
static void r700RunRenderPrimitive(GLcontext *ctx, int start, int end, int prim)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, total_emit;
    int num_indices;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices = 0;

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);

    if (!context->ind_buf.is_32bit)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_SRC_SEL_DMA, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    total_emit =   3     /* VGT_PRIMITIVE_TYPE */
                 + 2     /* VGT_INDEX_TYPE */
                 + 2     /* NUM_INSTANCES */
                 + 5 + 2; /* DRAW_INDEX */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    /* prim */
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    /* index type */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    /* num instances */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* draw packet */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX, 3));
    R600_OUT_BATCH(context->ind_buf.bo_offset);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(vgt_num_indices);
    R600_OUT_BATCH(vgt_draw_initiator);
    R600_OUT_BATCH_RELOC(context->ind_buf.bo_offset,
                         context->ind_buf.bo,
                         context->ind_buf.bo_offset,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

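/* Emit a draw without a DMA index buffer.  A range starting at 0 uses an
 * auto-index draw; otherwise the indices are generated inline in the
 * command stream, packed two 16-bit indices per dword when they fit.
 */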
static void r700RunRenderPrimitiveImmediate(GLcontext *ctx, int start, int end, int prim)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, i;
    uint32_t num_indices, total_emit = 0;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices = 0;

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    if (num_indices > 0xffff)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }
    else
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    if (start == 0)
    {
        SETfield(vgt_draw_initiator, DI_SRC_SEL_AUTO_INDEX, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            total_emit += num_indices;
        }
        else
        {
            total_emit += (num_indices + 1) / 2;
        }
        SETfield(vgt_draw_initiator, DI_SRC_SEL_IMMEDIATE, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }

    total_emit +=   3  /* VGT_PRIMITIVE_TYPE */
                  + 2  /* VGT_INDEX_TYPE */
                  + 2  /* NUM_INSTANCES */
                  + 3; /* DRAW */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    /* prim */
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    /* index type */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    /* num instances */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* draw packet */
    if (start == 0)
    {
        R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
        R600_OUT_BATCH(vgt_num_indices);
        R600_OUT_BATCH(vgt_draw_initiator);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (num_indices + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i++)
            {
                R600_OUT_BATCH(i);
            }
        }
        else
        {
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (((num_indices + 1) / 2) + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i += 2)
            {
                if ((i + 1) == (start + num_indices))
                {
                    R600_OUT_BATCH(i);
                }
                else
                {
                    R600_OUT_BATCH(((i + 1) << 16) | (i));
                }
            }
        }
    }

    END_BATCH();
    COMMIT_BATCH();
}

/* start 3d, idle, cb/db flush */
#define PRE_EMIT_STATE_BUFSZ (10 + 5 + 18)

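/* Predict how many dwords the coming draw will need (state plus draw
 * packets) and reserve command buffer space up front, so the buffer is
 * not flushed in the middle of a draw.
 */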
static GLuint r700PredictRenderSize(GLcontext* ctx,
                                    const struct _mesa_prim *prim,
                                    const struct _mesa_index_buffer *ib,
                                    GLuint nr_prims)
{
    context_t *context = R700_CONTEXT(ctx);
    GLboolean flushed;
    GLuint dwords, i;
    GLuint state_size;

    dwords = PRE_EMIT_STATE_BUFSZ;
    if (ib)
        dwords += nr_prims * 14;
    else {
        for (i = 0; i < nr_prims; ++i)
        {
            if (prim[i].start == 0)
                dwords += 10;
            else if (prim[i].count > 0xffff)
                dwords += prim[i].count + 10;
            else
                dwords += ((prim[i].count + 1) / 2) + 10;
        }
    }

    state_size = radeonCountStateEmitSize(&context->radeon);
    flushed = rcommonEnsureCmdBufSpace(&context->radeon,
                                       dwords + state_size,
                                       __FUNCTION__);
    if (flushed)
        dwords += radeonCountStateEmitSize(&context->radeon);
    else
        dwords += state_size;

    radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: total prediction size is %d.\n",
                 __FUNCTION__, dwords);
    return dwords;
}

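/* Convert `count' elements of `sz' components each from TYPE to GLfloat,
 * applying MACRO per component for normalized arrays and a plain cast
 * otherwise.  Relies on src_ptr, dst_ptr, stride and count being in
 * scope at the expansion site.
 */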
#define CONVERT( TYPE, MACRO ) do {                 \
        GLuint i, j, sz;                            \
        sz = input->Size;                           \
        if (input->Normalized) {                    \
            for (i = 0; i < count; i++) {           \
                const TYPE *in = (TYPE *)src_ptr;   \
                for (j = 0; j < sz; j++) {          \
                    *dst_ptr++ = MACRO(*in);        \
                    in++;                           \
                }                                   \
                src_ptr += stride;                  \
            }                                       \
        } else {                                    \
            for (i = 0; i < count; i++) {           \
                const TYPE *in = (TYPE *)src_ptr;   \
                for (j = 0; j < sz; j++) {          \
                    *dst_ptr++ = (GLfloat)(*in);    \
                    in++;                           \
                }                                   \
                src_ptr += stride;                  \
            }                                       \
        }                                           \
    } while (0)

/**
 * Convert attribute data type to float.
 * If the attribute uses a named buffer object, replace the bo with a
 * newly allocated bo.
 */
static void r700ConvertAttrib(GLcontext *ctx, int count,
                              const struct gl_client_array *input,
                              struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const GLvoid *src_ptr;
    GLboolean mapped_named_bo = GL_FALSE;
    GLfloat *dst_ptr;
    GLuint stride;

    stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;

    /* Convert value for first element only */
    if (input->StrideB == 0)
    {
        count = 1;
    }

    if (input->BufferObj->Name)
    {
        if (!input->BufferObj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
    }
    else
    {
        src_ptr = input->Ptr;
    }

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset,
                         sizeof(GLfloat) * input->Size * count, 32);

    radeon_bo_map(attr->bo, 1);

    dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

    assert(src_ptr != NULL);

    switch (input->Type)
    {
    case GL_DOUBLE:
        CONVERT(GLdouble, (GLfloat));
        break;
    case GL_UNSIGNED_INT:
        CONVERT(GLuint, UINT_TO_FLOAT);
        break;
    case GL_INT:
        CONVERT(GLint, INT_TO_FLOAT);
        break;
    case GL_UNSIGNED_SHORT:
        CONVERT(GLushort, USHORT_TO_FLOAT);
        break;
    case GL_SHORT:
        CONVERT(GLshort, SHORT_TO_FLOAT);
        break;
    case GL_UNSIGNED_BYTE:
        assert(input->Format != GL_BGRA);
        CONVERT(GLubyte, UBYTE_TO_FLOAT);
        break;
    case GL_BYTE:
        CONVERT(GLbyte, BYTE_TO_FLOAT);
        break;
    default:
        assert(0);
        break;
    }

    radeon_bo_unmap(attr->bo);

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }
}

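/* Copy a vertex array whose stride is not a multiple of 4 into a DMA
 * region, padding each element out to the next dword boundary.
 */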
static void r700AlignDataToDword(GLcontext *ctx,
                                 const struct gl_client_array *input,
                                 int count,
                                 struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const int dst_stride = (input->StrideB + 3) & ~3;
    const int size = getTypeSize(input->Type) * input->Size * count;
    GLboolean mapped_named_bo = GL_FALSE;

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset, size, 32);

    radeon_bo_map(attr->bo, 1);

    if (!input->BufferObj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
        mapped_named_bo = GL_TRUE;
    }

    {
        GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
        GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
        int i;

        for (i = 0; i < count; ++i)
        {
            _mesa_memcpy(dst_ptr, src_ptr, input->StrideB);
            src_ptr += input->StrideB;
            dst_ptr += dst_stride;
        }
    }

    radeon_bo_unmap(attr->bo);
    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }

    attr->stride = dst_stride;
}

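/* Build the radeon_aos array for all active vertex attributes: data the
 * hardware cannot fetch in place (converted types, unaligned strides, or
 * user-space pointers) is copied into DMA regions; dword-aligned named
 * VBOs are referenced directly.
 */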
static void r700SetupStreams(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
    context_t *context = R700_CONTEXT(ctx);
    GLuint stride;
    int ret;
    int i, index;

    R600_STATECHANGE(context, vtx);

    for (index = 0; index < context->nNumActiveAos; index++)
    {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT ||
#if MESA_BIG_ENDIAN
            getTypeSize(input[i]->Type) != 4 ||
#endif
            stride < 4)
        {
            r700ConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        }
        else
        {
            if (input[i]->BufferObj->Name)
            {
                if (stride % 4 != 0)
                {
                    assert(((intptr_t) input[i]->Ptr) % input[i]->StrideB == 0);
                    r700AlignDataToDword(ctx, input[i], count, &context->stream_desc[index]);
                    context->stream_desc[index].is_named_bo = GL_FALSE;
                }
                else
                {
                    context->stream_desc[index].stride = input[i]->StrideB;
                    context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
                    context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                    context->stream_desc[index].is_named_bo = GL_TRUE;
                }
            }
            else
            {
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0)
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                }
                else
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo,
                                     &context->stream_desc[index].bo_offset, size, 32);

                radeon_bo_map(context->stream_desc[index].bo, 1);
                assert(context->stream_desc[index].bo->ptr != NULL);

                dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr,
                                               context->stream_desc[index].bo_offset);

                switch (context->stream_desc[index].dwords)
                {
                case 1:
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 2:
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 3:
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 4:
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                default:
                    assert(0);
                    break;
                }
                radeon_bo_unmap(context->stream_desc[index].bo);
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if (context->stream_desc[index].is_named_bo)
        {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs,
                                              context->stream_desc[index].bo,
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    ret = radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs,
                                        first_elem(&context->radeon.dma.reserved)->bo,
                                        RADEON_GEM_DOMAIN_GTT, 0);
}

static void r700FreeData(GLcontext *ctx)
{
    /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
     * to prevent double unref in radeonReleaseArrays
     * called during context destroy
     */
    context_t *context = R700_CONTEXT(ctx);
    int i;

    for (i = 0; i < context->nNumActiveAos; i++)
    {
        if (!context->stream_desc[i].is_named_bo)
        {
            radeon_bo_unref(context->stream_desc[i].bo);
        }
        context->radeon.tcl.aos[i].bo = NULL;
    }

    if (context->ind_buf.bo != NULL)
    {
        radeon_bo_unref(context->ind_buf.bo);
    }
}

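/* Repack an index buffer the hardware cannot read directly:
 * GL_UNSIGNED_BYTE indices (and GL_UNSIGNED_SHORT on big-endian hosts)
 * are widened to 16 bits and packed two per dword.
 */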
static void r700FixupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);
    GLvoid *src_ptr;
    GLuint *out;
    int i;
    GLboolean mapped_named_bo = GL_FALSE;

    if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
        mapped_named_bo = GL_TRUE;
        assert(mesa_ind_buf->obj->Pointer != NULL);
    }
    src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

    if (mesa_ind_buf->type == GL_UNSIGNED_BYTE)
    {
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
        GLubyte *in = (GLubyte *)src_ptr;

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }

        radeon_bo_unmap(context->ind_buf.bo);
    /* On big-endian hosts the else branch below is compiled in, so that
     * GL_UNSIGNED_SHORT indices get the same repacking treatment.
     */
#if MESA_BIG_ENDIAN
    }
    else
    {   /* mesa_ind_buf->type == GL_UNSIGNED_SHORT */
        GLushort *in = (GLushort *)src_ptr;
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }
        radeon_bo_unmap(context->ind_buf.bo);
#endif
    }

    context->ind_buf.is_32bit = GL_FALSE;
    context->ind_buf.count = mesa_ind_buf->count;

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
    }
}

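/* Stage the index buffer for a draw: copy directly usable index data
 * into a DMA region, or route formats that need repacking through
 * r700FixupIndexBuffer().
 */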
static void r700SetupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);

    if (!mesa_ind_buf) {
        context->ind_buf.bo = NULL;
        return;
    }

#if MESA_BIG_ENDIAN
    if (mesa_ind_buf->type == GL_UNSIGNED_INT)
#else
    if (mesa_ind_buf->type != GL_UNSIGNED_BYTE)
#endif
    {
        const GLvoid *src_ptr;
        GLvoid *dst_ptr;
        GLboolean mapped_named_bo = GL_FALSE;
        const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);

        if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
            assert(mesa_ind_buf->obj->Pointer != NULL);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);
        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        dst_ptr = ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        _mesa_memcpy(dst_ptr, src_ptr, size);

        radeon_bo_unmap(context->ind_buf.bo);
        context->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
        context->ind_buf.count = mesa_ind_buf->count;

        if (mapped_named_bo)
        {
            ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
        }
    }
    else
    {
        r700FixupIndexBuffer(ctx, mesa_ind_buf);
    }
}

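/* Try to render the primitive list on the hardware path.  Returns
 * GL_FALSE if buffer validation fails, so the caller can fall back to
 * the software TNL pipeline.
 */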
static GLboolean r700TryDrawPrims(GLcontext *ctx,
                                  const struct gl_client_array *arrays[],
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims,
                                  const struct _mesa_index_buffer *ib,
                                  GLuint min_index,
                                  GLuint max_index)
{
    context_t *context = R700_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    GLuint i, id = 0;
    GLuint emit_end;
    struct radeon_renderbuffer *rrb;

    if (ctx->NewState)
        _mesa_update_state(ctx);

    _tnl_UpdateFixedFunctionProgram(ctx);
    r700SetVertexFormat(ctx, arrays, max_index + 1);
    /* shaders need to be updated before buffers are validated */
    r700UpdateShaders(ctx);
    if (!r600ValidateBuffers(ctx))
        return GL_FALSE;

    /* always emit CB base to prevent
     * lock ups on some chips
     */
    R600_STATECHANGE(context, cb_target);
    /* mark vtx as dirty since it changes per-draw */
    R600_STATECHANGE(context, vtx);

    r700SetScissor(context);
    r700SetupVertexProgram(ctx);
    r700SetupFragmentProgram(ctx);
    r700UpdateShaderStates(ctx);

    emit_end = r700PredictRenderSize(ctx, prim, ib, nr_prims)
               + context->radeon.cmdbuf.cs->cdw;

    r700SetupIndexBuffer(ctx, ib);
    r700SetupStreams(ctx, arrays, max_index + 1);

    radeonEmitState(radeon);

    radeon_debug_add_indent();
    for (i = 0; i < nr_prims; ++i)
    {
        if (context->ind_buf.bo)
            r700RunRenderPrimitive(ctx,
                                   prim[i].start,
                                   prim[i].start + prim[i].count,
                                   prim[i].mode);
        else
            r700RunRenderPrimitiveImmediate(ctx,
                                            prim[i].start,
                                            prim[i].start + prim[i].count,
                                            prim[i].mode);
    }
    radeon_debug_remove_indent();

    /* Flush render ops cached for the last several quads. */
    r700WaitForIdleClean(context);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     CB_ACTION_ENA_bit | (1 << (id + 6)));

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    r700FreeData(ctx);

    if (emit_end < context->radeon.cmdbuf.cs->cdw)
    {
        WARN_ONCE("Rendering was %d commands larger than predicted size."
                  " We might overflow command buffer.\n",
                  context->radeon.cmdbuf.cs->cdw - emit_end);
    }

    return GL_TRUE;
}

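/* vbo draw_prims entry point: computes index bounds when the caller did
 * not, rebases primitives so the minimum index becomes 0, and falls back
 * to _tnl_draw_prims() if the hardware path declines the draw.
 */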
static void r700DrawPrims(GLcontext *ctx,
                          const struct gl_client_array *arrays[],
                          const struct _mesa_prim *prim,
                          GLuint nr_prims,
                          const struct _mesa_index_buffer *ib,
                          GLboolean index_bounds_valid,
                          GLuint min_index,
                          GLuint max_index)
{
    GLboolean retval = GL_FALSE;

    /* This check should get folded into just the places that
     * min/max index are really needed.
     */
    if (!index_bounds_valid) {
        vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
    }

    if (min_index) {
        vbo_rebase_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index, r700DrawPrims);
        return;
    }

    /* Make an attempt at drawing */
    retval = r700TryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

    /* If that failed, run the TNL pipeline; it should take care of fallbacks. */
    if (!retval)
        _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
}

void r700InitDraw(GLcontext *ctx)
{
    struct vbo_context *vbo = vbo_context(ctx);

    /* plug the hardware draw path into the vbo module */
    vbo->draw_prims = r700DrawPrims;
}