r600: rework draw functions
[mesa.git] src/mesa/drivers/dri/r600/r700_render.c
/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/glheader.h"
#include "main/state.h"
#include "main/imports.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/context.h"
#include "main/dd.h"
#include "main/simple_list.h"
#include "main/api_arrayelt.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "vbo/vbo.h"

#include "tnl/tnl.h"
#include "tnl/t_vp_build.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "tnl/t_pipeline.h"
#include "vbo/vbo_context.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r600_tex.h"

#include "r700_vertprog.h"
#include "r700_fragprog.h"
#include "r700_state.h"

#include "radeon_buffer_objects.h"
#include "radeon_common_context.h"

void r700WaitForIdle(context_t *context);
void r700WaitForIdleClean(context_t *context);
GLboolean r700SendTextureState(context_t *context);
static unsigned int r700PrimitiveType(int prim);
void r600UpdateTextureState(GLcontext * ctx);
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type);

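/* Stall the CP until the 3D engine reports idle.  Rather than polling from
 * the CPU, this writes WAIT_3D_IDLE into the WAIT_UNTIL config register with
 * a SET_CONFIG_REG packet, so the wait happens on the GPU side.
 */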
void r700WaitForIdle(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(3);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit);

    END_BATCH();
    COMMIT_BATCH();
}

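/* Like r700WaitForIdle, but first emits a CACHE_FLUSH_AND_INV event so the
 * color/depth block caches are flushed and invalidated before waiting for
 * the 3D engine to go idle and clean.
 */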
void r700WaitForIdleClean(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(5);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
    R600_OUT_BATCH(CACHE_FLUSH_AND_INV_EVENT);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);

    END_BATCH();
    COMMIT_BATCH();
}

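/* Begin a 3D command stream.  Pre-RV770 parts need a START_3D_CMDBUF packet
 * first; every part then gets a CONTEXT_CONTROL packet.  The 0x80000000
 * dwords set bit 31 of both load-control masks, which presumably enables
 * loading of the full context state.
 */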
void r700Start3D(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
    {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_START_3D_CMDBUF, 0));
        R600_OUT_BATCH(0);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_CONTEXT_CONTROL, 1));
    R600_OUT_BATCH(0x80000000);
    R600_OUT_BATCH(0x80000000);
    END_BATCH();

    COMMIT_BATCH();

    r700WaitForIdleClean(context);
}

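/* Emit a SURFACE_SYNC packet to make a buffer object coherent for the given
 * domains.  The size operand is expressed in 256-byte units (hence the
 * rounding below); the base address is patched in through the relocation.
 */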
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    uint32_t cp_coher_size;

    if (!pbo)
        return GL_FALSE;

    if (pbo->size == 0xffffffff)
        cp_coher_size = 0xffffffff;
    else
        cp_coher_size = ((pbo->size + 255) >> 8); /* round up to 256-byte units */

    BEGIN_BATCH_NO_AUTOSTATE(5 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
    R600_OUT_BATCH(sync_type);
    R600_OUT_BATCH(cp_coher_size);
    R600_OUT_BATCH(0);  /* CP_COHER_BASE, filled in by the relocation */
    R600_OUT_BATCH(10); /* poll interval */
    R600_OUT_BATCH_RELOC(0,
                         pbo,
                         0,
                         read_domain, write_domain, 0);
    END_BATCH();
    COMMIT_BATCH();

    return GL_TRUE;
}

static unsigned int r700PrimitiveType(int prim)
{
    switch (prim & PRIM_MODE_MASK)
    {
    case GL_POINTS:
        return DI_PT_POINTLIST;
    case GL_LINES:
        return DI_PT_LINELIST;
    case GL_LINE_STRIP:
        return DI_PT_LINESTRIP;
    case GL_LINE_LOOP:
        return DI_PT_LINELOOP;
    case GL_TRIANGLES:
        return DI_PT_TRILIST;
    case GL_TRIANGLE_STRIP:
        return DI_PT_TRISTRIP;
    case GL_TRIANGLE_FAN:
        return DI_PT_TRIFAN;
    case GL_QUADS:
        return DI_PT_QUADLIST;
    case GL_QUAD_STRIP:
        return DI_PT_QUADSTRIP;
    case GL_POLYGON:
        return DI_PT_POLYGON;
    default:
        assert(0);
        return -1;
    }
}

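/* Trim a vertex count to a whole number of primitives for the given mode;
 * e.g. 8 vertices with GL_TRIANGLES is trimmed to 6.  Strip/fan/polygon
 * modes are only trimmed when there are too few vertices for even one
 * primitive.
 */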
static int r700NumVerts(int num_verts, int prim)
{
    int verts_off = 0;

    switch (prim & PRIM_MODE_MASK) {
    case GL_POINTS:
        verts_off = 0;
        break;
    case GL_LINES:
        verts_off = num_verts % 2;
        break;
    case GL_LINE_STRIP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_LINE_LOOP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_TRIANGLES:
        verts_off = num_verts % 3;
        break;
    case GL_TRIANGLE_STRIP:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_TRIANGLE_FAN:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_QUADS:
        verts_off = num_verts % 4;
        break;
    case GL_QUAD_STRIP:
        if (num_verts < 4)
            verts_off = num_verts;
        else
            verts_off = num_verts % 2;
        break;
    case GL_POLYGON:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    default:
        assert(0);
        return -1;
    }

    return num_verts - verts_off;
}

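/* Indexed draw path.  The indices were staged into context->ind_buf by
 * r700SetupIndexBuffer, so this emits the primitive type, index type,
 * instance count, and a DRAW_INDEX packet that makes the VGT fetch the
 * indices via DMA from that buffer.
 */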
static void r700RunRenderPrimitive(GLcontext * ctx, int start, int end, int prim)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, total_emit;
    int num_indices;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices = 0;

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);

    if (GL_TRUE != context->ind_buf.is_32bit)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_SRC_SEL_DMA, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    total_emit = 3       /* VGT_PRIMITIVE_TYPE */
               + 2       /* VGT_INDEX_TYPE */
               + 2       /* NUM_INSTANCES */
               + 5 + 2;  /* DRAW_INDEX */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    // prim
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    // index type
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    // num instances
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    // draw packet
    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX, 3));
    R600_OUT_BATCH(context->ind_buf.bo_offset); /* index base address, low dword */
    R600_OUT_BATCH(0);                          /* index base address, high dword */
    R600_OUT_BATCH(vgt_num_indices);
    R600_OUT_BATCH(vgt_draw_initiator);
    R600_OUT_BATCH_RELOC(context->ind_buf.bo_offset,
                         context->ind_buf.bo,
                         context->ind_buf.bo_offset,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

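/* Non-indexed draw path.  When the primitive starts at vertex 0 the VGT can
 * generate indices itself (DRAW_INDEX_AUTO); otherwise sequential indices
 * are emitted inline with DRAW_INDEX_IMMD, packed two 16-bit indices per
 * dword whenever the count fits in 16 bits.
 */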
static void r700RunRenderPrimitiveImmediate(GLcontext * ctx, int start, int end, int prim)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, i;
    uint32_t num_indices, total_emit = 0;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices = 0;

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    if (num_indices > 0xffff)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }
    else
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    if (start == 0)
    {
        SETfield(vgt_draw_initiator, DI_SRC_SEL_AUTO_INDEX, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }
    else
    {
        /* Indices go inline: one dword each for 32-bit indices, or two
         * 16-bit indices packed per dword.
         */
        if (num_indices > 0xffff)
        {
            total_emit += num_indices;
        }
        else
        {
            total_emit += (num_indices + 1) / 2;
        }
        SETfield(vgt_draw_initiator, DI_SRC_SEL_IMMEDIATE, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }

    total_emit += 3   /* VGT_PRIMITIVE_TYPE */
                + 2   /* VGT_INDEX_TYPE */
                + 2   /* NUM_INSTANCES */
                + 3;  /* DRAW */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    // prim
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    // index type
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    // num instances
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    // draw packet
    if (start == 0)
    {
        R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
        R600_OUT_BATCH(vgt_num_indices);
        R600_OUT_BATCH(vgt_draw_initiator);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (num_indices + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i++)
            {
                R600_OUT_BATCH(i);
            }
        }
        else
        {
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (((num_indices + 1) / 2) + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i += 2)
            {
                if ((i + 1) == (start + num_indices))
                {
                    /* odd final index goes in a dword by itself */
                    R600_OUT_BATCH(i);
                }
                else
                {
                    R600_OUT_BATCH(((i + 1) << 16) | (i));
                }
            }
        }
    }

    END_BATCH();
    COMMIT_BATCH();
}

/* start 3d, idle, cb/db flush */
#define PRE_EMIT_STATE_BUFSZ (10 + 5 + 14)

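/* Conservatively estimate how many command-buffer dwords the draw will take
 * (pre-emit state plus one draw packet per primitive) and reserve the space,
 * flushing first if needed.  The caller compares the returned estimate with
 * the actual dword count after emission.
 */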
static GLuint r700PredictRenderSize(GLcontext* ctx,
                                    const struct _mesa_prim *prim,
                                    const struct _mesa_index_buffer *ib,
                                    GLuint nr_prims)
{
    context_t *context = R700_CONTEXT(ctx);
    GLboolean flushed;
    GLuint dwords, i;
    GLuint state_size;

    dwords = PRE_EMIT_STATE_BUFSZ;
    if (ib)
        dwords += nr_prims * 14;
    else {
        for (i = 0; i < nr_prims; ++i)
        {
            if (prim[i].start == 0)
                dwords += 10;
            else if (prim[i].count > 0xffff)
                dwords += prim[i].count + 10;
            else
                dwords += ((prim[i].count + 1) / 2) + 10;
        }
    }

    state_size = radeonCountStateEmitSize(&context->radeon);
    flushed = rcommonEnsureCmdBufSpace(&context->radeon,
                                       dwords + state_size,
                                       __FUNCTION__);
    if (flushed)
        dwords += radeonCountStateEmitSize(&context->radeon);
    else
        dwords += state_size;

    radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);
    return dwords;
}

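/* Expand one attribute array into floats.  TYPE is the source element type
 * and MACRO the per-element conversion (e.g. UBYTE_TO_FLOAT for normalized
 * data); the macro relies on src_ptr, dst_ptr, stride, count and input
 * being in scope at the expansion site.
 */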
#define CONVERT( TYPE, MACRO ) do {             \
    GLuint i, j, sz;                            \
    sz = input->Size;                           \
    if (input->Normalized) {                    \
        for (i = 0; i < count; i++) {           \
            const TYPE *in = (TYPE *)src_ptr;   \
            for (j = 0; j < sz; j++) {          \
                *dst_ptr++ = MACRO(*in);        \
                in++;                           \
            }                                   \
            src_ptr += stride;                  \
        }                                       \
    } else {                                    \
        for (i = 0; i < count; i++) {           \
            const TYPE *in = (TYPE *)src_ptr;   \
            for (j = 0; j < sz; j++) {          \
                *dst_ptr++ = (GLfloat)(*in);    \
                in++;                           \
            }                                   \
            src_ptr += stride;                  \
        }                                       \
    }                                           \
} while (0)

/**
 * Convert attribute data type to float.
 * If the attribute uses a named buffer object, replace the bo with a newly
 * allocated bo.
 */
static void r700ConvertAttrib(GLcontext *ctx, int count,
                              const struct gl_client_array *input,
                              struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const GLvoid *src_ptr;
    GLboolean mapped_named_bo = GL_FALSE;
    GLfloat *dst_ptr;
    GLuint stride;

    stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;

    /* Convert value for first element only */
    if (input->StrideB == 0)
    {
        count = 1;
    }

    if (input->BufferObj->Name)
    {
        if (!input->BufferObj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
    }
    else
    {
        src_ptr = input->Ptr;
    }

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset,
                         sizeof(GLfloat) * input->Size * count, 32);
    dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

    assert(src_ptr != NULL);

    switch (input->Type)
    {
    case GL_DOUBLE:
        CONVERT(GLdouble, (GLfloat)); /* the (GLfloat) cast acts as the conversion macro */
        break;
    case GL_UNSIGNED_INT:
        CONVERT(GLuint, UINT_TO_FLOAT);
        break;
    case GL_INT:
        CONVERT(GLint, INT_TO_FLOAT);
        break;
    case GL_UNSIGNED_SHORT:
        CONVERT(GLushort, USHORT_TO_FLOAT);
        break;
    case GL_SHORT:
        CONVERT(GLshort, SHORT_TO_FLOAT);
        break;
    case GL_UNSIGNED_BYTE:
        assert(input->Format != GL_BGRA);
        CONVERT(GLubyte, UBYTE_TO_FLOAT);
        break;
    case GL_BYTE:
        CONVERT(GLbyte, BYTE_TO_FLOAT);
        break;
    default:
        assert(0);
        break;
    }

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }
}

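/* The vertex fetcher wants dword-aligned strides, so copy arrays whose
 * stride is not a multiple of 4 into a DMA region, rounding the destination
 * stride up to the next multiple of 4.
 */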
static void r700AlignDataToDword(GLcontext *ctx,
                                 const struct gl_client_array *input,
                                 int count,
                                 struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const int dst_stride = (input->StrideB + 3) & ~3;
    const int size = getTypeSize(input->Type) * input->Size * count;
    GLboolean mapped_named_bo = GL_FALSE;

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset, size, 32);

    if (!input->BufferObj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
        mapped_named_bo = GL_TRUE;
    }

    {
        GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
        GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
        int i;

        for (i = 0; i < count; ++i)
        {
            _mesa_memcpy(dst_ptr, src_ptr, input->StrideB);
            src_ptr += input->StrideB;
            dst_ptr += dst_stride;
        }
    }

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }

    attr->stride = dst_stride;
}

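/* Bind each active attribute to a radeon_aos stream.  Three paths: formats
 * the fetcher cannot consume (doubles, (unsigned) ints, sub-dword strides,
 * and on big-endian hosts any non-dword-sized type) are converted to float;
 * named buffer objects with dword-aligned strides are referenced in place;
 * everything else is copied into a freshly allocated DMA region.
 */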
static void r700SetupStreams(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
    context_t *context = R700_CONTEXT(ctx);
    GLuint stride;
    int ret;
    int i, index;

    R600_STATECHANGE(context, vtx);

    for (index = 0; index < context->nNumActiveAos; index++)
    {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT ||
#if MESA_BIG_ENDIAN
            getTypeSize(input[i]->Type) != 4 ||
#endif
            stride < 4)
        {
            r700ConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        }
        else
        {
            if (input[i]->BufferObj->Name)
            {
                if (stride % 4 != 0)
                {
                    assert(((intptr_t) input[i]->Ptr) % input[i]->StrideB == 0);
                    r700AlignDataToDword(ctx, input[i], count, &context->stream_desc[index]);
                    context->stream_desc[index].is_named_bo = GL_FALSE;
                }
                else
                {
                    context->stream_desc[index].stride = input[i]->StrideB;
                    context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
                    context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                    context->stream_desc[index].is_named_bo = GL_TRUE;
                }
            }
            else
            {
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0)
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                }
                else
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo,
                                     &context->stream_desc[index].bo_offset, size, 32);
                assert(context->stream_desc[index].bo->ptr != NULL);
                dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr,
                                               context->stream_desc[index].bo_offset);

                switch (context->stream_desc[index].dwords)
                {
                case 1:
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 2:
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 3:
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 4:
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                default:
                    assert(0);
                    break;
                }
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if (context->stream_desc[index].is_named_bo)
        {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs,
                                              context->stream_desc[index].bo,
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    ret = radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs,
                                        first_elem(&context->radeon.dma.reserved)->bo,
                                        RADEON_GEM_DOMAIN_GTT, 0);
}

static void r700FreeData(GLcontext *ctx)
{
    /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
     * to prevent double unref in radeonReleaseArrays
     * called during context destroy
     */
    context_t *context = R700_CONTEXT(ctx);

    int i;

    for (i = 0; i < context->nNumActiveAos; i++)
    {
        if (!context->stream_desc[i].is_named_bo)
        {
            radeon_bo_unref(context->stream_desc[i].bo);
        }
        context->radeon.tcl.aos[i].bo = NULL;
    }

    if (context->ind_buf.bo != NULL)
    {
        radeon_bo_unref(context->ind_buf.bo);
    }
}

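/* The hardware has no 8-bit index type (and on big-endian hosts 16-bit
 * indices need byte swapping), so repack such index buffers as 16-bit
 * indices, two per dword, in a DMA region.
 */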
static void r700FixupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);
    GLvoid *src_ptr;
    GLuint *out;
    int i;
    GLboolean mapped_named_bo = GL_FALSE;

    if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
        mapped_named_bo = GL_TRUE;
        assert(mesa_ind_buf->obj->Pointer != NULL);
    }
    src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

    if (mesa_ind_buf->type == GL_UNSIGNED_BYTE)
    {
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
        GLubyte *in = (GLubyte *)src_ptr;

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | (in[i + 1] << 16);
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }

#if MESA_BIG_ENDIAN
    }
    else
    { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
        GLushort *in = (GLushort *)src_ptr;
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | (in[i + 1] << 16);
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }
#endif
    }

    context->ind_buf.is_32bit = GL_FALSE;
    context->ind_buf.count = mesa_ind_buf->count;

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
    }
}

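/* Stage the index buffer for a draw.  Index types the hardware can consume
 * directly are copied verbatim into a DMA region; the rest go through
 * r700FixupIndexBuffer above.
 */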
static void r700SetupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);

    if (!mesa_ind_buf) {
        context->ind_buf.bo = NULL;
        return;
    }

#if MESA_BIG_ENDIAN
    if (mesa_ind_buf->type == GL_UNSIGNED_INT)
    {
#else
    if (mesa_ind_buf->type != GL_UNSIGNED_BYTE)
    {
#endif
        const GLvoid *src_ptr;
        GLvoid *dst_ptr;
        GLboolean mapped_named_bo = GL_FALSE;

        if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
            assert(mesa_ind_buf->obj->Pointer != NULL);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

        const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);
        assert(context->ind_buf.bo->ptr != NULL);
        dst_ptr = ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        _mesa_memcpy(dst_ptr, src_ptr, size);

        context->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
        context->ind_buf.count = mesa_ind_buf->count;

        if (mapped_named_bo)
        {
            ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
        }
    }
    else
    {
        r700FixupIndexBuffer(ctx, mesa_ind_buf);
    }
}

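/* Hardware draw path.  Updates derived GL state and shaders, validates
 * buffers, predicts the command-buffer size before anything is emitted (so
 * the overflow warning at the end is meaningful), then emits state, runs
 * each primitive, and syncs the color and depth surfaces.
 */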
static GLboolean r700TryDrawPrims(GLcontext *ctx,
                                  const struct gl_client_array *arrays[],
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims,
                                  const struct _mesa_index_buffer *ib,
                                  GLuint min_index,
                                  GLuint max_index)
{
    context_t *context = R700_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    GLuint i, id = 0;
    struct radeon_renderbuffer *rrb;

    if (ctx->NewState)
        _mesa_update_state(ctx);

    _tnl_UpdateFixedFunctionProgram(ctx);
    r700SetVertexFormat(ctx, arrays, max_index + 1);
    /* shaders need to be updated before buffers are validated */
    r700UpdateShaders(ctx);
    if (!r600ValidateBuffers(ctx))
        return GL_FALSE;

    /* always emit CB base to prevent
     * lockups on some chips.
     */
    R600_STATECHANGE(context, cb_target);
    /* mark vtx as dirty since it changes per-draw */
    R600_STATECHANGE(context, vtx);

    r700SetScissor(context);
    r700SetupVertexProgram(ctx);
    r700SetupFragmentProgram(ctx);
    r600UpdateTextureState(ctx);

    GLuint emit_end = r700PredictRenderSize(ctx, prim, ib, nr_prims)
                    + context->radeon.cmdbuf.cs->cdw;

    r700SetupIndexBuffer(ctx, ib);
    r700SetupStreams(ctx, arrays, max_index + 1);

    radeonEmitState(radeon);

    radeon_debug_add_indent();
    for (i = 0; i < nr_prims; ++i)
    {
        if (context->ind_buf.bo)
            r700RunRenderPrimitive(ctx,
                                   prim[i].start,
                                   prim[i].start + prim[i].count,
                                   prim[i].mode);
        else
            r700RunRenderPrimitiveImmediate(ctx,
                                            prim[i].start,
                                            prim[i].start + prim[i].count,
                                            prim[i].mode);
    }
    radeon_debug_remove_indent();

    /* Flush render ops cached for the last several quads. */
    r700WaitForIdleClean(context);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     CB_ACTION_ENA_bit | (1 << (id + 6)));

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    r700FreeData(ctx);

    if (emit_end < context->radeon.cmdbuf.cs->cdw)
    {
        WARN_ONCE("Rendering was %d commands larger than predicted size."
                  " We might overflow command buffer.\n", context->radeon.cmdbuf.cs->cdw - emit_end);
    }

    return GL_TRUE;
}

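/* vbo draw_prims hook: compute index bounds if needed, rebase primitives so
 * min_index becomes 0 (the hardware path assumes it), then try the hardware
 * path, falling back to the software TNL pipeline on failure.
 */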
static void r700DrawPrims(GLcontext *ctx,
                          const struct gl_client_array *arrays[],
                          const struct _mesa_prim *prim,
                          GLuint nr_prims,
                          const struct _mesa_index_buffer *ib,
                          GLboolean index_bounds_valid,
                          GLuint min_index,
                          GLuint max_index)
{
    GLboolean retval = GL_FALSE;

    /* This check should get folded into just the places that
     * min/max index are really needed.
     */
    if (!index_bounds_valid) {
        vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
    }

    if (min_index) {
        vbo_rebase_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index, r700DrawPrims);
        return;
    }

    /* Make an attempt at drawing */
    retval = r700TryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

    /* If that failed, run the TNL pipeline; it should take care of fallbacks */
    if (!retval)
        _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
}

void r700InitDraw(GLcontext *ctx)
{
    struct vbo_context *vbo = vbo_context(ctx);

    /* to be enabled */
    vbo->draw_prims = r700DrawPrims;
}