r600c: add big endian support for r6xx/r7xx
[mesa.git] src/mesa/drivers/dri/r600/r700_render.c
/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */

#include "main/glheader.h"
#include "main/state.h"
#include "main/imports.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/context.h"
#include "main/dd.h"
#include "main/simple_list.h"
#include "main/api_arrayelt.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "vbo/vbo.h"

#include "tnl/tnl.h"
#include "tnl/t_vp_build.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "vbo/vbo_context.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r600_tex.h"

#include "r700_vertprog.h"
#include "r700_fragprog.h"
#include "r700_state.h"

#include "radeon_buffer_objects.h"
#include "radeon_common_context.h"

void r700WaitForIdle(context_t *context);
void r700WaitForIdleClean(context_t *context);
static unsigned int r700PrimitiveType(int prim);
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type);

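/* Stall the command processor until the 3D engine reports idle, via the
 * WAIT_3D_IDLE bit of the WAIT_UNTIL config register.
 */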
void r700WaitForIdle(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(3);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit);

    END_BATCH();
    COMMIT_BATCH();
}

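/* Flush and invalidate the framebuffer caches, then wait until the 3D
 * engine is both idle and clean.
 */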
void r700WaitForIdleClean(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    BEGIN_BATCH_NO_AUTOSTATE(5);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
    R600_OUT_BATCH(CACHE_FLUSH_AND_INV_EVENT);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);

    END_BATCH();
    COMMIT_BATCH();
}

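/* Begin a 3D command stream.  Chips older than RV770 want an explicit
 * START_3D_CMDBUF packet first; every chip then gets a CONTEXT_CONTROL
 * packet whose two 0x80000000 words appear to enable full register
 * context loads.
 */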
void r700Start3D(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
    {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_START_3D_CMDBUF, 0));
        R600_OUT_BATCH(0);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_CONTEXT_CONTROL, 1));
    R600_OUT_BATCH(0x80000000);
    R600_OUT_BATCH(0x80000000);
    END_BATCH();

    COMMIT_BATCH();
}

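/* Emit a SURFACE_SYNC packet for a buffer object.  CP_COHER_SIZE is in
 * 256-byte units (hence the +255 round-up and >>8), with 0xffffffff
 * meaning "sync everything"; the trailing 10 is the CP poll interval.
 */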
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type)
{
    BATCH_LOCALS(&context->radeon);
    uint32_t cp_coher_size;
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);

    if (!pbo)
        return GL_FALSE;

    if (pbo->size == 0xffffffff)
        cp_coher_size = 0xffffffff;
    else
        cp_coher_size = ((pbo->size + 255) >> 8);

    BEGIN_BATCH_NO_AUTOSTATE(5 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
    R600_OUT_BATCH(sync_type);
    R600_OUT_BATCH(cp_coher_size);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(10);
    R600_OUT_BATCH_RELOC(0,
                         pbo,
                         0,
                         read_domain, write_domain, 0);
    END_BATCH();
    COMMIT_BATCH();

    return GL_TRUE;
}

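/* Map a GL primitive mode to the matching VGT DI primitive type.
 * Unknown modes assert and return -1 (callers store the result in a
 * signed int and check for it).
 */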
static unsigned int r700PrimitiveType(int prim)
{
    switch (prim & PRIM_MODE_MASK)
    {
    case GL_POINTS:
        return DI_PT_POINTLIST;
    case GL_LINES:
        return DI_PT_LINELIST;
    case GL_LINE_STRIP:
        return DI_PT_LINESTRIP;
    case GL_LINE_LOOP:
        return DI_PT_LINELOOP;
    case GL_TRIANGLES:
        return DI_PT_TRILIST;
    case GL_TRIANGLE_STRIP:
        return DI_PT_TRISTRIP;
    case GL_TRIANGLE_FAN:
        return DI_PT_TRIFAN;
    case GL_QUADS:
        return DI_PT_QUADLIST;
    case GL_QUAD_STRIP:
        return DI_PT_QUADSTRIP;
    case GL_POLYGON:
        return DI_PT_POLYGON;
    default:
        assert(0);
        return -1;
    }
}

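/* Trim a vertex count to the largest number that still forms complete
 * primitives: e.g. 8 vertices of GL_TRIANGLES trim to 6, and counts too
 * small for even one primitive (say 2 vertices of GL_TRIANGLE_STRIP)
 * trim to 0 so the draw is skipped.
 */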
static int r700NumVerts(int num_verts, int prim)
{
    int verts_off = 0;

    switch (prim & PRIM_MODE_MASK) {
    case GL_POINTS:
        verts_off = 0;
        break;
    case GL_LINES:
        verts_off = num_verts % 2;
        break;
    case GL_LINE_STRIP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_LINE_LOOP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_TRIANGLES:
        verts_off = num_verts % 3;
        break;
    case GL_TRIANGLE_STRIP:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_TRIANGLE_FAN:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_QUADS:
        verts_off = num_verts % 4;
        break;
    case GL_QUAD_STRIP:
        if (num_verts < 4)
            verts_off = num_verts;
        else
            verts_off = num_verts % 2;
        break;
    case GL_POLYGON:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    default:
        assert(0);
        return -1;
    }

    return num_verts - verts_off;
}

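/* Emit an indexed draw: indices are DMA-fetched from the buffer set up
 * in context->ind_buf by a DRAW_INDEX packet.  On big-endian hosts the
 * VGT byte-swaps each 32-bit word of the index stream as it fetches,
 * which is why 16-bit indices are packed two to a dword beforehand.
 */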
static void r700RunRenderPrimitive(struct gl_context * ctx, int start, int end,
                                   int prim, GLint basevertex)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, total_emit;
    int num_indices;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices = 0;

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);

    if (GL_TRUE != context->ind_buf.is_32bit)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    /* 16-bit indexes are packed in a 32-bit value */
    SETfield(vgt_index_type,
#if MESA_BIG_ENDIAN
             VGT_DMA_SWAP_32_BIT,
#else
             VGT_DMA_SWAP_NONE,
#endif
             SWAP_MODE_shift, SWAP_MODE_mask);

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_SRC_SEL_DMA, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    total_emit = 3   /* VGT_PRIMITIVE_TYPE */
               + 2   /* VGT_INDEX_TYPE */
               + 2   /* NUM_INSTANCES */
               + 4   /* VTX_BASE_VTX_LOC + VTX_START_INST_LOC */
               + 5 + 2; /* DRAW_INDEX */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    // prim
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    // index type
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    // num instances
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* offset */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 2));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(basevertex); // VTX_BASE_VTX_LOC
    R600_OUT_BATCH(0);          // VTX_START_INST_LOC
    // draw packet
    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX, 3));
    R600_OUT_BATCH(context->ind_buf.bo_offset);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(vgt_num_indices);
    R600_OUT_BATCH(vgt_draw_initiator);
    R600_OUT_BATCH_RELOC(context->ind_buf.bo_offset,
                         context->ind_buf.bo,
                         context->ind_buf.bo_offset,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

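/* Emit a non-indexed draw.  A draw starting at vertex 0 lets the VGT
 * auto-generate indices (DRAW_INDEX_AUTO); otherwise sequential indices
 * are embedded in the packet itself (DRAW_INDEX_IMMD), 16-bit indices
 * packed two per dword with any odd trailing index in its own dword.
 */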
static void r700RunRenderPrimitiveImmediate(struct gl_context * ctx, int start, int end, int prim)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, i;
    uint32_t num_indices, total_emit = 0;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices = 0;

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    if (num_indices > 0xffff)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }
    else
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    /* 16-bit indexes are packed in a 32-bit value */
    SETfield(vgt_index_type,
#if MESA_BIG_ENDIAN
             VGT_DMA_SWAP_32_BIT,
#else
             VGT_DMA_SWAP_NONE,
#endif
             SWAP_MODE_shift, SWAP_MODE_mask);

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    if (start == 0)
    {
        SETfield(vgt_draw_initiator, DI_SRC_SEL_AUTO_INDEX, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            total_emit += num_indices;
        }
        else
        {
            total_emit += (num_indices + 1) / 2;
        }
        SETfield(vgt_draw_initiator, DI_SRC_SEL_IMMEDIATE, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }

    total_emit += 3   /* VGT_PRIMITIVE_TYPE */
                + 2   /* VGT_INDEX_TYPE */
                + 2   /* NUM_INSTANCES */
                + 4   /* VTX_BASE_VTX_LOC + VTX_START_INST_LOC */
                + 3;  /* DRAW */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    // prim
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    // index type
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    // num instances
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* offset */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 2));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0); // VTX_BASE_VTX_LOC
    R600_OUT_BATCH(0); // VTX_START_INST_LOC
    // draw packet
    if (start == 0)
    {
        R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
        R600_OUT_BATCH(vgt_num_indices);
        R600_OUT_BATCH(vgt_draw_initiator);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (num_indices + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i++)
            {
                R600_OUT_BATCH(i);
            }
        }
        else
        {
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (((num_indices + 1) / 2) + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i += 2)
            {
                if ((i + 1) == (start + num_indices))
                {
                    R600_OUT_BATCH(i);
                }
                else
                {
                    R600_OUT_BATCH(((i + 1) << 16) | (i));
                }
            }
        }
    }

    END_BATCH();
    COMMIT_BATCH();
}

/* start 3d, idle, cb/db flush */
#define PRE_EMIT_STATE_BUFSZ (5 + 5 + 14)

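/* Conservatively predict the dword count for this draw and reserve
 * command-buffer space before any state is emitted: 18 dwords per
 * indexed primitive, 14 per auto-index draw, and the packed immediate
 * index payload plus 14 otherwise.  If the reservation flushed the
 * buffer, the state size is re-counted, since everything is dirty again.
 */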
static GLuint r700PredictRenderSize(struct gl_context* ctx,
                                    const struct _mesa_prim *prim,
                                    const struct _mesa_index_buffer *ib,
                                    GLuint nr_prims)
{
    context_t *context = R700_CONTEXT(ctx);
    GLboolean flushed;
    GLuint dwords, i;
    GLuint state_size;

    dwords = PRE_EMIT_STATE_BUFSZ;
    if (ib)
        dwords += nr_prims * 18;
    else {
        for (i = 0; i < nr_prims; ++i)
        {
            if (prim[i].start == 0)
                dwords += 14;
            else if (prim[i].count > 0xffff)
                dwords += prim[i].count + 14;
            else
                dwords += ((prim[i].count + 1) / 2) + 14;
        }
    }

    state_size = radeonCountStateEmitSize(&context->radeon);
    flushed = rcommonEnsureCmdBufSpace(&context->radeon,
                                       dwords + state_size,
                                       __FUNCTION__);
    if (flushed)
        dwords += radeonCountStateEmitSize(&context->radeon);
    else
        dwords += state_size;

    radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);
    return dwords;
}

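/* Expand count elements of TYPE to floats at dst_ptr, applying MACRO to
 * normalized arrays and a plain float cast otherwise.  Expects input,
 * count, src_ptr, stride and dst_ptr in the expanding scope.
 */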
#define CONVERT( TYPE, MACRO ) do {             \
    GLuint i, j, sz;                            \
    sz = input->Size;                           \
    if (input->Normalized) {                    \
        for (i = 0; i < count; i++) {           \
            const TYPE *in = (TYPE *)src_ptr;   \
            for (j = 0; j < sz; j++) {          \
                *dst_ptr++ = MACRO(*in);        \
                in++;                           \
            }                                   \
            src_ptr += stride;                  \
        }                                       \
    } else {                                    \
        for (i = 0; i < count; i++) {           \
            const TYPE *in = (TYPE *)src_ptr;   \
            for (j = 0; j < sz; j++) {          \
                *dst_ptr++ = (GLfloat)(*in);    \
                in++;                           \
            }                                   \
            src_ptr += stride;                  \
        }                                       \
    }                                           \
} while (0)

/**
 * Convert attribute data type to float.
 * If the attribute uses a named buffer object, replace the bo with a
 * newly allocated bo.
 */
static void r700ConvertAttrib(struct gl_context *ctx, int count,
                              const struct gl_client_array *input,
                              struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const GLvoid *src_ptr;
    GLboolean mapped_named_bo = GL_FALSE;
    GLfloat *dst_ptr;
    GLuint stride;

    stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;

    /* Convert value for first element only */
    if (input->StrideB == 0)
    {
        count = 1;
    }

    if (input->BufferObj->Name)
    {
        if (!input->BufferObj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
    }
    else
    {
        src_ptr = input->Ptr;
    }

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset,
                         sizeof(GLfloat) * input->Size * count, 32);

    radeon_bo_map(attr->bo, 1);

    dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

    assert(src_ptr != NULL);

    switch (input->Type)
    {
    case GL_DOUBLE:
        CONVERT(GLdouble, (GLfloat));
        break;
    case GL_UNSIGNED_INT:
        CONVERT(GLuint, UINT_TO_FLOAT);
        break;
    case GL_INT:
        CONVERT(GLint, INT_TO_FLOAT);
        break;
    case GL_UNSIGNED_SHORT:
        CONVERT(GLushort, USHORT_TO_FLOAT);
        break;
    case GL_SHORT:
        CONVERT(GLshort, SHORT_TO_FLOAT);
        break;
    case GL_UNSIGNED_BYTE:
        assert(input->Format != GL_BGRA);
        CONVERT(GLubyte, UBYTE_TO_FLOAT);
        break;
    case GL_BYTE:
        CONVERT(GLbyte, BYTE_TO_FLOAT);
        break;
    default:
        assert(0);
        break;
    }

    radeon_bo_unmap(attr->bo);

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }
}

#if 0 /* unused */
static void r700AlignDataToDword(struct gl_context *ctx,
                                 const struct gl_client_array *input,
                                 int count,
                                 struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const int dst_stride = (input->StrideB + 3) & ~3;
    const int size = getTypeSize(input->Type) * input->Size * count;
    GLboolean mapped_named_bo = GL_FALSE;

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset, size, 32);

    radeon_bo_map(attr->bo, 1);

    if (!input->BufferObj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
        mapped_named_bo = GL_TRUE;
    }

    {
        GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
        GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
        int i;

        for (i = 0; i < count; ++i)
        {
            memcpy(dst_ptr, src_ptr, input->StrideB);
            src_ptr += input->StrideB;
            dst_ptr += dst_stride;
        }
    }

    radeon_bo_unmap(attr->bo);
    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }

    attr->stride = dst_stride;
}
#endif

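/* Set up one radeon_aos stream per active vertex attribute.  Doubles
 * and 32-bit integer types always take the CPU conversion path, as does
 * (on big-endian hosts) any type whose component size is not 4 bytes,
 * presumably because vertex-fetch swapping operates on 32-bit words.
 * Named buffer objects are referenced in place; client arrays are
 * copied into a DMA region.
 */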
static void r700SetupStreams(struct gl_context *ctx, const struct gl_client_array *input[], int count)
{
    context_t *context = R700_CONTEXT(ctx);
    GLuint stride;
    int ret;
    int i, index;

    R600_STATECHANGE(context, vtx);

    for (index = 0; index < context->nNumActiveAos; index++)
    {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT
#if MESA_BIG_ENDIAN
            || getTypeSize(input[i]->Type) != 4
#endif
            )
        {
            assert(count);
            r700ConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        }
        else
        {
            if (input[i]->BufferObj->Name)
            {
                context->stream_desc[index].stride = input[i]->StrideB;
                context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
                context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                context->stream_desc[index].is_named_bo = GL_TRUE;
            }
            else
            {
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0)
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                }
                else
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo,
                                     &context->stream_desc[index].bo_offset, size, 32);

                radeon_bo_map(context->stream_desc[index].bo, 1);
                assert(context->stream_desc[index].bo->ptr != NULL);

                dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr,
                                               context->stream_desc[index].bo_offset);

                switch (context->stream_desc[index].dwords)
                {
                case 1:
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 2:
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 3:
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 4:
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                default:
                    assert(0);
                    break;
                }
                radeon_bo_unmap(context->stream_desc[index].bo);
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if (context->stream_desc[index].is_named_bo)
        {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs,
                                              context->stream_desc[index].bo,
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    ret = radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs,
                                        first_elem(&context->radeon.dma.reserved)->bo,
                                        RADEON_GEM_DOMAIN_GTT, 0);
}

static void r700FreeData(struct gl_context *ctx)
{
    /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
     * to prevent double unref in radeonReleaseArrays
     * called during context destroy
     */
    context_t *context = R700_CONTEXT(ctx);

    int i;

    for (i = 0; i < context->nNumActiveAos; i++)
    {
        if (!context->stream_desc[i].is_named_bo)
        {
            radeon_bo_unref(context->stream_desc[i].bo);
        }
        context->radeon.tcl.aos[i].bo = NULL;
    }

    if (context->ind_buf.bo != NULL)
    {
        radeon_bo_unref(context->ind_buf.bo);
    }
}

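/* Repack index formats the VGT cannot fetch directly: unsigned byte
 * indices always, plus unsigned short indices on big-endian hosts (so
 * the resulting dwords can be byte-swapped on fetch).  Two 16-bit
 * indices go into each dword, low index in the low half, e.g. indices
 * {7, 8, 9} become 0x00080007, 0x00000009 before any swap.
 */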
static void r700FixupIndexBuffer(struct gl_context *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);
    GLvoid *src_ptr;
    GLuint *out;
    int i;
    GLboolean mapped_named_bo = GL_FALSE;

    if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
        mapped_named_bo = GL_TRUE;
        assert(mesa_ind_buf->obj->Pointer != NULL);
    }
    src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

    if (mesa_ind_buf->type == GL_UNSIGNED_BYTE)
    {
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
        GLubyte *in = (GLubyte *)src_ptr;

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }

        radeon_bo_unmap(context->ind_buf.bo);
#if MESA_BIG_ENDIAN
    }
    else
    { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
        GLushort *in = (GLushort *)src_ptr;
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }
        radeon_bo_unmap(context->ind_buf.bo);
#endif
    }

    context->ind_buf.is_32bit = GL_FALSE;
    context->ind_buf.count = mesa_ind_buf->count;

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
    }
}

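/* Copy the application's index buffer into a GTT DMA region.  Formats
 * the hardware can fetch as-is (everything but ubyte on little-endian;
 * only uint on big-endian) are copied verbatim; the rest go through
 * r700FixupIndexBuffer() above.
 */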
static void r700SetupIndexBuffer(struct gl_context *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);

    if (!mesa_ind_buf) {
        context->ind_buf.bo = NULL;
        return;
    }

#if MESA_BIG_ENDIAN
    if (mesa_ind_buf->type == GL_UNSIGNED_INT)
#else
    if (mesa_ind_buf->type != GL_UNSIGNED_BYTE)
#endif
    {
        const GLvoid *src_ptr;
        GLvoid *dst_ptr;
        GLboolean mapped_named_bo = GL_FALSE;

        if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
            assert(mesa_ind_buf->obj->Pointer != NULL);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

        const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);
        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        dst_ptr = ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        memcpy(dst_ptr, src_ptr, size);

        radeon_bo_unmap(context->ind_buf.bo);
        context->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
        context->ind_buf.count = mesa_ind_buf->count;

        if (mapped_named_bo)
        {
            ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
        }
    }
    else
    {
        r700FixupIndexBuffer(ctx, mesa_ind_buf);
    }
}

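/* Fall back to software rendering for selection/feedback modes
 * (anything but plain GL_RENDER).
 */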
static GLboolean check_fallbacks(struct gl_context *ctx)
{
    if (ctx->RenderMode != GL_RENDER)
        return GL_TRUE;

    return GL_FALSE;
}

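/* Validate state and buffers, emit the dirty state, then walk the
 * primitive list, choosing the DMA-indexed or immediate draw path per
 * primitive.  Returns GL_FALSE before emitting anything when a fallback
 * is needed, so the caller can route the draw through TNL instead.
 */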
static GLboolean r700TryDrawPrims(struct gl_context *ctx,
                                  const struct gl_client_array *arrays[],
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims,
                                  const struct _mesa_index_buffer *ib,
                                  GLuint min_index,
                                  GLuint max_index)
{
    context_t *context = R700_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    GLuint i, id = 0;
    struct radeon_renderbuffer *rrb;

    if (ctx->NewState)
        _mesa_update_state( ctx );

    if (check_fallbacks(ctx))
        return GL_FALSE;

    _tnl_UpdateFixedFunctionProgram(ctx);
    r700SetVertexFormat(ctx, arrays, max_index + 1);
    /* shaders need to be updated before buffers are validated */
    r700UpdateShaders(ctx);
    if (!r600ValidateBuffers(ctx))
        return GL_FALSE;

    /* always emit CB base to prevent
     * lock ups on some chips.
     */
    R600_STATECHANGE(context, cb_target);
    /* mark vtx as dirty since it changes per-draw */
    R600_STATECHANGE(context, vtx);

    r700SetScissor(context);
    r700SetupVertexProgram(ctx);
    r700SetupFragmentProgram(ctx);
    r700UpdateShaderStates(ctx);

    GLuint emit_end = r700PredictRenderSize(ctx, prim, ib, nr_prims)
                    + context->radeon.cmdbuf.cs->cdw;

    r700SetupIndexBuffer(ctx, ib);
    r700SetupStreams(ctx, arrays, max_index + 1);

    radeonEmitState(radeon);

    radeon_debug_add_indent();
    for (i = 0; i < nr_prims; ++i)
    {
        if (context->ind_buf.bo)
            r700RunRenderPrimitive(ctx,
                                   prim[i].start,
                                   prim[i].start + prim[i].count,
                                   prim[i].mode,
                                   prim[i].basevertex);
        else
            r700RunRenderPrimitiveImmediate(ctx,
                                            prim[i].start,
                                            prim[i].start + prim[i].count,
                                            prim[i].mode);
    }
    radeon_debug_remove_indent();

    /* Flush render op cached for last several quads. */
    /* XXX drm should handle this in fence submit */
    r700WaitForIdleClean(context);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     CB_ACTION_ENA_bit | (1 << (id + 6)));

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    r700FreeData(ctx);

    if (emit_end < context->radeon.cmdbuf.cs->cdw)
    {
        WARN_ONCE("Rendering was %d commands larger than predicted size."
                  " We might overflow command buffer.\n", context->radeon.cmdbuf.cs->cdw - emit_end);
    }

    return GL_TRUE;
}

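/* vbo draw entry point.  When some varyings live in client memory, the
 * min/max index range is computed (if not already valid) and primitives
 * are rebased to a zero min_index so only the used range is uploaded;
 * if the hardware path declines, the draw is replayed through the TNL
 * pipeline, which handles the remaining fallbacks.
 */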
static void r700DrawPrims(struct gl_context *ctx,
                          const struct gl_client_array *arrays[],
                          const struct _mesa_prim *prim,
                          GLuint nr_prims,
                          const struct _mesa_index_buffer *ib,
                          GLboolean index_bounds_valid,
                          GLuint min_index,
                          GLuint max_index)
{
    GLboolean retval = GL_FALSE;

    context_t *context = R700_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    radeon_prepare_render(radeon);

    /* This check should get folded into just the places that
     * min/max index are really needed.
     */
    if (!vbo_all_varyings_in_vbos(arrays)) {
        if (!index_bounds_valid)
            vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
        /* do we want to rebase, minimizes the
         * amount of data to upload? */
        if (min_index) {
            vbo_rebase_prims( ctx, arrays, prim, nr_prims, ib, min_index, max_index, r700DrawPrims );
            return;
        }
    }
    /* Make an attempt at drawing */
    retval = r700TryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

    /* If failed run tnl pipeline - it should take care of fallbacks */
    if (!retval) {
        _swsetup_Wakeup(ctx);
        _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
    }
}

void r700InitDraw(struct gl_context *ctx)
{
    struct vbo_context *vbo = vbo_context(ctx);

    /* to be enabled */
    vbo->draw_prims = r700DrawPrims;
}