Merge branch 'glsl-to-tgsi'
[mesa.git] / src/mesa/drivers/dri/r600/evergreen_render.c
/*
 * Copyright (C) 2008-2010  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 */

#include "main/glheader.h"
#include "main/state.h"
#include "main/imports.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/context.h"
#include "main/dd.h"
#include "main/simple_list.h"
#include "main/api_arrayelt.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "vbo/vbo.h"

#include "tnl/tnl.h"
#include "tnl/t_vp_build.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "vbo/vbo_context.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "evergreen_fragprog.h"
#include "evergreen_vertprog.h"

#include "evergreen_state.h"
#include "evergreen_tex.h"

#include "radeon_buffer_objects.h"
#include "radeon_common_context.h"

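/* Translate a GL primitive mode into the corresponding VGT DI_PT_*
 * hardware primitive type.  Returns -1 for an unknown mode.
 */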
static int evergreenPrimitiveType(int prim) //same
{
    switch (prim & PRIM_MODE_MASK)
    {
    case GL_POINTS:
        return DI_PT_POINTLIST;
    case GL_LINES:
        return DI_PT_LINELIST;
    case GL_LINE_STRIP:
        return DI_PT_LINESTRIP;
    case GL_LINE_LOOP:
        return DI_PT_LINELOOP;
    case GL_TRIANGLES:
        return DI_PT_TRILIST;
    case GL_TRIANGLE_STRIP:
        return DI_PT_TRISTRIP;
    case GL_TRIANGLE_FAN:
        return DI_PT_TRIFAN;
    case GL_QUADS:
        return DI_PT_QUADLIST;
    case GL_QUAD_STRIP:
        return DI_PT_QUADSTRIP;
    case GL_POLYGON:
        return DI_PT_POLYGON;
    default:
        assert(0);
        return -1;
    }
}

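/* Trim the vertex count down to a whole number of primitives for the
 * given mode (e.g. a multiple of 3 for GL_TRIANGLES).  Returns the
 * usable vertex count, which may be 0 if too few vertices were supplied,
 * or -1 for an unknown mode.
 */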
static int evergreenNumVerts(int num_verts, int prim) //same
{
    int verts_off = 0;

    switch (prim & PRIM_MODE_MASK) {
    case GL_POINTS:
        verts_off = 0;
        break;
    case GL_LINES:
        verts_off = num_verts % 2;
        break;
    case GL_LINE_STRIP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_LINE_LOOP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_TRIANGLES:
        verts_off = num_verts % 3;
        break;
    case GL_TRIANGLE_STRIP:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_TRIANGLE_FAN:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_QUADS:
        verts_off = num_verts % 4;
        break;
    case GL_QUAD_STRIP:
        if (num_verts < 4)
            verts_off = num_verts;
        else
            verts_off = num_verts % 2;
        break;
    case GL_POLYGON:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    default:
        assert(0);
        return -1;
    }

    return num_verts - verts_off;
}

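/* Emit an indexed draw: primitive type, index type, instance count,
 * base vertex/instance location, then a DRAW_INDEX packet pointing at
 * the index buffer previously uploaded by evergreenSetupIndexBuffer().
 */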
static void evergreenRunRenderPrimitive(struct gl_context *ctx, int start, int end, int prim,
                                        GLint basevertex) //same
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, total_emit;
    int num_indices;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices = 0;

    type = evergreenPrimitiveType(prim);
    num_indices = evergreenNumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);

    if (GL_TRUE != context->ind_buf.is_32bit)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    /* 16-bit indexes are packed in a 32-bit value */
    SETfield(vgt_index_type,
#if MESA_BIG_ENDIAN
             VGT_DMA_SWAP_32_BIT,
#else
             VGT_DMA_SWAP_NONE,
#endif
             SWAP_MODE_shift, SWAP_MODE_mask);

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_SRC_SEL_DMA, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    total_emit = 3   /* VGT_PRIMITIVE_TYPE */
               + 2   /* VGT_INDEX_TYPE */
               + 2   /* NUM_INSTANCES */
               + 4   /* VTX_BASE_VTX_LOC + VTX_START_INST_LOC */
               + 5 + 2; /* DRAW_INDEX */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    // prim
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    // index type
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    // num instances
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* offset */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 2));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(basevertex); //VTX_BASE_VTX_LOC
    R600_OUT_BATCH(0); //VTX_START_INST_LOC
    // draw packet
    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX, 3));
    R600_OUT_BATCH(context->ind_buf.bo_offset);
    R600_OUT_BATCH(0);
    R600_OUT_BATCH(vgt_num_indices);
    R600_OUT_BATCH(vgt_draw_initiator);
    R600_OUT_BATCH_RELOC(context->ind_buf.bo_offset,
                         context->ind_buf.bo,
                         context->ind_buf.bo_offset,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}

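/* Emit a non-indexed draw.  When the draw starts at vertex 0 the hardware
 * can generate indices itself (DRAW_INDEX_AUTO); otherwise sequential
 * indices are written inline into the command stream (DRAW_INDEX_IMMD),
 * packed two 16-bit indices per dword when they fit.
 */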
static void evergreenRunRenderPrimitiveImmediate(struct gl_context *ctx, int start, int end, int prim) //same
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, i;
    uint32_t num_indices, total_emit = 0;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices = 0;

    type = evergreenPrimitiveType(prim);
    num_indices = evergreenNumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    if (num_indices > 0xffff)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }
    else
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    /* 16-bit indexes are packed in a 32-bit value */
    SETfield(vgt_index_type,
#if MESA_BIG_ENDIAN
             VGT_DMA_SWAP_32_BIT,
#else
             VGT_DMA_SWAP_NONE,
#endif
             SWAP_MODE_shift, SWAP_MODE_mask);

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    if (start == 0)
    {
        SETfield(vgt_draw_initiator, DI_SRC_SEL_AUTO_INDEX, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            total_emit += num_indices;
        }
        else
        {
            total_emit += (num_indices + 1) / 2;
        }
        SETfield(vgt_draw_initiator, DI_SRC_SEL_IMMEDIATE, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    }

    total_emit += 3   /* VGT_PRIMITIVE_TYPE */
                + 2   /* VGT_INDEX_TYPE */
                + 2   /* NUM_INSTANCES */
                + 4   /* VTX_BASE_VTX_LOC + VTX_START_INST_LOC */
                + 3;  /* DRAW */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    // prim
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    // index type
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    // num instances
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* offset */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 2));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0); //VTX_BASE_VTX_LOC
    R600_OUT_BATCH(0); //VTX_START_INST_LOC
    // draw packet
    if (start == 0)
    {
        R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
        R600_OUT_BATCH(vgt_num_indices);
        R600_OUT_BATCH(vgt_draw_initiator);
    }
    else
    {
        if (num_indices > 0xffff)
        {
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (num_indices + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i++)
            {
                R600_OUT_BATCH(i);
            }
        }
        else
        {
            R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_IMMD, (((num_indices + 1) / 2) + 1)));
            R600_OUT_BATCH(vgt_num_indices);
            R600_OUT_BATCH(vgt_draw_initiator);
            for (i = start; i < (start + num_indices); i += 2)
            {
                if ((i + 1) == (start + num_indices))
                {
                    R600_OUT_BATCH(i);
                }
                else
                {
                    R600_OUT_BATCH(((i + 1) << 16) | (i));
                }
            }
        }
    }

    END_BATCH();
    COMMIT_BATCH();
}

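/* Expand "count" elements of type TYPE to floats at dst_ptr, applying
 * MACRO to each component when the array is normalized.  Relies on
 * src_ptr, dst_ptr, stride, count and input being in scope at the
 * expansion site; passing "(GLfloat)" as MACRO yields a plain cast.
 */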
#define CONVERT( TYPE, MACRO ) do {                             \
        GLuint i, j, sz;                                        \
        sz = input->Size;                                       \
        if (input->Normalized) {                                \
            for (i = 0; i < count; i++) {                       \
                const TYPE *in = (TYPE *)src_ptr;               \
                for (j = 0; j < sz; j++) {                      \
                    *dst_ptr++ = MACRO(*in);                    \
                    in++;                                       \
                }                                               \
                src_ptr += stride;                              \
            }                                                   \
        } else {                                                \
            for (i = 0; i < count; i++) {                       \
                const TYPE *in = (TYPE *)src_ptr;               \
                for (j = 0; j < sz; j++) {                      \
                    *dst_ptr++ = (GLfloat)(*in);                \
                    in++;                                       \
                }                                               \
                src_ptr += stride;                              \
            }                                                   \
        }                                                       \
} while (0)

/**
 * Convert attribute data type to float.
 * If the attribute uses a named buffer object, replace the bo with a
 * newly allocated bo.
 */
static void evergreenConvertAttrib(struct gl_context *ctx, int count,
                                   const struct gl_client_array *input,
                                   struct StreamDesc *attr)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    const GLvoid *src_ptr;
    GLboolean mapped_named_bo = GL_FALSE;
    GLfloat *dst_ptr;
    GLuint stride;

    stride = (input->StrideB == 0) ? evergreen_getTypeSize(input->Type) * input->Size : input->StrideB;

    /* Convert value for first element only */
    if (input->StrideB == 0)
    {
        count = 1;
    }

    if (input->BufferObj->Name)
    {
        if (!input->BufferObj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
    }
    else
    {
        src_ptr = input->Ptr;
    }

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset,
                         sizeof(GLfloat) * input->Size * count, 32);

    radeon_bo_map(attr->bo, 1);

    dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

    assert(src_ptr != NULL);

    switch (input->Type)
    {
    case GL_DOUBLE:
        CONVERT(GLdouble, (GLfloat));
        break;
    case GL_UNSIGNED_INT:
        CONVERT(GLuint, UINT_TO_FLOAT);
        break;
    case GL_INT:
        CONVERT(GLint, INT_TO_FLOAT);
        break;
    case GL_UNSIGNED_SHORT:
        CONVERT(GLushort, USHORT_TO_FLOAT);
        break;
    case GL_SHORT:
        CONVERT(GLshort, SHORT_TO_FLOAT);
        break;
    case GL_UNSIGNED_BYTE:
        assert(input->Format != GL_BGRA);
        CONVERT(GLubyte, UBYTE_TO_FLOAT);
        break;
    case GL_BYTE:
        CONVERT(GLbyte, BYTE_TO_FLOAT);
        break;
    default:
        assert(0);
        break;
    }

    radeon_bo_unmap(attr->bo);

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }
}

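/* Repack index formats the hardware cannot fetch directly: unsigned byte
 * indices (and, on big-endian hosts, unsigned short indices) are uploaded
 * as pairs of 16-bit values packed into dwords.
 */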
static void evergreenFixupIndexBuffer(struct gl_context *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    GLvoid *src_ptr;
    GLuint *out;
    int i;
    GLboolean mapped_named_bo = GL_FALSE;

    if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
        mapped_named_bo = GL_TRUE;
        assert(mesa_ind_buf->obj->Pointer != NULL);
    }
    src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

    if (mesa_ind_buf->type == GL_UNSIGNED_BYTE)
    {
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
        GLubyte *in = (GLubyte *)src_ptr;

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }

        radeon_bo_unmap(context->ind_buf.bo);
#if MESA_BIG_ENDIAN
    }
    else
    { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
        GLushort *in = (GLushort *)src_ptr;
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }
        radeon_bo_unmap(context->ind_buf.bo);
#endif
    }

    context->ind_buf.is_32bit = GL_FALSE;
    context->ind_buf.count = mesa_ind_buf->count;

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
    }
}

static GLboolean evergreen_check_fallbacks(struct gl_context *ctx) //same
{
    if (ctx->RenderMode != GL_RENDER)
        return GL_TRUE;

    return GL_FALSE;
}

/* start 3d, idle, cb/db flush */
#define PRE_EMIT_STATE_BUFSZ (5 + 5 + 14)

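/* Estimate the command-buffer dwords this draw will need (fixed overhead
 * plus per-primitive draw packets) and make sure the buffer has room,
 * flushing it first if necessary.
 */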
static GLuint evergreenPredictRenderSize(struct gl_context *ctx,
                                         const struct _mesa_prim *prim,
                                         const struct _mesa_index_buffer *ib,
                                         GLuint nr_prims)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    GLboolean flushed;
    GLuint dwords, i;
    GLuint state_size;

    dwords = PRE_EMIT_STATE_BUFSZ;
    if (ib)
        dwords += nr_prims * 18;
    else {
        for (i = 0; i < nr_prims; ++i)
        {
            if (prim[i].start == 0)
                dwords += 14;
            else if (prim[i].count > 0xffff)
                dwords += prim[i].count + 14;
            else
                dwords += ((prim[i].count + 1) / 2) + 14;
        }
    }

    state_size = radeonCountStateEmitSize(&context->radeon);
    flushed = rcommonEnsureCmdBufSpace(&context->radeon,
                                       dwords + state_size,
                                       __FUNCTION__);
    if (flushed)
        dwords += radeonCountStateEmitSize(&context->radeon);
    else
        dwords += state_size;

    radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);
    return dwords;
}

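/* Upload the index buffer into a DMA region the GPU can read.  32-bit
 * (and, on little-endian hosts, 16-bit) indices are copied verbatim;
 * everything else goes through evergreenFixupIndexBuffer().
 */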
static void evergreenSetupIndexBuffer(struct gl_context *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);

    if (!mesa_ind_buf) {
        context->ind_buf.bo = NULL;
        return;
    }

#if MESA_BIG_ENDIAN
    if (mesa_ind_buf->type == GL_UNSIGNED_INT)
#else
    if (mesa_ind_buf->type != GL_UNSIGNED_BYTE)
#endif
    {
        const GLvoid *src_ptr;
        GLvoid *dst_ptr;
        GLboolean mapped_named_bo = GL_FALSE;

        if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
        {
            ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
            assert(mesa_ind_buf->obj->Pointer != NULL);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

        const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);
        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        dst_ptr = ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        memcpy(dst_ptr, src_ptr, size);

        radeon_bo_unmap(context->ind_buf.bo);
        context->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
        context->ind_buf.count = mesa_ind_buf->count;

        if (mapped_named_bo)
        {
            ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
        }
    }
    else
    {
        evergreenFixupIndexBuffer(ctx, mesa_ind_buf);
    }
}

#if 0 /* unused */
static void evergreenAlignDataToDword(struct gl_context *ctx,
                                      const struct gl_client_array *input,
                                      int count,
                                      struct StreamDesc *attr)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    const int dst_stride = (input->StrideB + 3) & ~3;
    const int size = getTypeSize(input->Type) * input->Size * count;
    GLboolean mapped_named_bo = GL_FALSE;

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset, size, 32);

    radeon_bo_map(attr->bo, 1);

    if (!input->BufferObj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
        mapped_named_bo = GL_TRUE;
    }

    {
        GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
        GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
        int i;

        for (i = 0; i < count; ++i)
        {
            memcpy(dst_ptr, src_ptr, input->StrideB);
            src_ptr += input->StrideB;
            dst_ptr += dst_stride;
        }
    }

    radeon_bo_unmap(attr->bo);
    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }

    attr->stride = dst_stride;
}
#endif

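/* Bind each active vertex attribute to a GPU-visible buffer: named VBOs
 * are referenced in place, client arrays are streamed into a DMA region,
 * and types the vertex fetcher cannot handle are converted to floats
 * first via evergreenConvertAttrib().
 */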
static void evergreenSetupStreams(struct gl_context *ctx, const struct gl_client_array *input[], int count)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    GLuint stride;
    int ret;
    int i, index;

    EVERGREEN_STATECHANGE(context, vtx);

    for (index = 0; index < context->nNumActiveAos; index++)
    {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT
#if MESA_BIG_ENDIAN
            || getTypeSize(input[i]->Type) != 4
#endif
            )
        {
            evergreenConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        }
        else
        {
            if (input[i]->BufferObj->Name)
            {
                context->stream_desc[index].stride = input[i]->StrideB;
                context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
                context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                context->stream_desc[index].is_named_bo = GL_TRUE;
            }
            else
            {
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0)
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                }
                else
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo,
                                     &context->stream_desc[index].bo_offset, size, 32);

                radeon_bo_map(context->stream_desc[index].bo, 1);
                assert(context->stream_desc[index].bo->ptr != NULL);

                dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr,
                                               context->stream_desc[index].bo_offset);

                switch (context->stream_desc[index].dwords)
                {
                case 1:
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 2:
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 3:
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 4:
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                default:
                    assert(0);
                    break;
                }

                radeon_bo_unmap(context->stream_desc[index].bo);
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if (context->stream_desc[index].is_named_bo)
        {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs,
                                              context->stream_desc[index].bo,
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    ret = radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs,
                                        first_elem(&context->radeon.dma.reserved)->bo,
                                        RADEON_GEM_DOMAIN_GTT, 0);
}

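/* Release the per-draw buffers (attribute streams, VP/FP constant
 * buffers, index buffer) created while setting up the previous draw.
 */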
static void evergreenFreeData(struct gl_context *ctx)
{
    /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
     * to prevent double unref in radeonReleaseArrays
     * called during context destroy
     */
    context_t *context = EVERGREEN_CONTEXT(ctx);

    int i;

    for (i = 0; i < context->nNumActiveAos; i++)
    {
        if (!context->stream_desc[i].is_named_bo)
        {
            radeon_bo_unref(context->stream_desc[i].bo);
        }
        context->radeon.tcl.aos[i].bo = NULL;
    }

    if (context->vp_Constbo != NULL)
    {
        radeon_bo_unref(context->vp_Constbo);
        context->vp_Constbo = NULL;
    }
    if (context->fp_Constbo != NULL)
    {
        radeon_bo_unref(context->fp_Constbo);
        context->fp_Constbo = NULL;
    }

    if (context->ind_buf.bo != NULL)
    {
        radeon_bo_unref(context->ind_buf.bo);
    }
}

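/* Set up all state for a hardware draw and emit it.  Returns GL_FALSE
 * if a software fallback is required, in which case the caller drops
 * back to the TNL pipeline.
 */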
static GLboolean evergreenTryDrawPrims(struct gl_context *ctx,
                                       const struct gl_client_array *arrays[],
                                       const struct _mesa_prim *prim,
                                       GLuint nr_prims,
                                       const struct _mesa_index_buffer *ib,
                                       GLuint min_index,
                                       GLuint max_index)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    GLuint i, id = 0;
    struct radeon_renderbuffer *rrb;

    if (ctx->NewState)
        _mesa_update_state(ctx);

    if (evergreen_check_fallbacks(ctx))
        return GL_FALSE;

    _tnl_UpdateFixedFunctionProgram(ctx);
    evergreenSetVertexFormat(ctx, arrays, max_index + 1);

    /* shaders need to be updated before buffers are validated */
    evergreenUpdateShaders(ctx);
    if (!evergreenValidateBuffers(ctx))
        return GL_FALSE;

    /* always emit CB base to prevent
     * lock ups on some chips.
     */
    EVERGREEN_STATECHANGE(context, cb);
    /* mark vtx as dirty since it changes per-draw */
    EVERGREEN_STATECHANGE(context, vtx);

    evergreenSetScissor(context);

    evergreenSetupVertexProgram(ctx);
    evergreenSetupFragmentProgram(ctx);
    evergreenUpdateShaderStates(ctx);

    GLuint emit_end = evergreenPredictRenderSize(ctx, prim, ib, nr_prims)
                    + context->radeon.cmdbuf.cs->cdw;

    /* evergreenPredictRenderSize will call radeonReleaseDmaRegions,
     * so update the VP/FP constant buffers after it. */
    evergreenSetupVPconstants(ctx);
    evergreenSetupFPconstants(ctx);

    evergreenSetupIndexBuffer(ctx, ib);

    evergreenSetupStreams(ctx, arrays, max_index + 1);

    radeonEmitState(radeon);

    radeon_debug_add_indent();

    for (i = 0; i < nr_prims; ++i)
    {
        if (context->ind_buf.bo)
            evergreenRunRenderPrimitive(ctx,
                                        prim[i].start,
                                        prim[i].start + prim[i].count,
                                        prim[i].mode,
                                        prim[i].basevertex);
        else
            evergreenRunRenderPrimitiveImmediate(ctx,
                                                 prim[i].start,
                                                 prim[i].start + prim[i].count,
                                                 prim[i].mode);
    }

    radeon_debug_remove_indent();

    /* Flush render op cached for last several quads. */
    /* XXX drm should handle this in fence submit */

    //evergreenWaitForIdleClean(context);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     CB_ACTION_ENA_bit | (1 << (id + 6)));

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    evergreenFreeData(ctx);

    if (emit_end < context->radeon.cmdbuf.cs->cdw)
    {
        WARN_ONCE("Rendering was %d commands larger than predicted size."
                  " We might overflow command buffer.\n", context->radeon.cmdbuf.cs->cdw - emit_end);
    }

    return GL_TRUE;
}

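/* vbo draw entry point: computes index bounds and rebases as needed,
 * tries the hardware path, and falls back to software TNL if that fails.
 */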
static void evergreenDrawPrims(struct gl_context *ctx,
                               const struct gl_client_array *arrays[],
                               const struct _mesa_prim *prim,
                               GLuint nr_prims,
                               const struct _mesa_index_buffer *ib,
                               GLboolean index_bounds_valid,
                               GLuint min_index,
                               GLuint max_index)
{
    GLboolean retval = GL_FALSE;

    context_t *context = EVERGREEN_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    radeon_prepare_render(radeon);

    /* This check should get folded into just the places that
     * min/max index are really needed.
     */
    if (!vbo_all_varyings_in_vbos(arrays)) {
        if (!index_bounds_valid)
            vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
        /* do we want to rebase?  It minimizes the
         * amount of data to upload. */
        if (min_index) {
            vbo_rebase_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index, evergreenDrawPrims);
            return;
        }
    }
    /* Make an attempt at drawing */
    retval = evergreenTryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

    /* If that failed, run the tnl pipeline - it should take care of fallbacks */
    if (!retval) {
        _swsetup_Wakeup(ctx);
        _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
    }
}

void evergreenInitDraw(struct gl_context *ctx)
{
    struct vbo_context *vbo = vbo_context(ctx);

    /* to be enabled */
    vbo->draw_prims = evergreenDrawPrims;
}