/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "main/glheader.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
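
/* Tables mapping a GL vertex array type and component count (1..4) to the
 * corresponding hardware surface format.  Index 0 of each table is unused
 * so the arrays can be indexed directly by the array's Size.
 */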
static GLuint double_types[5] = {
   0,
   BRW_SURFACEFORMAT_R64_FLOAT,
   BRW_SURFACEFORMAT_R64G64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64A64_FLOAT
};

static GLuint float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R32_FLOAT,
   BRW_SURFACEFORMAT_R32G32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
};
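
/* The size == 3 entry below reuses the 4-component R16G16B16A16_FLOAT
 * format, apparently because no 3-component 16-bit float format is
 * usable for vertex fetch here.
 */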
static GLuint half_float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R16_FLOAT,
   BRW_SURFACEFORMAT_R16G16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT
};

static GLuint uint_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UNORM,
   BRW_SURFACEFORMAT_R32G32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_UNORM
};

static GLuint uint_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_USCALED,
   BRW_SURFACEFORMAT_R32G32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_USCALED
};

static GLuint int_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SNORM,
   BRW_SURFACEFORMAT_R32G32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_SNORM
};

static GLuint int_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SSCALED,
   BRW_SURFACEFORMAT_R32G32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_SSCALED
};

static GLuint ushort_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UNORM,
   BRW_SURFACEFORMAT_R16G16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_UNORM
};

static GLuint ushort_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_USCALED,
   BRW_SURFACEFORMAT_R16G16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_USCALED
};

static GLuint short_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SNORM,
   BRW_SURFACEFORMAT_R16G16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_SNORM
};

static GLuint short_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SSCALED,
   BRW_SURFACEFORMAT_R16G16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_SSCALED
};

static GLuint ubyte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UNORM,
   BRW_SURFACEFORMAT_R8G8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_UNORM
};

static GLuint ubyte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_USCALED,
   BRW_SURFACEFORMAT_R8G8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_USCALED
};

static GLuint byte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SNORM,
   BRW_SURFACEFORMAT_R8G8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_SNORM
};

static GLuint byte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SSCALED,
   BRW_SURFACEFORMAT_R8G8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_SSCALED
};


/**
 * Given vertex array type/size/format/normalized info, return
 * the appropriate hardware surface type.
 * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
 */
static GLuint get_surface_type( GLenum type, GLuint size,
                                GLenum format, GLboolean normalized )
{
   if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
      printf("type %s size %d normalized %d\n",
             _mesa_lookup_enum_by_nr(type), size, normalized);

   if (normalized) {
      switch (type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT: return half_float_types[size];
      case GL_INT: return int_types_norm[size];
      case GL_SHORT: return short_types_norm[size];
      case GL_BYTE: return byte_types_norm[size];
      case GL_UNSIGNED_INT: return uint_types_norm[size];
      case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
      case GL_UNSIGNED_BYTE:
         if (format == GL_BGRA) {
            /* See GL_EXT_vertex_array_bgra */
            assert(size == 4);
            return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
         }
         else {
            return ubyte_types_norm[size];
         }
      default: assert(0); return 0;
      }
   }
   else {
      assert(format == GL_RGBA); /* sanity check */
      switch (type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT: return half_float_types[size];
      case GL_INT: return int_types_scale[size];
      case GL_SHORT: return short_types_scale[size];
      case GL_BYTE: return byte_types_scale[size];
      case GL_UNSIGNED_INT: return uint_types_scale[size];
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
      /* This produces GL_FIXED inputs as values between INT32_MIN and
       * INT32_MAX, which will be scaled down by 1/65536 by the VS.
       */
      case GL_FIXED: return int_types_scale[size];
      default: assert(0); return 0;
      }
   }
}


static GLuint get_size( GLenum type )
{
   switch (type) {
   case GL_DOUBLE: return sizeof(GLdouble);
   case GL_FLOAT: return sizeof(GLfloat);
   case GL_HALF_FLOAT: return sizeof(GLhalfARB);
   case GL_INT: return sizeof(GLint);
   case GL_SHORT: return sizeof(GLshort);
   case GL_BYTE: return sizeof(GLbyte);
   case GL_UNSIGNED_INT: return sizeof(GLuint);
   case GL_UNSIGNED_SHORT: return sizeof(GLushort);
   case GL_UNSIGNED_BYTE: return sizeof(GLubyte);
   case GL_FIXED: return sizeof(GLuint);
   default: assert(0); return 0;
   }
}

static GLuint get_index_type(GLenum type)
{
   switch (type) {
   case GL_UNSIGNED_BYTE: return BRW_INDEX_BYTE;
   case GL_UNSIGNED_SHORT: return BRW_INDEX_WORD;
   case GL_UNSIGNED_INT: return BRW_INDEX_DWORD;
   default: assert(0); return 0;
   }
}
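
/* Copy one client vertex array (elements [min, max]) into the upload
 * buffer, either as a straight block copy when the strides match or
 * element by element when compacting to dst_stride.
 */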
static void
copy_array_to_vbo_array(struct brw_context *brw,
                        struct brw_vertex_element *element,
                        int min, int max,
                        struct brw_vertex_buffer *buffer,
                        GLuint dst_stride)
{
   int src_stride = element->glarray->StrideB;
   const unsigned char *src = element->glarray->Ptr + min * src_stride;
   int count = max - min + 1;
   GLuint size = count * dst_stride;

   if (dst_stride == src_stride) {
      intel_upload_data(&brw->intel, src, size, dst_stride,
                        &buffer->bo, &buffer->offset);
   } else {
      char * const map = intel_upload_map(&brw->intel, size, dst_stride);
      char *dst = map;

      while (count--) {
         memcpy(dst, src, dst_stride);
         src += src_stride;
         dst += dst_stride;
      }
      intel_upload_unmap(&brw->intel, map, size, dst_stride,
                         &buffer->bo, &buffer->offset);
   }
   buffer->stride = dst_stride;
}
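
/* Gather the vertex arrays the current VS reads, decide which of them can
 * be sourced directly from buffer objects and which need to be copied into
 * the upload buffer (interleaving small client arrays where possible), and
 * record the resulting vertex buffer set for brw_emit_vertices().
 */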
static void brw_prepare_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   /* CACHE_NEW_VS_PROG */
   GLbitfield vs_inputs = brw->vs.prog_data->inputs_read;
   const unsigned char *ptr = NULL;
   GLuint interleaved = 0, total_size = 0;
   unsigned int min_index = brw->vb.min_index;
   unsigned int max_index = brw->vb.max_index;
   int delta, i, j;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* First build an array of pointers to ve's in vb.inputs_read
    */
   if (0)
      printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   brw->vb.nr_enabled = 0;
   while (vs_inputs) {
      GLuint i = ffs(vs_inputs) - 1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      vs_inputs &= ~(1 << i);
      if (input->glarray->Size && get_size(input->glarray->Type))
         brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }

   if (brw->vb.nr_enabled == 0)
      return;

   if (brw->vb.nr_buffers)
      goto validate;

   /* XXX: In the rare cases where this happens we fall back all
    * the way to software rasterization, although a tnl fallback
    * would be sufficient.  I don't know of *any* real world
    * cases with > 17 vertex attributes enabled, so it probably
    * isn't an issue at this point.
    */
   if (brw->vb.nr_enabled >= BRW_VEP_MAX) {
      intel->Fallback = GL_TRUE; /* boolean, not bitfield */
      return;
   }

   for (i = j = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_client_array *glarray = input->glarray;
      int type_size = get_size(glarray->Type);

      input->element_size = type_size * glarray->Size;

      if (_mesa_is_bufferobj(glarray->BufferObj)) {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(glarray->BufferObj);
         int k;

         for (k = 0; k < i; k++) {
            const struct gl_client_array *other = brw->vb.enabled[k]->glarray;
            if (glarray->BufferObj == other->BufferObj &&
                glarray->StrideB == other->StrideB &&
                (uintptr_t)(glarray->Ptr - other->Ptr) < glarray->StrideB)
            {
               input->buffer = brw->vb.enabled[k]->buffer;
               input->offset = glarray->Ptr - other->Ptr;
               break;
            }
         }
         if (k == i) {
            struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];

            /* Named buffer object: Just reference its contents directly. */
            buffer->bo = intel_bufferobj_source(intel,
                                                intel_buffer, type_size,
                                                &buffer->offset);
            drm_intel_bo_reference(buffer->bo);
            buffer->offset += (uintptr_t)glarray->Ptr;
            buffer->stride = glarray->StrideB;

            input->buffer = j++;
            input->offset = 0;
         }

         /* This is a common place to reach if the user mistakenly supplies
          * a pointer in place of a VBO offset.  If we just let it go through,
          * we may end up dereferencing a pointer beyond the bounds of the
          * GTT.  We would hope that the VBO's max_index would save us, but
          * Mesa appears to hand us min/max values not clipped to the
          * array object's _MaxElement, and _MaxElement frequently appears
          * to be wrong anyway.
          *
          * The VBO spec allows application termination in this case, and it's
          * probably a service to the poor programmer to do so rather than
          * trying to just not render.
          */
         assert(input->offset < brw->vb.buffers[input->buffer].bo->size);
      } else {
         /* Queue the buffer object up to be uploaded in the next pass,
          * when we've decided if we're doing interleaved or not.
          */
         if (nr_uploads == 0) {
            /* Position array not properly enabled:
             */
            if (input->attrib == VERT_ATTRIB_POS && glarray->StrideB == 0) {
               intel->Fallback = GL_TRUE; /* boolean, not bitfield */
               return;
            }

            interleaved = glarray->StrideB;
            ptr = glarray->Ptr;
         }
         else if (interleaved != glarray->StrideB ||
                  (uintptr_t)(glarray->Ptr - ptr) > interleaved)
         {
            interleaved = 0;
         }
         else if ((uintptr_t)(glarray->Ptr - ptr) & (type_size - 1))
         {
            /* enforce natural alignment (for doubles) */
            interleaved = 0;
         }

         upload[nr_uploads++] = input;
         total_size = ALIGN(total_size, type_size);
         total_size += input->element_size;
      }
   }

   /* If we need to upload all the arrays, then we can trim those arrays to
    * only the used elements [min_index, max_index] so long as we adjust all
    * the values used in the 3DPRIMITIVE i.e. by setting the vertex bias.
    */
   brw->vb.start_vertex_bias = 0;
   delta = min_index;
   if (nr_uploads == brw->vb.nr_enabled) {
      brw->vb.start_vertex_bias = -delta;
      delta = 0;
   }
   if (delta && !brw->intel.intelScreen->relaxed_relocations)
      min_index = delta = 0;

   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1) {
      if (interleaved && interleaved <= 2*total_size) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
         /* All uploads are interleaved, so upload the arrays together as
          * interleaved.  First, upload the contents and set up upload[0].
          */
         copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
                                 buffer, interleaved);
         buffer->offset -= delta * interleaved;

         for (i = 0; i < nr_uploads; i++) {
            /* Then, just point upload[i] at upload[0]'s buffer. */
            upload[i]->offset =
               ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
            upload[i]->buffer = j;
         }
         j++;

         nr_uploads = 0;
      }
      else if (total_size < 2048) {
         /* Upload non-interleaved arrays into a single interleaved array */
         struct brw_vertex_buffer *buffer;
         int count = max_index - min_index + 1;
         int offset;
         char *map;

         map = intel_upload_map(&brw->intel, total_size * count, total_size);
         for (i = offset = 0; i < nr_uploads; i++) {
            const unsigned char *src = upload[i]->glarray->Ptr;
            int size = upload[i]->element_size;
            int stride = upload[i]->glarray->StrideB;
            char *dst;
            int n;

            offset = ALIGN(offset, get_size(upload[i]->glarray->Type));
            dst = map + offset;
            src += min_index * stride;

            for (n = 0; n < count; n++) {
               memcpy(dst, src, size);
               src += stride;
               dst += total_size;
            }

            upload[i]->offset = offset;
            upload[i]->buffer = j;

            offset += size;
         }
         assert(offset == total_size);
         buffer = &brw->vb.buffers[j++];
         intel_upload_unmap(&brw->intel, map, offset * count, offset,
                            &buffer->bo, &buffer->offset);
         buffer->stride = offset;
         buffer->offset -= delta * offset;

         nr_uploads = 0;
      }
   }
   /* Upload non-interleaved arrays */
   for (i = 0; i < nr_uploads; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
      copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
                              buffer, upload[i]->element_size);
      buffer->offset -= delta * buffer->stride;
      upload[i]->buffer = j++;
      upload[i]->offset = 0;
   }

   /* Can we simply extend the current vb? */
   if (j == brw->vb.nr_current_buffers) {
      int delta = 0;
      for (i = 0; i < j; i++) {
         int d;

         if (brw->vb.current_buffers[i].handle != brw->vb.buffers[i].bo->handle ||
             brw->vb.current_buffers[i].stride != brw->vb.buffers[i].stride)
            break;

         d = brw->vb.buffers[i].offset - brw->vb.current_buffers[i].offset;
         if (d < 0)
            break;
         if (i == 0)
            delta = d / brw->vb.current_buffers[i].stride;
         if (delta * brw->vb.current_buffers[i].stride != d)
            break;
      }

      if (i == j) {
         brw->vb.start_vertex_bias += delta;
         while (--j >= 0)
            drm_intel_bo_unreference(brw->vb.buffers[j].bo);
         j = 0;
      }
   }

   brw->vb.nr_buffers = j;

validate:
   brw_prepare_query_begin(brw);
   for (i = 0; i < brw->vb.nr_buffers; i++) {
      brw_add_validated_bo(brw, brw->vb.buffers[i].bo);
   }
}
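
/* Emit the vertex buffer (CMD_VERTEX_BUFFER) and vertex element
 * (CMD_VERTEX_ELEMENT) state packets for the buffers set up by
 * brw_prepare_vertices().
 */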
static void brw_emit_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   GLuint i;

   brw_emit_query_begin(brw);

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but it doesn't do anything unless
    * a VE loads from it.
    */
   if (brw->vb.nr_enabled == 0) {
      BEGIN_BATCH(3);
      OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | 1);
      if (intel->gen >= 6) {
         OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      }
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      CACHED_BATCH();
      return;
   }

   /* Now emit VB and VEP state packets.
    */

   if (brw->vb.nr_buffers) {
      BEGIN_BATCH(1 + 4*brw->vb.nr_buffers);
      OUT_BATCH((CMD_VERTEX_BUFFER << 16) | (4*brw->vb.nr_buffers - 1));
      for (i = 0; i < brw->vb.nr_buffers; i++) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
         uint32_t dw0;

         if (intel->gen >= 6) {
            dw0 = GEN6_VB0_ACCESS_VERTEXDATA | (i << GEN6_VB0_INDEX_SHIFT);
         } else {
            dw0 = BRW_VB0_ACCESS_VERTEXDATA | (i << BRW_VB0_INDEX_SHIFT);
         }

         if (intel->gen >= 7)
            dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;

         OUT_BATCH(dw0 | (buffer->stride << BRW_VB0_PITCH_SHIFT));
         OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->offset);
         if (intel->gen >= 5) {
            OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->bo->size - 1);
         } else
            OUT_BATCH(0);
         OUT_BATCH(0); /* Instance data step rate */

         brw->vb.current_buffers[i].handle = buffer->bo->handle;
         brw->vb.current_buffers[i].offset = buffer->offset;
         brw->vb.current_buffers[i].stride = buffer->stride;
      }
      brw->vb.nr_current_buffers = i;
      ADVANCE_BATCH();
   }

   BEGIN_BATCH(1 + brw->vb.nr_enabled * 2);
   OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | (2*brw->vb.nr_enabled - 1));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = get_surface_type(input->glarray->Type,
                                         input->glarray->Size,
                                         input->glarray->Format,
                                         input->glarray->Normalized);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      /* Components not present in the source array are padded with 0
       * (and 1.0 for w); the cases deliberately fall through.
       */
      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0; /* fall through */
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0; /* fall through */
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0; /* fall through */
      case 3: comp3 = BRW_VE1_COMPONENT_STORE_1_FLT;
         break;
      }

      if (intel->gen >= 6) {
         OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((input->buffer << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      }

      if (intel->gen >= 5)
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
      else
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
                   ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }
   CACHED_BATCH();
}

const struct brw_tracked_state brw_vertices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
      .cache = CACHE_NEW_VS_PROG,
   },
   .prepare = brw_prepare_vertices,
   .emit = brw_emit_vertices,
};
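
/* Set up the index buffer: client-memory index arrays are copied into the
 * upload buffer, while index data in a real buffer object is referenced
 * directly (rebasing into a temporary if it isn't aligned to its element
 * size).
 */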
static void brw_prepare_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   drm_intel_bo *bo = NULL;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = get_size(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {

      /* Get new bufferobj, offset:
       */
      intel_upload_data(&brw->intel, index_buffer->ptr, ib_size, ib_type_size,
                        &bo, &offset);
      brw->ib.start_vertex_offset = offset / ib_type_size;
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((get_size(index_buffer->type) - 1) & offset) {
         GLubyte *map = ctx->Driver.MapBuffer(ctx,
                                              GL_ELEMENT_ARRAY_BUFFER_ARB,
                                              GL_DYNAMIC_DRAW_ARB,
                                              bufferobj);
         map += offset;

         intel_upload_data(&brw->intel, map, ib_size, ib_type_size,
                           &bo, &offset);
         brw->ib.start_vertex_offset = offset / ib_type_size;

         ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
      } else {
         /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
          * the index buffer state when we're just moving the start index
          * of our drawing.
          */
         brw->ib.start_vertex_offset = offset / ib_type_size;

         bo = intel_bufferobj_source(intel,
                                     intel_buffer_object(bufferobj),
                                     ib_type_size,
                                     &offset);
         drm_intel_bo_reference(bo);

         brw->ib.start_vertex_offset += offset / ib_type_size;
      }
   }

   if (brw->ib.bo != bo) {
      drm_intel_bo_unreference(brw->ib.bo);
      brw->ib.bo = bo;

      brw_add_validated_bo(brw, brw->ib.bo);
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   } else {
      drm_intel_bo_unreference(bo);
   }

   if (index_buffer->type != brw->ib.type) {
      brw->ib.type = index_buffer->type;
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   }
}

const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_INDICES,
      .cache = 0,
   },
   .prepare = brw_prepare_indices,
};
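
/* Emit the CMD_INDEX_BUFFER packet pointing at brw->ib.bo. */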
static void brw_emit_index_buffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;

   if (index_buffer == NULL)
      return;

   BEGIN_BATCH(3);
   OUT_BATCH(CMD_INDEX_BUFFER << 16 |
             /* cut index enable << 10 */
             get_index_type(index_buffer->type) << 8 |
             1);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             0);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             brw->ib.bo->size - 1);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_index_buffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_INDEX_BUFFER,
      .cache = 0,
   },
   .emit = brw_emit_index_buffer,
};