/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/glformats.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"

/* Per-type lookup tables mapping a vertex array's component count (1..4) to
 * a hardware surface format; index 0 is unused.  Where a table repeats its
 * four-component format at index 3, the four-component format stands in for
 * a three-component variant the vertex fetcher doesn't provide.
 */
static const GLuint double_types[5] = {
   0,
   BRW_SURFACEFORMAT_R64_FLOAT,
   BRW_SURFACEFORMAT_R64G64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64A64_FLOAT
};

static const GLuint float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R32_FLOAT,
   BRW_SURFACEFORMAT_R32G32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
};

static const GLuint half_float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R16_FLOAT,
   BRW_SURFACEFORMAT_R16G16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT
};

static const GLuint fixed_point_types[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SFIXED,
   BRW_SURFACEFORMAT_R32G32_SFIXED,
   BRW_SURFACEFORMAT_R32G32B32_SFIXED,
   BRW_SURFACEFORMAT_R32G32B32A32_SFIXED,
};

static const GLuint uint_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UINT,
   BRW_SURFACEFORMAT_R32G32_UINT,
   BRW_SURFACEFORMAT_R32G32B32_UINT,
   BRW_SURFACEFORMAT_R32G32B32A32_UINT
};

static const GLuint uint_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UNORM,
   BRW_SURFACEFORMAT_R32G32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_UNORM
};

static const GLuint uint_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_USCALED,
   BRW_SURFACEFORMAT_R32G32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_USCALED
};

static const GLuint int_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SINT,
   BRW_SURFACEFORMAT_R32G32_SINT,
   BRW_SURFACEFORMAT_R32G32B32_SINT,
   BRW_SURFACEFORMAT_R32G32B32A32_SINT
};

static const GLuint int_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SNORM,
   BRW_SURFACEFORMAT_R32G32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_SNORM
};

static const GLuint int_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SSCALED,
   BRW_SURFACEFORMAT_R32G32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_SSCALED
};

static const GLuint ushort_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UINT,
   BRW_SURFACEFORMAT_R16G16_UINT,
   BRW_SURFACEFORMAT_R16G16B16A16_UINT,
   BRW_SURFACEFORMAT_R16G16B16A16_UINT
};

static const GLuint ushort_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UNORM,
   BRW_SURFACEFORMAT_R16G16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_UNORM
};

static const GLuint ushort_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_USCALED,
   BRW_SURFACEFORMAT_R16G16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_USCALED
};

static const GLuint short_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SINT,
   BRW_SURFACEFORMAT_R16G16_SINT,
   BRW_SURFACEFORMAT_R16G16B16A16_SINT,
   BRW_SURFACEFORMAT_R16G16B16A16_SINT
};

static const GLuint short_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SNORM,
   BRW_SURFACEFORMAT_R16G16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_SNORM
};

static const GLuint short_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SSCALED,
   BRW_SURFACEFORMAT_R16G16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_SSCALED
};

static const GLuint ubyte_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UINT,
   BRW_SURFACEFORMAT_R8G8_UINT,
   BRW_SURFACEFORMAT_R8G8B8A8_UINT,
   BRW_SURFACEFORMAT_R8G8B8A8_UINT
};

static const GLuint ubyte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UNORM,
   BRW_SURFACEFORMAT_R8G8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_UNORM
};

static const GLuint ubyte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_USCALED,
   BRW_SURFACEFORMAT_R8G8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_USCALED
};

static const GLuint byte_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SINT,
   BRW_SURFACEFORMAT_R8G8_SINT,
   BRW_SURFACEFORMAT_R8G8B8A8_SINT,
   BRW_SURFACEFORMAT_R8G8B8A8_SINT
};

static const GLuint byte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SNORM,
   BRW_SURFACEFORMAT_R8G8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_SNORM
};

static const GLuint byte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SSCALED,
   BRW_SURFACEFORMAT_R8G8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_SSCALED
};

/**
 * Given vertex array type/size/format/normalized info, return
 * the appropriate hardware surface type.
 * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
 */
unsigned
brw_get_vertex_surface_type(struct brw_context *brw,
                            const struct gl_client_array *glarray)
{
   int size = glarray->Size;

   if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
      fprintf(stderr, "type %s size %d normalized %d\n",
              _mesa_enum_to_string(glarray->Type),
              glarray->Size, glarray->Normalized);

   if (glarray->Integer) {
      assert(glarray->Format == GL_RGBA); /* sanity check */
      switch (glarray->Type) {
      case GL_INT: return int_types_direct[size];
      case GL_SHORT: return short_types_direct[size];
      case GL_BYTE: return byte_types_direct[size];
      case GL_UNSIGNED_INT: return uint_types_direct[size];
      case GL_UNSIGNED_SHORT: return ushort_types_direct[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_direct[size];
      default: unreachable("not reached");
      }
   } else if (glarray->Type == GL_UNSIGNED_INT_10F_11F_11F_REV) {
      return BRW_SURFACEFORMAT_R11G11B10_FLOAT;
   } else if (glarray->Normalized) {
      switch (glarray->Type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT: return half_float_types[size];
      case GL_INT: return int_types_norm[size];
      case GL_SHORT: return short_types_norm[size];
      case GL_BYTE: return byte_types_norm[size];
      case GL_UNSIGNED_INT: return uint_types_norm[size];
      case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
      case GL_UNSIGNED_BYTE:
         if (glarray->Format == GL_BGRA) {
            /* See GL_EXT_vertex_array_bgra */
            assert(size == 4);
            return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
         }
         else {
            return ubyte_types_norm[size];
         }
      case GL_FIXED:
         if (brw->gen >= 8 || brw->is_haswell)
            return fixed_point_types[size];

         /* This produces GL_FIXED inputs as values between INT32_MIN and
          * INT32_MAX, which will be scaled down by 1/65536 by the VS.
          */
         return int_types_scale[size];
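
         /* Worked example: GL_FIXED is 16.16 fixed point, so 1.5 is encoded
          * as 0x00018000.  Read back as a plain 32-bit integer that is
          * 98304, and 98304 * (1.0f / 65536) = 1.5f once the VS applies
          * the scale.
          */
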
      /* See GL_ARB_vertex_type_2_10_10_10_rev.
       * W/A: Pre-Haswell, the hardware doesn't really support the formats
       * we'd like to use here, so upload everything as UINT and fix it
       * up in the shader.
       */
      case GL_INT_2_10_10_10_REV:
         assert(size == 4);
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_SNORM
               : BRW_SURFACEFORMAT_R10G10B10A2_SNORM;
         }
         return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
      case GL_UNSIGNED_INT_2_10_10_10_REV:
         assert(size == 4);
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_UNORM
               : BRW_SURFACEFORMAT_R10G10B10A2_UNORM;
         }
         return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
      default: unreachable("not reached");
      }
   }
   else {
      /* See GL_ARB_vertex_type_2_10_10_10_rev.
       * W/A: Pre-Haswell, the hardware doesn't really support the formats
       * we'd like to use here, so upload everything as UINT and fix it
       * up in the shader.
       */
      if (glarray->Type == GL_INT_2_10_10_10_REV) {
         assert(size == 4);
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_SSCALED
               : BRW_SURFACEFORMAT_R10G10B10A2_SSCALED;
         }
         return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
      } else if (glarray->Type == GL_UNSIGNED_INT_2_10_10_10_REV) {
         assert(size == 4);
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_USCALED
               : BRW_SURFACEFORMAT_R10G10B10A2_USCALED;
         }
         return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
      }
      assert(glarray->Format == GL_RGBA); /* sanity check */
      switch (glarray->Type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT: return half_float_types[size];
      case GL_INT: return int_types_scale[size];
      case GL_SHORT: return short_types_scale[size];
      case GL_BYTE: return byte_types_scale[size];
      case GL_UNSIGNED_INT: return uint_types_scale[size];
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
      case GL_FIXED:
         if (brw->gen >= 8 || brw->is_haswell)
            return fixed_point_types[size];

         /* This produces GL_FIXED inputs as values between INT32_MIN and
          * INT32_MAX, which will be scaled down by 1/65536 by the VS.
          */
         return int_types_scale[size];
      default: unreachable("not reached");
      }
   }
}
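
/* Illustrative example (not part of the driver): an attribute bound with
 *
 *    glVertexAttribPointer(loc, 3, GL_FLOAT, GL_FALSE, 0, ptr);
 *
 * reaches the final switch with Size == 3, Type == GL_FLOAT, and neither
 * Integer nor Normalized set, so it resolves to float_types[3], i.e.
 * BRW_SURFACEFORMAT_R32G32B32_FLOAT.
 */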

static void
copy_array_to_vbo_array(struct brw_context *brw,
                        struct brw_vertex_element *element,
                        int min, int max,
                        struct brw_vertex_buffer *buffer,
                        GLuint dst_stride)
{
   const int src_stride = element->glarray->StrideB;

   /* If the source stride is zero, we just want to upload the current
    * attribute once and set the buffer's stride to 0.  There's no need
    * to replicate it out.
    */
   if (src_stride == 0) {
      intel_upload_data(brw, element->glarray->Ptr,
                        element->glarray->_ElementSize,
                        element->glarray->_ElementSize,
                        &buffer->bo, &buffer->offset);

      buffer->stride = 0;
      return;
   }

   const unsigned char *src = element->glarray->Ptr + min * src_stride;
   int count = max - min + 1;
   GLuint size = count * dst_stride;
   uint8_t *dst = intel_upload_space(brw, size, dst_stride,
                                     &buffer->bo, &buffer->offset);

   if (dst_stride == src_stride) {
      memcpy(dst, src, size);
   } else {
      /* Copy element by element, compacting src_stride down to dst_stride. */
      while (count--) {
         memcpy(dst, src, dst_stride);
         src += src_stride;
         dst += dst_stride;
      }
   }
   buffer->stride = dst_stride;
}
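
/* Worked example (hypothetical numbers): with min == 10, max == 19, a
 * source stride of 32, and dst_stride == 16, the loop above uploads
 * (19 - 10 + 1) * 16 = 160 bytes, taking the first 16 bytes of each
 * 32-byte source element starting at Ptr + 10 * 32.
 */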

void
brw_prepare_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_VS_PROG_DATA */
   GLbitfield64 vs_inputs = brw->vs.prog_data->inputs_read;
   const unsigned char *ptr = NULL;
   GLuint interleaved = 0;
   unsigned int min_index = brw->vb.min_index + brw->basevertex;
   unsigned int max_index = brw->vb.max_index + brw->basevertex;
   unsigned i;
   int delta, j;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* _NEW_POLYGON
    *
    * On gen6+, edge flags don't end up in the VUE (either in or out of the
    * VS).  Instead, they're uploaded as the last vertex element, and the
    * data is passed sideband through the fixed function units.  So, we need
    * to prepare the vertex buffer for it, but it's not present in
    * inputs_read.
    */
   if (brw->gen >= 6 && (ctx->Polygon.FrontMode != GL_FILL ||
                         ctx->Polygon.BackMode != GL_FILL)) {
      vs_inputs |= VERT_BIT_EDGEFLAG;
   }

   if (0)
      fprintf(stderr, "%s %d..%d\n", __func__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   brw->vb.nr_enabled = 0;
   while (vs_inputs) {
      GLuint index = ffsll(vs_inputs) - 1;
      struct brw_vertex_element *input = &brw->vb.inputs[index];

      vs_inputs &= ~BITFIELD64_BIT(index);
      brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }

   if (brw->vb.nr_enabled == 0)
      return;

   if (brw->vb.nr_buffers)
      return;

   for (i = j = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_client_array *glarray = input->glarray;

      if (_mesa_is_bufferobj(glarray->BufferObj)) {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(glarray->BufferObj);
         unsigned k;

         /* If we have a VB set to be uploaded for this buffer object
          * already, reuse that VB state so that we emit fewer
          * relocations.
          */
         for (k = 0; k < i; k++) {
            const struct gl_client_array *other = brw->vb.enabled[k]->glarray;
            if (glarray->BufferObj == other->BufferObj &&
                glarray->StrideB == other->StrideB &&
                glarray->InstanceDivisor == other->InstanceDivisor &&
                (uintptr_t)(glarray->Ptr - other->Ptr) < glarray->StrideB)
            {
               input->buffer = brw->vb.enabled[k]->buffer;
               input->offset = glarray->Ptr - other->Ptr;
               break;
            }
         }
         if (k == i) {
            struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];

            /* Named buffer object: Just reference its contents directly. */
            buffer->offset = (uintptr_t)glarray->Ptr;
            buffer->stride = glarray->StrideB;
            buffer->step_rate = glarray->InstanceDivisor;

            uint32_t offset, size;
            if (glarray->InstanceDivisor) {
               offset = buffer->offset;
               size = (buffer->stride * ((brw->num_instances /
                                          glarray->InstanceDivisor) - 1) +
                       glarray->_ElementSize);
            } else {
               if (min_index == -1) {
                  offset = 0;
                  size = intel_buffer->Base.Size;
               } else {
                  offset = buffer->offset + min_index * buffer->stride;
                  size = (buffer->stride * (max_index - min_index) +
                          glarray->_ElementSize);
               }
            }
            buffer->bo = intel_bufferobj_buffer(brw, intel_buffer,
                                                offset, size);
            drm_intel_bo_reference(buffer->bo);

            input->buffer = j++;
            input->offset = 0;
         }

         /* This is a common place to reach if the user mistakenly supplies
          * a pointer in place of a VBO offset.  If we just let it go
          * through, we may end up dereferencing a pointer beyond the bounds
          * of the GTT.
          *
          * The VBO spec allows application termination in this case, and
          * it's probably a service to the poor programmer to do so rather
          * than trying to just not render.
          */
         assert(input->offset < brw->vb.buffers[input->buffer].bo->size);
      } else {
         /* Queue the buffer object up to be uploaded in the next pass,
          * when we've decided if we're doing interleaved or not.
          */
         if (nr_uploads == 0) {
            interleaved = glarray->StrideB;
            ptr = glarray->Ptr;
         }
         else if (interleaved != glarray->StrideB ||
                  glarray->Ptr < ptr ||
                  (uintptr_t)(glarray->Ptr - ptr) + glarray->_ElementSize > interleaved)
         {
            /* If our stride is different from the first attribute's stride,
             * or if the first attribute's stride didn't cover our element,
             * disable the interleaved upload optimization.  The second case
             * can most commonly occur in cases where there is a single
             * vertex and, for example, the data is stored on the
             * application's stack.
             *
             * NOTE: This will also disable the optimization in cases where
             * the data is in a different order than the array indices.
             * Something like:
             *
             *     float data[...];
             *     glVertexAttribPointer(0, 4, GL_FLOAT, 32, &data[4]);
             *     glVertexAttribPointer(1, 4, GL_FLOAT, 32, &data[0]);
             */
            interleaved = 0;
         }

         upload[nr_uploads++] = input;
      }
   }

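   /* Conversely, two client arrays with the same stride whose pointers land
    * within one stride of each other, e.g.
    *
    *    float data[...];
    *    glVertexAttribPointer(0, 4, GL_FLOAT, 32, &data[0]);
    *    glVertexAttribPointer(1, 4, GL_FLOAT, 32, &data[4]);
    *
    * keep `interleaved` non-zero above and are uploaded together as a
    * single interleaved buffer below.
    */
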
   /* If we need to upload all the arrays, then we can trim those arrays to
    * only the used elements [min_index, max_index] so long as we adjust all
    * the values used in the 3DPRIMITIVE i.e. by setting the vertex bias.
    */
   brw->vb.start_vertex_bias = 0;
   delta = min_index;
   if (nr_uploads == brw->vb.nr_enabled) {
      brw->vb.start_vertex_bias = -delta;
      delta = 0;
   }

   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1) {
      if (interleaved) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
         /* All uploads are interleaved, so upload the arrays together as
          * interleaved.  First, upload the contents and set up upload[0].
          */
         copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
                                 buffer, interleaved);
         buffer->offset -= delta * interleaved;

         for (i = 0; i < nr_uploads; i++) {
            /* Then, just point upload[i] at upload[0]'s buffer. */
            upload[i]->offset =
               ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
            upload[i]->buffer = j;
         }
         j++;

         nr_uploads = 0;
      }
   }
   /* Upload non-interleaved arrays */
   for (i = 0; i < nr_uploads; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
      if (upload[i]->glarray->InstanceDivisor == 0) {
         copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
                                 buffer, upload[i]->glarray->_ElementSize);
      } else {
         /* This is an instanced attribute, since its InstanceDivisor
          * is not zero.  Therefore, its data will be stepped after the
          * instanced draw has been run InstanceDivisor times.
          */
         uint32_t instanced_attr_max_index =
            (brw->num_instances - 1) / upload[i]->glarray->InstanceDivisor;
         copy_array_to_vbo_array(brw, upload[i], 0, instanced_attr_max_index,
                                 buffer, upload[i]->glarray->_ElementSize);
      }
      buffer->offset -= delta * buffer->stride;
      buffer->step_rate = upload[i]->glarray->InstanceDivisor;
      upload[i]->buffer = j++;
      upload[i]->offset = 0;
   }

   brw->vb.nr_buffers = j;
}

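/* Worked example of the vertex bias: if every enabled array is being
 * uploaded and min_index is 100, the arrays are trimmed to elements
 * [100, max_index], delta becomes 0, and start_vertex_bias is set to -100
 * so that vertex 100 of the 3DPRIMITIVE fetches element 0 of the trimmed
 * buffers.  If any array stays in a VBO instead, delta stays at min_index
 * and each uploaded buffer's offset is rewound by delta * stride.
 */
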
void
brw_prepare_shader_draw_parameters(struct brw_context *brw)
{
   /* For non-indirect draws, upload gl_BaseVertex. */
   if ((brw->vs.prog_data->uses_basevertex ||
        brw->vs.prog_data->uses_baseinstance) &&
       brw->draw.draw_params_bo == NULL) {
      intel_upload_data(brw, &brw->draw.params, sizeof(brw->draw.params), 4,
                        &brw->draw.draw_params_bo,
                        &brw->draw.draw_params_offset);
   }

   if (brw->vs.prog_data->uses_drawid) {
      intel_upload_data(brw, &brw->draw.gl_drawid,
                        sizeof(brw->draw.gl_drawid), 4,
                        &brw->draw.draw_id_bo,
                        &brw->draw.draw_id_offset);
   }
}

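/* intel_upload_data copies the given bytes into the driver's streaming
 * upload buffer (with 4-byte alignment here) and returns the BO and offset
 * where they landed.  Note the draw_params upload above is skipped when
 * draw_params_bo is already non-NULL, i.e. when the indirect-draw path has
 * already pointed it at the application's parameter buffer.
 */
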
/**
 * Emit a VERTEX_BUFFER_STATE entry (part of 3DSTATE_VERTEX_BUFFERS).
 */
static uint32_t *
emit_vertex_buffer_state(struct brw_context *brw,
                         unsigned buffer_nr,
                         drm_intel_bo *bo,
                         unsigned bo_ending_address,
                         unsigned bo_offset,
                         unsigned stride,
                         unsigned step_rate,
                         uint32_t *__map)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t dw0;

   if (brw->gen >= 6) {
      dw0 = (buffer_nr << GEN6_VB0_INDEX_SHIFT) |
            (step_rate ? GEN6_VB0_ACCESS_INSTANCEDATA
                       : GEN6_VB0_ACCESS_VERTEXDATA);
   } else {
      dw0 = (buffer_nr << BRW_VB0_INDEX_SHIFT) |
            (step_rate ? BRW_VB0_ACCESS_INSTANCEDATA
                       : BRW_VB0_ACCESS_VERTEXDATA);
   }

   if (brw->gen >= 7)
      dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;

   if (brw->gen == 7)
      dw0 |= GEN7_MOCS_L3 << 16;

   WARN_ONCE(stride >= (brw->gen >= 5 ? 2048 : 2047),
             "VBO stride %d too large, bad rendering may occur\n",
             stride);
   OUT_BATCH(dw0 | (stride << BRW_VB0_PITCH_SHIFT));
   OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, bo_offset);
   if (brw->gen >= 5) {
      OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, bo_ending_address);
   } else {
      OUT_BATCH(0);
   }
   OUT_BATCH(step_rate);

   return __map;
}
#define EMIT_VERTEX_BUFFER_STATE(...) __map = emit_vertex_buffer_state(__VA_ARGS__, __map)

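/* A note on the __map plumbing above: BEGIN_BATCH() declares a local __map
 * pointer (see intel_batchbuffer.h) that OUT_BATCH()/OUT_RELOC() write
 * through and advance.  Since the helper receives its own copy of that
 * pointer, it must return the advanced value, and the
 * EMIT_VERTEX_BUFFER_STATE() wrapper assigns it back to the caller's __map
 * so ADVANCE_BATCH()'s bookkeeping in the caller stays consistent.
 */
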
static void
brw_emit_vertices(struct brw_context *brw)
{
   GLuint i;

   brw_prepare_vertices(brw);
   brw_prepare_shader_draw_parameters(brw);

   brw_emit_query_begin(brw);

   unsigned nr_elements = brw->vb.nr_enabled;
   if (brw->vs.prog_data->uses_vertexid || brw->vs.prog_data->uses_instanceid ||
       brw->vs.prog_data->uses_basevertex || brw->vs.prog_data->uses_baseinstance)
      ++nr_elements;
   if (brw->vs.prog_data->uses_drawid)
      nr_elements++;

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but the buffers don't do anything
    * unless a VE loads from them.
    */
   if (nr_elements == 0) {
      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | 1);
      if (brw->gen >= 6) {
         OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      }
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      ADVANCE_BATCH();
      return;
   }

   /* Now emit VB and VEP state packets.
    */

   const bool uses_draw_params =
      brw->vs.prog_data->uses_basevertex ||
      brw->vs.prog_data->uses_baseinstance;
   const unsigned nr_buffers = brw->vb.nr_buffers +
      uses_draw_params + brw->vs.prog_data->uses_drawid;

   if (nr_buffers) {
      if (brw->gen >= 6) {
         assert(nr_buffers <= 33);
      } else {
         assert(nr_buffers <= 17);
      }

      BEGIN_BATCH(1 + 4 * nr_buffers);
      OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4 * nr_buffers - 1));
      for (i = 0; i < brw->vb.nr_buffers; i++) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
         EMIT_VERTEX_BUFFER_STATE(brw, i, buffer->bo, buffer->bo->size - 1,
                                  buffer->offset, buffer->stride,
                                  buffer->step_rate);
      }

      if (uses_draw_params) {
         EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers,
                                  brw->draw.draw_params_bo,
                                  brw->draw.draw_params_bo->size - 1,
                                  brw->draw.draw_params_offset,
                                  0,  /* stride */
                                  0); /* step rate */
      }

      if (brw->vs.prog_data->uses_drawid) {
         EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers + 1,
                                  brw->draw.draw_id_bo,
                                  brw->draw.draw_id_bo->size - 1,
                                  brw->draw.draw_id_offset,
                                  0,  /* stride */
                                  0); /* step rate */
      }

      ADVANCE_BATCH();
   }

   /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS,
    * presumably for VertexID/InstanceID.
    */
   if (brw->gen >= 6) {
      assert(nr_elements <= 34);
   } else {
      assert(nr_elements <= 18);
   }

   struct brw_vertex_element *gen6_edgeflag_input = NULL;

   BEGIN_BATCH(1 + nr_elements * 2);
   OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2 * nr_elements - 1));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = brw_get_vertex_surface_type(brw, input->glarray);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      if (input == &brw->vb.inputs[VERT_ATTRIB_EDGEFLAG]) {
         /* Gen6+ passes edgeflag as sideband along with the vertex, instead
          * of in the VUE.  We have to upload it sideband as the last vertex
          * element according to the B-Spec.
          */
         if (brw->gen >= 6) {
            gen6_edgeflag_input = input;
            continue;
         }
      }

      /* Components the array doesn't supply are overridden: everything past
       * Size gets 0, except the fourth component, which gets 1 (in integer
       * or float form).  The case labels below fall through intentionally.
       */
      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;
      case 3: comp3 = input->glarray->Integer ? BRW_VE1_COMPONENT_STORE_1_INT
                                              : BRW_VE1_COMPONENT_STORE_1_FLT;
         break;
      }

      if (brw->gen >= 6) {
         OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((input->buffer << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      }

      if (brw->gen >= 5)
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
      else
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
                   ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }

   if (brw->vs.prog_data->uses_vertexid || brw->vs.prog_data->uses_instanceid ||
       brw->vs.prog_data->uses_basevertex || brw->vs.prog_data->uses_baseinstance) {
      uint32_t dw0 = 0, dw1 = 0;
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_0;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_0;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_0;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_0;

      if (brw->vs.prog_data->uses_basevertex)
         comp0 = BRW_VE1_COMPONENT_STORE_SRC;

      if (brw->vs.prog_data->uses_baseinstance)
         comp1 = BRW_VE1_COMPONENT_STORE_SRC;

      if (brw->vs.prog_data->uses_vertexid)
         comp2 = BRW_VE1_COMPONENT_STORE_VID;

      if (brw->vs.prog_data->uses_instanceid)
         comp3 = BRW_VE1_COMPONENT_STORE_IID;

      dw1 = (comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
            (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
            (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
            (comp3 << BRW_VE1_COMPONENT_3_SHIFT);

      if (brw->gen >= 6) {
         dw0 |= GEN6_VE0_VALID |
                brw->vb.nr_buffers << GEN6_VE0_INDEX_SHIFT |
                BRW_SURFACEFORMAT_R32G32_UINT << BRW_VE0_FORMAT_SHIFT;
      } else {
         dw0 |= BRW_VE0_VALID |
                brw->vb.nr_buffers << BRW_VE0_INDEX_SHIFT |
                BRW_SURFACEFORMAT_R32G32_UINT << BRW_VE0_FORMAT_SHIFT;
         dw1 |= (i * 4) << BRW_VE1_DST_OFFSET_SHIFT;
      }

      /* Note that for gl_VertexID, gl_InstanceID, and gl_PrimitiveID values,
       * the format is ignored and the value is always int.
       */

      OUT_BATCH(dw0);
      OUT_BATCH(dw1);
   }

   if (brw->vs.prog_data->uses_drawid) {
      uint32_t dw0 = 0, dw1 = 0;

      dw1 = (BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
            (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
            (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
            (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT);

      if (brw->gen >= 6) {
         dw0 |= GEN6_VE0_VALID |
                ((brw->vb.nr_buffers + 1) << GEN6_VE0_INDEX_SHIFT) |
                (BRW_SURFACEFORMAT_R32_UINT << BRW_VE0_FORMAT_SHIFT);
      } else {
         dw0 |= BRW_VE0_VALID |
                ((brw->vb.nr_buffers + 1) << BRW_VE0_INDEX_SHIFT) |
                (BRW_SURFACEFORMAT_R32_UINT << BRW_VE0_FORMAT_SHIFT);

         dw1 |= (i * 4) << BRW_VE1_DST_OFFSET_SHIFT;
      }

      OUT_BATCH(dw0);
      OUT_BATCH(dw1);
   }

   if (brw->gen >= 6 && gen6_edgeflag_input) {
      uint32_t format =
         brw_get_vertex_surface_type(brw, gen6_edgeflag_input->glarray);

      OUT_BATCH((gen6_edgeflag_input->buffer << GEN6_VE0_INDEX_SHIFT) |
                GEN6_VE0_VALID |
                GEN6_VE0_EDGE_FLAG_ENABLE |
                (format << BRW_VE0_FORMAT_SHIFT) |
                (gen6_edgeflag_input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
   }

   ADVANCE_BATCH();
}

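/* Resulting vertex-buffer binding layout: slots [0, brw->vb.nr_buffers)
 * hold the user arrays; when the VS needs them, slot nr_buffers holds the
 * gl_BaseVertex/gl_BaseInstance pair (read as R32G32_UINT) and slot
 * nr_buffers + 1 holds gl_DrawID (read as R32_UINT), matching the extra
 * vertex elements emitted above.
 */
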
const struct brw_tracked_state brw_vertices = {
   .dirty = {
      .mesa = _NEW_POLYGON,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_VERTICES |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_emit_vertices,
};

static void
brw_upload_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   drm_intel_bo *old_bo = brw->ib.bo;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = _mesa_sizeof_type(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {
      /* Get new bufferobj, offset:
       */
      intel_upload_data(brw, index_buffer->ptr, ib_size, ib_type_size,
                        &brw->ib.bo, &offset);
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((ib_type_size - 1) & offset) {
         perf_debug("copying index buffer to a temporary to work around "
                    "misaligned offset %d\n", offset);

         GLubyte *map = ctx->Driver.MapBufferRange(ctx,
                                                   offset,
                                                   ib_size,
                                                   GL_MAP_READ_BIT,
                                                   bufferobj,
                                                   MAP_INTERNAL);

         intel_upload_data(brw, map, ib_size, ib_type_size,
                           &brw->ib.bo, &offset);

         ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
      } else {
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_buffer_object(bufferobj),
                                   offset, ib_size);
         if (bo != brw->ib.bo) {
            drm_intel_bo_unreference(brw->ib.bo);
            brw->ib.bo = bo;
            drm_intel_bo_reference(bo);
         }
      }
   }

   /* Use 3DPRIMITIVE's start_vertex_offset to avoid re-uploading
    * the index buffer state when we're just moving the start index
    * of our drawing.
    */
   brw->ib.start_vertex_offset = offset / ib_type_size;

   if (brw->ib.bo != old_bo)
      brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;

   if (index_buffer->type != brw->ib.type) {
      brw->ib.type = index_buffer->type;
      brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;
   }
}

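/* Worked example: a GL_UNSIGNED_SHORT index buffer bound at byte offset 6
 * gives ib_type_size == 2 and start_vertex_offset == 3, so the 3DPRIMITIVE
 * starts three indices into the buffer instead of forcing the index buffer
 * state to be re-emitted for the new offset.
 */
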
const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_INDICES,
   },
   .emit = brw_upload_indices,
};

static void
brw_emit_index_buffer(struct brw_context *brw)
{
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint cut_index_setting;

   if (index_buffer == NULL)
      return;

   /* Haswell sets the cut index through 3DSTATE_VF instead, so it isn't
    * enabled here.
    */
   if (brw->prim_restart.enable_cut_index && !brw->is_haswell) {
      cut_index_setting = BRW_CUT_INDEX_ENABLE;
   } else {
      cut_index_setting = 0;
   }

   BEGIN_BATCH(3);
   OUT_BATCH(CMD_INDEX_BUFFER << 16 |
             cut_index_setting |
             brw_get_index_type(index_buffer->type) |
             1);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             0);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             brw->ib.bo->size - 1);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_index_buffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_INDEX_BUFFER,
   },
   .emit = brw_emit_index_buffer,
};