i965: Rename brw->no_batch_wrap to intel_batchbuffer::no_wrap
[mesa.git] / src / mesa / drivers / dri / i965 / brw_draw.c
/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/errno.h>

#include "main/context.h"
#include "main/condrender.h"
#include "main/samplerobj.h"
#include "main/state.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/transformfeedback.h"
#include "main/framebuffer.h"
#include "tnl/tnl.h"
#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "drivers/common/meta.h"
#include "util/bitscan.h"

#include "brw_blorp.h"
#include "brw_draw.h"
#include "brw_defines.h"
#include "compiler/brw_eu_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_buffer_objects.h"

#define FILE_DEBUG_FLAG DEBUG_PRIMS
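
/* Map each GL primitive type onto the "reduced" class used by state that
 * only distinguishes points, lines, and triangles; quads reduce to
 * triangles since pre-Gen6 hardware draws them as trifans/tristrips
 * (descriptive note added here).
 */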
static const GLenum reduced_prim[GL_POLYGON+1] = {
   [GL_POINTS] = GL_POINTS,
   [GL_LINES] = GL_LINES,
   [GL_LINE_LOOP] = GL_LINES,
   [GL_LINE_STRIP] = GL_LINES,
   [GL_TRIANGLES] = GL_TRIANGLES,
   [GL_TRIANGLE_STRIP] = GL_TRIANGLES,
   [GL_TRIANGLE_FAN] = GL_TRIANGLES,
   [GL_QUADS] = GL_TRIANGLES,
   [GL_QUAD_STRIP] = GL_TRIANGLES,
   [GL_POLYGON] = GL_TRIANGLES
};

/* When the primitive changes, set a state bit and re-validate. This is not
 * the nicest approach; we would rather make all programs immune to the
 * active primitive (i.e. able to cope with all possibilities), but that may
 * not be realistic.
 */
static void
brw_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t hw_prim = get_hw_prim_for_gl_prim(prim->mode);

   DBG("PRIM: %s\n", _mesa_enum_to_string(prim->mode));

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (prim->mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      hw_prim = _3DPRIM_TRISTRIP;

   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      hw_prim = _3DPRIM_TRIFAN;
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[prim->mode] != brw->reduced_primitive) {
         brw->reduced_primitive = reduced_prim[prim->mode];
         brw->ctx.NewDriverState |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }
}

static void
gen6_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)
{
   const struct gl_context *ctx = &brw->ctx;
   uint32_t hw_prim;

   DBG("PRIM: %s\n", _mesa_enum_to_string(prim->mode));

   if (prim->mode == GL_PATCHES) {
      hw_prim = _3DPRIM_PATCHLIST(ctx->TessCtrlProgram.patch_vertices);
   } else {
      hw_prim = get_hw_prim_for_gl_prim(prim->mode);
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;
      if (prim->mode == GL_PATCHES)
         brw->ctx.NewDriverState |= BRW_NEW_PATCH_PRIMITIVE;
   }
}


/**
 * The hardware is capable of removing dangling vertices on its own; however,
 * prior to Gen6, we sometimes convert quads into trifans (and quad strips
 * into tristrips), since pre-Gen6 hardware requires a GS to render quads.
 * This function manually trims dangling vertices from a draw call involving
 * quads so that those dangling vertices won't get drawn when we convert to
 * trifans/tristrips.
 */
static GLuint
trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}
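
/* A few worked examples of the trimming above (added for illustration):
 *
 *    trim(GL_QUADS, 7)      == 4    three dangling vertices dropped
 *    trim(GL_QUAD_STRIP, 5) == 4    odd trailing vertex dropped
 *    trim(GL_QUAD_STRIP, 3) == 0    too few vertices for a single quad
 */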

static void
brw_emit_prim(struct brw_context *brw,
              const struct _mesa_prim *prim,
              uint32_t hw_prim,
              struct brw_transform_feedback_object *xfb_obj,
              unsigned stream)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   int verts_per_instance;
   int vertex_access_type;
   int indirect_flag;

   DBG("PRIM: %s %d %d\n", _mesa_enum_to_string(prim->mode),
       prim->start, prim->count);

   int start_vertex_location = prim->start;
   int base_vertex_location = prim->basevertex;

   if (prim->indexed) {
      vertex_access_type = devinfo->gen >= 7 ?
         GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM :
         GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = devinfo->gen >= 7 ?
         GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL :
         GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   /* We only need to trim the primitive count on pre-Gen6. */
   if (devinfo->gen < 6)
      verts_per_instance = trim(prim->mode, prim->count);
   else
      verts_per_instance = prim->count;

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0 && !prim->is_indirect && !xfb_obj)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the chip besides the draw code.
    */
   if (brw->always_flush_cache)
      brw_emit_mi_flush(brw);

   /* If indirect, emit a bunch of loads from the indirect BO. */
   if (xfb_obj) {
      indirect_flag = GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE;

      brw_load_register_mem(brw, GEN7_3DPRIM_VERTEX_COUNT,
                            xfb_obj->prim_count_bo,
                            stream * sizeof(uint32_t));
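      /* Note (added for clarity): MI_LOAD_REGISTER_IMM takes a list of
       * (register, value) pairs, and the length field in the header DWord
       * is biased by 2, hence "(9 - 2)" for the 9-DWord packet below that
       * programs the instance count and zeroes the start/base registers.
       */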
      BEGIN_BATCH(9);
      OUT_BATCH(MI_LOAD_REGISTER_IMM | (9 - 2));
      OUT_BATCH(GEN7_3DPRIM_INSTANCE_COUNT);
      OUT_BATCH(prim->num_instances);
      OUT_BATCH(GEN7_3DPRIM_START_VERTEX);
      OUT_BATCH(0);
      OUT_BATCH(GEN7_3DPRIM_BASE_VERTEX);
      OUT_BATCH(0);
      OUT_BATCH(GEN7_3DPRIM_START_INSTANCE);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else if (prim->is_indirect) {
      struct gl_buffer_object *indirect_buffer = brw->ctx.DrawIndirectBuffer;
      struct brw_bo *bo = intel_bufferobj_buffer(brw,
            intel_buffer_object(indirect_buffer),
            prim->indirect_offset, 5 * sizeof(GLuint), false);

      indirect_flag = GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE;

      brw_load_register_mem(brw, GEN7_3DPRIM_VERTEX_COUNT, bo,
                            prim->indirect_offset + 0);
      brw_load_register_mem(brw, GEN7_3DPRIM_INSTANCE_COUNT, bo,
                            prim->indirect_offset + 4);

      brw_load_register_mem(brw, GEN7_3DPRIM_START_VERTEX, bo,
                            prim->indirect_offset + 8);
      if (prim->indexed) {
         brw_load_register_mem(brw, GEN7_3DPRIM_BASE_VERTEX, bo,
                               prim->indirect_offset + 12);
         brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
                               prim->indirect_offset + 16);
      } else {
         brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
                               prim->indirect_offset + 12);
         BEGIN_BATCH(3);
         OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
         OUT_BATCH(GEN7_3DPRIM_BASE_VERTEX);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      }
   } else {
      indirect_flag = 0;
   }

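   /* Emit the 3DPRIMITIVE packet. As the stores below show, Gen7+ uses a
    * 7-DWord packet with the topology in DWord 1 next to the vertex access
    * type, while earlier gens pack the topology into the 6-DWord header;
    * the remaining DWords are vertex count, start vertex, instance count,
    * start instance, and base vertex (descriptive note added here).
    */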
   BEGIN_BATCH(devinfo->gen >= 7 ? 7 : 6);

   if (devinfo->gen >= 7) {
      const int predicate_enable =
         (brw->predicate.state == BRW_PREDICATE_STATE_USE_BIT)
         ? GEN7_3DPRIM_PREDICATE_ENABLE : 0;

      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2) | indirect_flag | predicate_enable);
      OUT_BATCH(hw_prim | vertex_access_type);
   } else {
      OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
                hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
                vertex_access_type);
   }
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(prim->num_instances);
   OUT_BATCH(prim->base_instance);
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   if (brw->always_flush_cache)
      brw_emit_mi_flush(brw);
}


static void
brw_merge_inputs(struct brw_context *brw,
                 const struct gl_vertex_array *arrays[])
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      brw_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].buffer = -1;
      brw->vb.inputs[i].glarray = arrays[i];
   }

   if (devinfo->gen < 8 && !devinfo->is_haswell) {
      uint64_t mask = ctx->VertexProgram._Current->info.inputs_read;
      /* Prior to Haswell, the hardware can't natively support GL_FIXED or
       * 2_10_10_10_REV vertex formats. Set appropriate workaround flags.
       */
      while (mask) {
         uint8_t wa_flags = 0;

         i = u_bit_scan64(&mask);

         switch (brw->vb.inputs[i].glarray->Type) {

         case GL_FIXED:
            wa_flags = brw->vb.inputs[i].glarray->Size;
            break;

         case GL_INT_2_10_10_10_REV:
            wa_flags |= BRW_ATTRIB_WA_SIGN;
            /* fallthrough */

         case GL_UNSIGNED_INT_2_10_10_10_REV:
            if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
               wa_flags |= BRW_ATTRIB_WA_BGRA;

            if (brw->vb.inputs[i].glarray->Normalized)
               wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
            else if (!brw->vb.inputs[i].glarray->Integer)
               wa_flags |= BRW_ATTRIB_WA_SCALE;

            break;
         }
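
         /* Illustrative example (comment added for clarity): a normalized
          * GL_BGRA GL_INT_2_10_10_10_REV attribute ends up with wa_flags ==
          * BRW_ATTRIB_WA_SIGN | BRW_ATTRIB_WA_BGRA | BRW_ATTRIB_WA_NORMALIZE,
          * so the vertex shader unpacks, swizzles, and normalizes it in
          * software.
          */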
         if (brw->vb.attrib_wa_flags[i] != wa_flags) {
            brw->vb.attrib_wa_flags[i] = wa_flags;
            brw->ctx.NewDriverState |= BRW_NEW_VS_ATTRIB_WORKAROUNDS;
         }
      }
   }
}

/* Disable auxiliary buffers if a renderbuffer is also bound as a texture
 * or shader image. This causes a self-dependency, where both rendering
 * and sampling may concurrently read or write the CCS buffer, causing
 * incorrect pixels.
 */
static bool
intel_disable_rb_aux_buffer(struct brw_context *brw,
                            struct intel_mipmap_tree *tex_mt,
                            const char *usage)
{
   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
   bool found = false;

   /* We only need to worry about color compression and fast clears. */
   if (tex_mt->aux_usage != ISL_AUX_USAGE_CCS_D &&
       tex_mt->aux_usage != ISL_AUX_USAGE_CCS_E)
      return false;

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      const struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb && irb->mt->bo == tex_mt->bo) {
         found = brw->draw_aux_buffer_disabled[i] = true;
      }
   }

   if (found) {
      perf_debug("Disabling CCS because a renderbuffer is also bound %s.\n",
                 usage);
   }

   return found;
}

/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer, resolve the depth buffer of each
 * enabled depth texture, and flush the render cache for any dirty textures.
 */
void
brw_predraw_resolve_inputs(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_texture_object *tex_obj;

   memset(brw->draw_aux_buffer_disabled, 0,
          sizeof(brw->draw_aux_buffer_disabled));

   /* Resolve depth buffer and render cache of each enabled texture. */
   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, i);
      enum isl_format view_format =
         translate_tex_format(brw, tex_obj->_Format, sampler->sRGBDecode);

      const bool disable_aux =
         intel_disable_rb_aux_buffer(brw, tex_obj->mt, "for sampling");

      intel_miptree_prepare_texture(brw, tex_obj->mt, view_format,
                                    disable_aux);

      brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);

      if (tex_obj->base.StencilSampling ||
          tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
         intel_update_r8stencil(brw, tex_obj->mt);
      }
   }

   /* Resolve color for each active shader image. */
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];

      if (unlikely(prog && prog->info.num_images)) {
         for (unsigned j = 0; j < prog->info.num_images; j++) {
            struct gl_image_unit *u =
               &ctx->ImageUnits[prog->sh.ImageUnits[j]];
            tex_obj = intel_texture_object(u->TexObj);

            if (tex_obj && tex_obj->mt) {
               intel_disable_rb_aux_buffer(brw, tex_obj->mt,
                                           "as a shader image");

               intel_miptree_prepare_image(brw, tex_obj->mt);

               brw_render_cache_set_check_flush(brw, tex_obj->mt->bo);
            }
         }
      }
   }
}
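
/**
 * \brief Resolve framebuffer attachments before drawing (summary added).
 *
 * Prepare the depth attachment's HiZ buffer, prepare color buffers that the
 * fragment shader reads via non-coherent framebuffer fetch, and prepare each
 * color render target for being rendered to.
 */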
static void
brw_predraw_resolve_framebuffer(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *depth_irb;

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb && depth_irb->mt) {
      intel_miptree_prepare_depth(brw, depth_irb->mt,
                                  depth_irb->mt_level,
                                  depth_irb->mt_layer,
                                  depth_irb->layer_count);
   }

   /* Resolve color buffers for non-coherent framebuffer fetch. */
   if (!ctx->Extensions.MESA_shader_framebuffer_fetch &&
       ctx->FragmentProgram._Current &&
       ctx->FragmentProgram._Current->info.outputs_read) {
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         const struct intel_renderbuffer *irb =
            intel_renderbuffer(fb->_ColorDrawBuffers[i]);

         if (irb) {
            intel_miptree_prepare_fb_fetch(brw, irb->mt, irb->mt_level,
                                           irb->mt_layer, irb->layer_count);
         }
      }
   }

   struct gl_framebuffer *fb = ctx->DrawBuffer;
   for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb == NULL || irb->mt == NULL)
         continue;

      intel_miptree_prepare_render(brw, irb->mt, irb->mt_level,
                                   irb->mt_layer, irb->layer_count,
                                   ctx->Color.sRGBEnabled,
                                   ctx->Color.BlendEnabled & (1 << i));
   }
}

/**
 * \brief Call this after drawing to mark which buffers need resolving
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * If the color buffer is a multisample window system buffer, then
 * mark that it needs a downsample.
 *
 * Also mark any render targets which will be textured as needing a render
 * cache flush.
 */
static void
brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   struct intel_renderbuffer *front_irb = NULL;
   struct intel_renderbuffer *back_irb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];

   if (_mesa_is_front_buffer_drawing(fb))
      front_irb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);

   if (front_irb)
      front_irb->need_downsample = true;
   if (back_irb)
      back_irb->need_downsample = true;
   if (depth_irb) {
      bool depth_written = brw_depth_writes_enabled(brw);
      if (depth_att->Layered) {
         intel_miptree_finish_depth(brw, depth_irb->mt,
                                    depth_irb->mt_level,
                                    depth_irb->mt_layer,
                                    depth_irb->layer_count,
                                    depth_written);
      } else {
         intel_miptree_finish_depth(brw, depth_irb->mt,
                                    depth_irb->mt_level,
                                    depth_irb->mt_layer, 1,
                                    depth_written);
      }
      if (depth_written)
         brw_render_cache_set_add_bo(brw, depth_irb->mt->bo);
   }

   if (ctx->Extensions.ARB_stencil_texturing &&
       stencil_irb && brw->stencil_write_enabled) {
      brw_render_cache_set_add_bo(brw, stencil_irb->mt->bo);
   }

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (!irb)
         continue;

      brw_render_cache_set_add_bo(brw, irb->mt->bo);
      intel_miptree_finish_render(brw, irb->mt, irb->mt_level,
                                  irb->mt_layer, irb->layer_count,
                                  ctx->Color.sRGBEnabled,
                                  ctx->Color.BlendEnabled & (1 << i));
   }
}
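
/* Copy the temporary miptree used by the depth/stencil alignment workaround
 * back into the renderbuffer's real miptree and release it, restoring the
 * draw offsets (summary comment added for clarity).
 */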
static void
intel_renderbuffer_move_temp_back(struct brw_context *brw,
                                  struct intel_renderbuffer *irb)
{
   if (irb->align_wa_mt == NULL)
      return;

   brw_render_cache_set_check_flush(brw, irb->align_wa_mt->bo);

   intel_miptree_copy_slice(brw, irb->align_wa_mt, 0, 0,
                            irb->mt,
                            irb->Base.Base.TexImage->Level, irb->mt_layer);

   intel_miptree_reference(&irb->align_wa_mt, NULL);

   /* Finally restore the x,y to correspond to full miptree. */
   intel_renderbuffer_set_draw_offset(irb);

   /* Make sure render surface state gets re-emitted with updated miptree. */
   brw->NewGLState |= _NEW_BUFFERS;
}

static void
brw_postdraw_reconcile_align_wa_slices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);

   if (depth_irb && depth_irb->align_wa_mt)
      intel_renderbuffer_move_temp_back(brw, depth_irb);

   if (stencil_irb && stencil_irb->align_wa_mt)
      intel_renderbuffer_move_temp_back(brw, stencil_irb);

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (!irb || irb->align_wa_mt == NULL)
         continue;

      intel_renderbuffer_move_temp_back(brw, irb);
   }
}

static void
brw_prepare_drawing(struct gl_context *ctx,
                    const struct gl_vertex_array *arrays[],
                    const struct _mesa_index_buffer *ib,
                    bool index_bounds_valid,
                    GLuint min_index,
                    GLuint max_index)
{
   struct brw_context *brw = brw_context(ctx);

   if (ctx->NewState)
      _mesa_update_state(ctx);

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures(brw);

   /* Find the highest sampler unit used by each shader program. A bit-count
    * won't work since ARB programs use the texture unit number as the sampler
    * index.
    */
   brw->wm.base.sampler_count =
      util_last_bit(ctx->FragmentProgram._Current->SamplersUsed);
   brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
      util_last_bit(ctx->GeometryProgram._Current->SamplersUsed) : 0;
   brw->tes.base.sampler_count = ctx->TessEvalProgram._Current ?
      util_last_bit(ctx->TessEvalProgram._Current->SamplersUsed) : 0;
   brw->tcs.base.sampler_count = ctx->TessCtrlProgram._Current ?
      util_last_bit(ctx->TessCtrlProgram._Current->SamplersUsed) : 0;
   brw->vs.base.sampler_count =
      util_last_bit(ctx->VertexProgram._Current->SamplersUsed);

   intel_prepare_render(brw);

   /* This workaround has to happen outside of brw_upload_render_state()
    * because it may flush the batchbuffer for a blit, affecting the state
    * flags.
    */
   brw_workaround_depthstencil_alignment(brw, 0);

   /* Resolves must occur after updating renderbuffers, updating context state,
    * and finalizing textures but before setting up any hardware state for
    * this draw call.
    */
   brw_predraw_resolve_inputs(brw);
   brw_predraw_resolve_framebuffer(brw);

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs(brw, arrays);

   brw->ib.ib = ib;
   brw->ctx.NewDriverState |= BRW_NEW_INDICES;

   brw->vb.index_bounds_valid = index_bounds_valid;
   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
}

static void
brw_finish_drawing(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   if (brw->always_flush_batch)
      intel_batchbuffer_flush(brw);

   brw_program_cache_check_size(brw);
   brw_postdraw_reconcile_align_wa_slices(brw);
   brw_postdraw_set_buffers_need_resolve(brw);

   if (brw->draw.draw_params_count_bo) {
      brw_bo_unreference(brw->draw.draw_params_count_bo);
      brw->draw.draw_params_count_bo = NULL;
   }
}

/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static void
brw_draw_single_prim(struct gl_context *ctx,
                     const struct gl_vertex_array *arrays[],
                     const struct _mesa_prim *prim,
                     unsigned prim_id,
                     struct brw_transform_feedback_object *xfb_obj,
                     unsigned stream,
                     struct gl_buffer_object *indirect)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   bool fail_next = false;

   /* Flag BRW_NEW_DRAW_CALL on every draw. This allows us to have
    * atoms that happen on every draw call.
    */
   brw->ctx.NewDriverState |= BRW_NEW_DRAW_CALL;

   /* Flush the batch if the batch/state buffers are nearly full. We can
    * grow them if needed, but this is not free, so we'd like to avoid it.
    */
   intel_batchbuffer_require_space(brw, 1500, RENDER_RING);
   brw_require_statebuffer_space(brw, 2400);
   intel_batchbuffer_save_state(brw);

   if (brw->num_instances != prim->num_instances ||
       brw->basevertex != prim->basevertex ||
       brw->baseinstance != prim->base_instance) {
      brw->num_instances = prim->num_instances;
      brw->basevertex = prim->basevertex;
      brw->baseinstance = prim->base_instance;
      if (prim_id > 0) { /* For i == 0 we just did this before the loop */
         brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
         brw_merge_inputs(brw, arrays);
      }
   }

   /* Determine if we need to flag BRW_NEW_VERTICES for updating the
    * gl_BaseVertexARB or gl_BaseInstanceARB values. For indirect draw, we
    * always flag if the shader uses one of the values. For direct draws,
    * we only flag if the values change.
    */
   const int new_basevertex =
      prim->indexed ? prim->basevertex : prim->start;
   const int new_baseinstance = prim->base_instance;
   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);
   if (prim_id > 0) {
      const bool uses_draw_parameters =
         vs_prog_data->uses_basevertex ||
         vs_prog_data->uses_baseinstance;

      if ((uses_draw_parameters && prim->is_indirect) ||
          (vs_prog_data->uses_basevertex &&
           brw->draw.params.gl_basevertex != new_basevertex) ||
          (vs_prog_data->uses_baseinstance &&
           brw->draw.params.gl_baseinstance != new_baseinstance))
         brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
   }

   brw->draw.params.gl_basevertex = new_basevertex;
   brw->draw.params.gl_baseinstance = new_baseinstance;
   brw_bo_unreference(brw->draw.draw_params_bo);

   if (prim->is_indirect) {
      /* Point draw_params_bo at the indirect buffer. */
      brw->draw.draw_params_bo =
         intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
      brw_bo_reference(brw->draw.draw_params_bo);
      brw->draw.draw_params_offset =
         prim->indirect_offset + (prim->indexed ? 12 : 8);
   } else {
      /* Set draw_params_bo to NULL so brw_prepare_vertices knows it
       * has to upload gl_BaseVertex and such if they're needed.
       */
      brw->draw.draw_params_bo = NULL;
      brw->draw.draw_params_offset = 0;
   }

   /* gl_DrawID always needs its own vertex buffer since it's not part of
    * the indirect parameter buffer. If the program uses gl_DrawID we need
    * to flag BRW_NEW_VERTICES. For the first iteration, we don't have
    * valid vs_prog_data, but we always flag BRW_NEW_VERTICES before
    * the loop.
    */
   brw->draw.gl_drawid = prim->draw_id;
   brw_bo_unreference(brw->draw.draw_id_bo);
   brw->draw.draw_id_bo = NULL;
   if (prim_id > 0 && vs_prog_data->uses_drawid)
      brw->ctx.NewDriverState |= BRW_NEW_VERTICES;

   if (devinfo->gen < 6)
      brw_set_prim(brw, prim);
   else
      gen6_set_prim(brw, prim);

retry:

   /* Note that brw->ctx.NewDriverState was set to a nonzero value before
    * the loop, and that the only state updated within the loop, outside of
    * this block, is by *_set_prim or intel_batchbuffer_flush(), which only
    * affects brw->ctx.NewDriverState.
    */
   if (brw->ctx.NewDriverState) {
      brw->batch.no_wrap = true;
      brw_upload_render_state(brw);
   }

   brw_emit_prim(brw, prim, brw->primitive, xfb_obj, stream);

   brw->batch.no_wrap = false;

   if (!brw_batch_has_aperture_space(brw, 0)) {
      if (!fail_next) {
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         fail_next = true;
         goto retry;
      } else {
         int ret = intel_batchbuffer_flush(brw);
         WARN_ONCE(ret == -ENOSPC,
                   "i965: Single primitive emit exceeded "
                   "available aperture space\n");
      }
   }

   /* Now that we know we haven't run out of aperture space, we can safely
    * reset the dirty bits.
    */
   if (brw->ctx.NewDriverState)
      brw_render_state_finished(brw);

   return;
}

void
brw_draw_prims(struct gl_context *ctx,
               const struct _mesa_prim *prims,
               GLuint nr_prims,
               const struct _mesa_index_buffer *ib,
               GLboolean index_bounds_valid,
               GLuint min_index,
               GLuint max_index,
               struct gl_transform_feedback_object *gl_xfb_obj,
               unsigned stream,
               struct gl_buffer_object *indirect)
{
   unsigned i;
   struct brw_context *brw = brw_context(ctx);
   const struct gl_vertex_array **arrays = ctx->Array._DrawArrays;
   int predicate_state = brw->predicate.state;
   int combine_op = MI_PREDICATE_COMBINEOP_SET;
   struct brw_transform_feedback_object *xfb_obj =
      (struct brw_transform_feedback_object *) gl_xfb_obj;

   if (!brw_check_conditional_render(brw))
      return;

   /* Handle primitive restart if needed */
   if (brw_handle_primitive_restart(ctx, prims, nr_prims, ib, indirect)) {
      /* The draw was handled, so we can exit now */
      return;
   }

   /* Do GL_SELECT and GL_FEEDBACK rendering using swrast, even though it
    * won't support all the extensions we support.
    */
   if (ctx->RenderMode != GL_RENDER) {
      perf_debug("%s render mode not supported in hardware\n",
                 _mesa_enum_to_string(ctx->RenderMode));
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw_prims(ctx, prims, nr_prims, ib,
                      index_bounds_valid, min_index, max_index, NULL, 0, NULL);
      return;
   }

   /* If we're going to have to upload any of the user's vertex arrays, then
    * get the minimum and maximum of their index buffer so we know what range
    * to upload.
    */
   if (!index_bounds_valid && !vbo_all_varyings_in_vbos(arrays)) {
      perf_debug("Scanning index buffer to compute index buffer bounds. "
                 "Use glDrawRangeElements() to avoid this.\n");
      vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims);
      index_bounds_valid = true;
   }

   brw_prepare_drawing(ctx, arrays, ib, index_bounds_valid, min_index,
                       max_index);
   /* Try drawing with the hardware, but don't do anything else if we can't
    * manage it. swrast doesn't support our featureset, so we can't fall back
    * to it.
    */

   if (brw->draw.draw_params_count_bo &&
       predicate_state == BRW_PREDICATE_STATE_USE_BIT) {
      /* We need to empty the MI_PREDICATE_DATA register since it might
       * already be set.
       */

      BEGIN_BATCH(4);
      OUT_BATCH(MI_PREDICATE_DATA);
      OUT_BATCH(0u);
      OUT_BATCH(MI_PREDICATE_DATA + 4);
      OUT_BATCH(0u);
      ADVANCE_BATCH();

      /* We need to combine the results of both predicates. */
      combine_op = MI_PREDICATE_COMBINEOP_AND;
   }

   for (i = 0; i < nr_prims; i++) {
      /* Implementation of ARB_indirect_parameters via predicates */
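      /* A rough note on the mechanism (added for clarity, inferred from the
       * flag names below): MI_PREDICATE_SRC0 is loaded with the draw count
       * read back from the parameter buffer, and MI_PREDICATE_SRC1 with this
       * draw's index. COMPAREOP_DELTAS_EQUAL compares the two sources and
       * LOADOP_LOADINV inverts the result, so the draw is predicated off
       * once its index compares equal to the count; combine_op ANDs in any
       * pre-existing conditional-render predicate.
       */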
      if (brw->draw.draw_params_count_bo) {
         struct brw_bo *draw_id_bo = NULL;
         uint32_t draw_id_offset;

         intel_upload_data(brw, &prims[i].draw_id, 4, 4, &draw_id_bo,
                           &draw_id_offset);

         brw_emit_pipe_control_flush(brw, PIPE_CONTROL_FLUSH_ENABLE);

         brw_load_register_mem(brw, MI_PREDICATE_SRC0,
                               brw->draw.draw_params_count_bo,
                               brw->draw.draw_params_count_offset);
         brw_load_register_mem(brw, MI_PREDICATE_SRC1, draw_id_bo,
                               draw_id_offset);

         BEGIN_BATCH(1);
         OUT_BATCH(GEN7_MI_PREDICATE |
                   MI_PREDICATE_LOADOP_LOADINV | combine_op |
                   MI_PREDICATE_COMPAREOP_DELTAS_EQUAL);
         ADVANCE_BATCH();

         brw->predicate.state = BRW_PREDICATE_STATE_USE_BIT;

         brw_bo_unreference(draw_id_bo);
      }

      brw_draw_single_prim(ctx, arrays, &prims[i], i, xfb_obj, stream,
                           indirect);
   }

   brw_finish_drawing(ctx);
   brw->predicate.state = predicate_state;
}

void
brw_draw_indirect_prims(struct gl_context *ctx,
                        GLuint mode,
                        struct gl_buffer_object *indirect_data,
                        GLsizeiptr indirect_offset,
                        unsigned draw_count,
                        unsigned stride,
                        struct gl_buffer_object *indirect_params,
                        GLsizeiptr indirect_params_offset,
                        const struct _mesa_index_buffer *ib)
{
   struct brw_context *brw = brw_context(ctx);
   struct _mesa_prim *prim;
   GLsizei i;

   prim = calloc(draw_count, sizeof(*prim));
   if (prim == NULL) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "gl%sDraw%sIndirect%s",
                  (draw_count > 1) ? "Multi" : "",
                  ib ? "Elements" : "Arrays",
                  indirect_params ? "CountARB" : "");
      return;
   }

   prim[0].begin = 1;
   prim[draw_count - 1].end = 1;
   for (i = 0; i < draw_count; ++i, indirect_offset += stride) {
      prim[i].mode = mode;
      prim[i].indexed = ib != NULL;
      prim[i].indirect_offset = indirect_offset;
      prim[i].is_indirect = 1;
      prim[i].draw_id = i;
   }

   if (indirect_params) {
      brw->draw.draw_params_count_bo =
         intel_buffer_object(indirect_params)->buffer;
      brw_bo_reference(brw->draw.draw_params_count_bo);
      brw->draw.draw_params_count_offset = indirect_params_offset;
   }

   brw_draw_prims(ctx, prim, draw_count,
                  ib, false, 0, ~0,
                  NULL, 0,
                  indirect_data);

   free(prim);
}

void
brw_draw_init(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct vbo_context *vbo = vbo_context(ctx);

   /* Register our drawing function:
    */
   vbo->draw_prims = brw_draw_prims;
   vbo->draw_indirect_prims = brw_draw_indirect_prims;

   for (int i = 0; i < VERT_ATTRIB_MAX; i++)
      brw->vb.inputs[i].buffer = -1;
   brw->vb.nr_buffers = 0;
   brw->vb.nr_enabled = 0;
}

void
brw_draw_destroy(struct brw_context *brw)
{
   unsigned i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      brw_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   brw_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}