mesa: Move RasterDiscard to toplevel of gl_context.
[mesa.git] src/mesa/drivers/dri/i965/brw_draw.c
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/errno.h>

#include "main/glheader.h"
#include "main/context.h"
#include "main/condrender.h"
#include "main/samplerobj.h"
#include "main/state.h"
#include "main/enums.h"
#include "main/macros.h"
#include "tnl/tnl.h"
#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "drivers/common/meta.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

#define FILE_DEBUG_FLAG DEBUG_PRIMS

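/* Map GL primitive types to hardware 3DPRIM topology types. GL_POINTS
 * through GL_POLYGON are the consecutive enum values 0..9, so both this
 * table and reduced_prim below are indexed directly by the GL primitive
 * mode.
 */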
static GLuint prim_to_hw_prim[GL_POLYGON+1] = {
   _3DPRIM_POINTLIST,
   _3DPRIM_LINELIST,
   _3DPRIM_LINELOOP,
   _3DPRIM_LINESTRIP,
   _3DPRIM_TRILIST,
   _3DPRIM_TRISTRIP,
   _3DPRIM_TRIFAN,
   _3DPRIM_QUADLIST,
   _3DPRIM_QUADSTRIP,
   _3DPRIM_POLYGON
};


static const GLenum reduced_prim[GL_POLYGON+1] = {
   GL_POINTS,
   GL_LINES,
   GL_LINES,
   GL_LINES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES
};


/* When the primitive changes, set a state bit and re-validate. This isn't
 * the nicest solution; ideally we would deal with it by making all the
 * programs immune to the active primitive (i.e. able to cope with all
 * possibilities), but that may not be realistic.
 */
static void brw_set_prim(struct brw_context *brw,
                         const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->intel.ctx;
   uint32_t hw_prim = prim_to_hw_prim[prim->mode];

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (prim->mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      hw_prim = _3DPRIM_TRISTRIP;

   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      hw_prim = _3DPRIM_TRIFAN;
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[prim->mode] != brw->intel.reduced_primitive) {
         brw->intel.reduced_primitive = reduced_prim[prim->mode];
         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }
}

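/* Gen6+ version of brw_set_prim(). Unlike the Gen4/5 path, the reduced
 * primitive is not tracked here. During a HiZ operation, the hardware
 * primitive is forced to RECTLIST regardless of the GL primitive.
 */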
static void gen6_set_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim)
{
   uint32_t hw_prim;

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   if (brw->hiz.op) {
      assert(prim->mode == GL_TRIANGLES);
      hw_prim = _3DPRIM_RECTLIST;
   } else {
      hw_prim = prim_to_hw_prim[prim->mode];
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
   }
}


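/* Drop incomplete primitives from the vertex count: quads are only drawn
 * in complete groups of four, and a quad strip needs at least four
 * vertices and an even count. Other primitive types are passed through
 * unchanged.
 */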
static GLuint trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}


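/* Emit a 3DPRIMITIVE packet for Gen4-6. Indexed draws use "random" vertex
 * access and offset the start vertex by the index buffer's start offset;
 * sequential draws fold the vertex buffer bias into the start vertex
 * instead.
 */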
static void brw_emit_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim,
                          uint32_t hw_prim)
{
   struct intel_context *intel = &brw->intel;
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
   if (prim->indexed) {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   verts_per_instance = trim(prim->mode, prim->count);

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

   BEGIN_BATCH(6);
   OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
             hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
             vertex_access_type);
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(1); // instance count
   OUT_BATCH(0); // start instance location
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   intel->batch.need_workaround_flush = true;

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
}

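/* Gen7 version of the 3DPRIMITIVE emit. The packet grew to seven dwords,
 * with the topology type and vertex access type moving out of the command
 * dword into DW1.
 */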
static void gen7_emit_prim(struct brw_context *brw,
                           const struct _mesa_prim *prim,
                           uint32_t hw_prim)
{
   struct intel_context *intel = &brw->intel;
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
   if (prim->indexed) {
      vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   verts_per_instance = trim(prim->mode, prim->count);

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

   BEGIN_BATCH(7);
   OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
   OUT_BATCH(hw_prim | vertex_access_type);
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(1); // instance count
   OUT_BATCH(0); // start instance location
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
}


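/* Update the driver's vertex input state from the vbo module's arrays:
 * drop references to the previous draw's buffer objects, record the new
 * gl_client_array pointers, and pack each active attribute's size into
 * vb.info.sizes (2 bits per attribute). If the packed sizes changed,
 * flag BRW_NEW_INPUT_DIMENSIONS so dependent state is re-emitted.
 */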
static void brw_merge_inputs( struct brw_context *brw,
                              const struct gl_client_array *arrays[])
{
   struct brw_vertex_info old = brw->vb.info;
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   memset(&brw->vb.info, 0, sizeof(brw->vb.info));

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].buffer = -1;
      brw->vb.inputs[i].glarray = arrays[i];
      brw->vb.inputs[i].attrib = (gl_vert_attrib) i;

      if (arrays[i]->StrideB != 0)
         brw->vb.info.sizes[i/16] |= (brw->vb.inputs[i].glarray->Size - 1) <<
            ((i%16) * 2);
   }

   /* Raise statechanges if input sizes have changed. */
   if (memcmp(brw->vb.info.sizes, old.sizes, sizeof(old.sizes)) != 0)
      brw->state.dirty.brw |= BRW_NEW_INPUT_DIMENSIONS;
}

/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer and resolve the depth buffer of each
 * enabled depth texture.
 *
 * (In the future, this will also perform MSAA resolves).
 */
static void
brw_predraw_resolve_buffers(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct intel_renderbuffer *depth_irb;
   struct intel_texture_object *tex_obj;
   bool did_resolve = false;

   /* Avoid recursive HiZ op. */
   if (brw->hiz.op) {
      return;
   }

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb && depth_irb->mt) {
      did_resolve |= intel_renderbuffer_resolve_hiz(intel, depth_irb);
   }

   /* Resolve depth buffer of each enabled depth texture. */
   for (int i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      if (!ctx->Texture.Unit[i]._ReallyEnabled)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      did_resolve |= intel_miptree_all_slices_resolve_depth(intel, tex_obj->mt);
   }

   if (did_resolve) {
      /* Call vbo_bind_array() to synchronize the vbo module's vertex
       * attributes to the gl_context's.
       *
       * Details
       * -------
       * The vbo module tracks vertex attributes separately from the
       * gl_context. Specifically, the vbo module maintains vertex attributes
       * in vbo_exec_context::array::inputs, which is synchronized with
       * gl_context::Array::ArrayObj::VertexAttrib by vbo_bind_array().
       * vbo_draw_arrays() calls vbo_bind_array() to perform the
       * synchronization before calling the real draw call,
       * vbo_context::draw_arrays.
       *
       * At this point (after performing a resolve meta-op but before calling
       * vbo_bind_array), the gl_context's vertex attributes have been
       * restored to their original state (that is, their state before the
       * meta-op began), but the vbo module's vertex attributes are those used
       * in the last meta-op. Therefore we must manually synchronize the two
       * with vbo_bind_array() before continuing with the original draw
       * command.
       */
      _mesa_update_state(ctx);
      vbo_bind_arrays(ctx);
      _mesa_update_state(ctx);
   }
}

/**
 * \brief Call this after drawing to mark which buffers need resolving
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * (In the future, this will also mark needed MSAA resolves).
 */
static void brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);

   if (depth_irb &&
       ctx->Depth.Mask &&
       !brw->hiz.op) {
      intel_renderbuffer_set_needs_depth_resolve(depth_irb);
   }
}

/**
 * Update internal counters based on the drawing operation described in
 * prim.
 */
static void
brw_update_primitive_count(struct brw_context *brw,
                           const struct _mesa_prim *prim)
{
   uint32_t count = count_tessellated_primitives(prim);
   brw->sol.primitives_generated += count;
   if (brw->intel.ctx.TransformFeedback.CurrentObject->Active) {
      /* Update brw->sol.svbi_0_max_index to reflect the amount by which the
       * hardware is going to increment SVBI 0 when this drawing operation
       * occurs. This is necessary because the kernel does not (yet) save and
       * restore GPU registers when context switching, so we'll need to be
       * able to reload SVBI 0 with the correct value in case we have to start
       * a new batch buffer.
       */
      unsigned svbi_postincrement_value =
         brw->gs.prog_data->svbi_postincrement_value;
      uint32_t space_avail =
         (brw->sol.svbi_0_max_index - brw->sol.svbi_0_starting_index)
         / svbi_postincrement_value;
      uint32_t primitives_written = MIN2(space_avail, count);
      brw->sol.svbi_0_starting_index +=
         svbi_postincrement_value * primitives_written;

      /* And update the TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN query. */
      brw->sol.primitives_written += primitives_written;
   }
}

/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static bool brw_try_draw_prims( struct gl_context *ctx,
                                const struct gl_client_array *arrays[],
                                const struct _mesa_prim *prim,
                                GLuint nr_prims,
                                const struct _mesa_index_buffer *ib,
                                GLuint min_index,
                                GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   bool retval = true;
   GLuint i;
   bool fail_next = false;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   /* Resolves must occur after updating state and finalizing textures but
    * before setting up any hardware state for this draw call.
    */
   brw_predraw_resolve_buffers(brw);

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   /* Have to validate state quite late. Will rebuild tnl_program,
    * which depends on varying information.
    *
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so can't access it earlier.
    */

   intel_prepare_render(intel);

   for (i = 0; i < nr_prims; i++) {
      int estimated_max_prim_size;

      estimated_max_prim_size = 512; /* batchbuffer commands */
      estimated_max_prim_size += (BRW_MAX_TEX_UNIT *
                                  (sizeof(struct brw_sampler_state) +
                                   sizeof(struct gen5_sampler_default_color)));
      estimated_max_prim_size += 1024; /* gen6 VS push constants */
      estimated_max_prim_size += 1024; /* gen6 WM push constants */
      estimated_max_prim_size += 512; /* misc. pad */

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.
       */
      intel_batchbuffer_require_space(intel, estimated_max_prim_size, false);
      intel_batchbuffer_save_state(intel);

      if (intel->gen < 6)
         brw_set_prim(brw, &prim[i]);
      else
         gen6_set_prim(brw, &prim[i]);

retry:
      /* Note that before the loop, brw->state.dirty.brw was set to != 0, and
       * that the state updated in the loop outside of this block is that in
       * *_set_prim or intel_batchbuffer_flush(), which only impacts
       * brw->state.dirty.brw.
       */
      if (brw->state.dirty.brw) {
         intel->no_batch_wrap = true;
         brw_upload_state(brw);

         if (unlikely(brw->intel.Fallback)) {
            intel->no_batch_wrap = false;
            retval = false;
            goto out;
         }
      }

      if (intel->gen >= 7)
         gen7_emit_prim(brw, &prim[i], brw->primitive);
      else
         brw_emit_prim(brw, &prim[i], brw->primitive);

      intel->no_batch_wrap = false;

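      /* If the emitted batch would no longer fit in the GPU aperture, roll
       * back to the state saved above, flush what was already queued, and
       * retry the primitive once in an empty batch. If it still doesn't
       * fit, flush anyway and warn (once) that a single primitive exceeded
       * the available aperture space.
       */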
      if (dri_bufmgr_check_aperture_space(&intel->batch.bo, 1)) {
         if (!fail_next) {
            intel_batchbuffer_reset_to_saved(intel);
            intel_batchbuffer_flush(intel);
            fail_next = true;
            goto retry;
         } else {
            if (intel_batchbuffer_flush(intel) == -ENOSPC) {
               static bool warned = false;

               if (!warned) {
                  fprintf(stderr, "i965: Single primitive emit exceeded "
                          "available aperture space\n");
                  warned = true;
               }

               retval = false;
            }
         }
      }

      if (!_mesa_meta_in_progress(ctx))
         brw_update_primitive_count(brw, &prim[i]);
   }

   if (intel->always_flush_batch)
      intel_batchbuffer_flush(intel);
out:

   brw_state_cache_check_size(brw);
   brw_postdraw_set_buffers_need_resolve(brw);

   return retval;
}

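/* The vbo module's draw entry point. Checks conditional rendering,
 * computes (or rebases) index bounds when the arrays aren't all in VBOs,
 * then attempts the hardware draw path. If that fails, the draw is handed
 * to the software TNL/swrast path instead.
 */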
void brw_draw_prims( struct gl_context *ctx,
                     const struct gl_client_array *arrays[],
                     const struct _mesa_prim *prim,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
                     GLboolean index_bounds_valid,
                     GLuint min_index,
                     GLuint max_index,
                     struct gl_transform_feedback_object *tfb_vertcount )
{
   bool retval;

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (!vbo_all_varyings_in_vbos(arrays)) {
      if (!index_bounds_valid)
         vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);

      /* Decide if we want to rebase. If so we end up recursing once
       * only into this function.
       */
      if (min_index != 0 && !vbo_any_varyings_in_vbos(arrays)) {
         vbo_rebase_prims(ctx, arrays,
                          prim, nr_prims,
                          ib, min_index, max_index,
                          brw_draw_prims );
         return;
      }
   }

   /* Make a first attempt at drawing:
    */
   retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

   /* Otherwise, we really are out of memory. Pass the drawing
    * command to the software tnl module, which will in turn call
    * swrast to do the drawing.
    */
   if (!retval) {
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
   }
}

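/* One-time draw-module setup: register brw_draw_prims() as the vbo
 * module's draw callback and reset the vertex buffer bookkeeping.
 */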
void brw_draw_init( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   int i;

   /* Register our drawing function:
    */
   vbo->draw_prims = brw_draw_prims;

   for (i = 0; i < VERT_ATTRIB_MAX; i++)
      brw->vb.inputs[i].buffer = -1;
   brw->vb.nr_buffers = 0;
   brw->vb.nr_enabled = 0;
}

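/* Context-teardown counterpart to brw_draw_init(): drop the buffer object
 * references held for vertex and index data and reset the enabled lists.
 */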
void brw_draw_destroy( struct brw_context *brw )
{
   int i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   drm_intel_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}