/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/errno.h>

#include "main/glheader.h"
#include "main/context.h"
#include "main/condrender.h"
#include "main/samplerobj.h"
#include "main/state.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/transformfeedback.h"
#include "tnl/tnl.h"
#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "drivers/common/meta.h"

#include "brw_blorp.h"
#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

#define FILE_DEBUG_FLAG DEBUG_PRIMS

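/* Map GL primitive types to hardware 3DPRIM topology types.  The GL enum
 * values for primitive modes are small contiguous integers, so prim->mode
 * can index this table directly.
 */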
const GLuint prim_to_hw_prim[GL_TRIANGLE_STRIP_ADJACENCY+1] = {
   _3DPRIM_POINTLIST,
   _3DPRIM_LINELIST,
   _3DPRIM_LINELOOP,
   _3DPRIM_LINESTRIP,
   _3DPRIM_TRILIST,
   _3DPRIM_TRISTRIP,
   _3DPRIM_TRIFAN,
   _3DPRIM_QUADLIST,
   _3DPRIM_QUADSTRIP,
   _3DPRIM_POLYGON,
   _3DPRIM_LINELIST_ADJ,
   _3DPRIM_LINESTRIP_ADJ,
   _3DPRIM_TRILIST_ADJ,
   _3DPRIM_TRISTRIP_ADJ,
};

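/* Collapse each GL primitive type to the class of primitive actually
 * rasterized (points, lines, or triangles); state that depends only on
 * that class keys off this table.
 */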
static const GLenum reduced_prim[GL_POLYGON+1] = {
   GL_POINTS,
   GL_LINES,
   GL_LINES,
   GL_LINES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES
};

/* When the primitive changes, set a state bit and re-validate.  This isn't
 * the nicest solution; ideally we would make every program immune to the
 * active primitive type (i.e. able to cope with all possibilities), but
 * that may not be realistic.
 */
static void brw_set_prim(struct brw_context *brw,
                         const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t hw_prim = prim_to_hw_prim[prim->mode];

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   /* Slight optimization to avoid the GS program when not needed: quads can
    * be converted to triangle strips/fans only when flat shading is off (the
    * provoking vertex would otherwise differ) and both polygon modes are
    * GL_FILL (otherwise the original quad edges must be preserved).
    */
   if (prim->mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      hw_prim = _3DPRIM_TRISTRIP;

   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      hw_prim = _3DPRIM_TRIFAN;
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[prim->mode] != brw->reduced_primitive) {
         brw->reduced_primitive = reduced_prim[prim->mode];
         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }
}

static void gen6_set_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim)
{
   uint32_t hw_prim;

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   hw_prim = prim_to_hw_prim[prim->mode];

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
   }
}

/**
 * The hardware is capable of removing dangling vertices on its own; however,
 * prior to Gen6, we sometimes convert quads into trifans (and quad strips
 * into tristrips), since pre-Gen6 hardware requires a GS to render quads.
 * This function manually trims dangling vertices from a draw call involving
 * quads so that those dangling vertices won't get drawn when we convert to
 * trifans/tristrips.
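 *
 * For example, GL_QUADS with a vertex count of 11 is trimmed to 8 (two
 * complete quads), and GL_QUAD_STRIP with a count of 3 is trimmed to 0.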
 */
static GLuint trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}

static void brw_emit_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim,
                          uint32_t hw_prim)
{
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

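   /* For indexed draws, the hardware's start location is an offset (in
    * indices) into the index buffer, so fold in the offset at which our
    * index data was uploaded; the base vertex is added to each fetched
    * index, so fold the vertex re-basing bias in there.  For non-indexed
    * draws, the bias applies to the start location itself.
    */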
   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
   if (prim->indexed) {
      vertex_access_type = brw->gen >= 7 ?
         GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM :
         GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = brw->gen >= 7 ?
         GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL :
         GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   /* We only need to trim the primitive count on pre-Gen6. */
   if (brw->gen < 6)
      verts_per_instance = trim(prim->mode, prim->count);
   else
      verts_per_instance = prim->count;

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (brw->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(brw);
   }

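   /* On Gen7+, 3DPRIMITIVE grew an extra DWord: the topology type and
    * vertex access type moved out of the command header into DWord 1.
    * On earlier generations they are packed into DWord 0.
    */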
   if (brw->gen >= 7) {
      BEGIN_BATCH(7);
      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
      OUT_BATCH(hw_prim | vertex_access_type);
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
                hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
                vertex_access_type);
   }
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(prim->num_instances);
   OUT_BATCH(prim->base_instance);
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   /* Only used on Sandybridge; harmless to set elsewhere. */
   brw->batch.need_workaround_flush = true;

   if (brw->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(brw);
   }
}

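/* Point brw->vb.inputs[] at the current gl_client_array pointers for this
 * draw, dropping any vertex buffer BO references still held from the
 * previous draw.
 */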
static void brw_merge_inputs( struct brw_context *brw,
                              const struct gl_client_array *arrays[])
{
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].buffer = -1;
      brw->vb.inputs[i].glarray = arrays[i];
      brw->vb.inputs[i].attrib = (gl_vert_attrib) i;
   }
}

/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer and resolve the depth buffer of each
 * enabled depth texture.
 *
 * (In the future, this will also perform MSAA resolves).
 */
static void
brw_predraw_resolve_buffers(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *depth_irb;
   struct intel_texture_object *tex_obj;

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb)
      intel_renderbuffer_resolve_hiz(brw, depth_irb);

   /* Resolve depth buffer of each enabled depth texture, and color buffer of
    * each fast-clear-enabled color texture.
    */
   for (int i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      if (!ctx->Texture.Unit[i]._ReallyEnabled)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
      intel_miptree_resolve_color(brw, tex_obj->mt);
   }
}

/**
 * \brief Call this after drawing to mark which buffers need resolving.
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * If the color buffer is a multisample window system buffer, then
 * mark that it needs a downsample.
 */
static void brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   struct intel_renderbuffer *front_irb = NULL;
   struct intel_renderbuffer *back_irb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];

   if (brw->is_front_buffer_rendering)
      front_irb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);

   if (front_irb)
      intel_renderbuffer_set_needs_downsample(front_irb);
   if (back_irb)
      intel_renderbuffer_set_needs_downsample(back_irb);
   if (depth_irb && ctx->Depth.Mask)
      intel_renderbuffer_att_set_needs_depth_resolve(depth_att);
}

/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static bool brw_try_draw_prims( struct gl_context *ctx,
                                const struct gl_client_array *arrays[],
                                const struct _mesa_prim *prims,
                                GLuint nr_prims,
                                const struct _mesa_index_buffer *ib,
                                GLuint min_index,
                                GLuint max_index )
{
   struct brw_context *brw = brw_context(ctx);
   bool retval = true;
   GLuint i;
   bool fail_next = false;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* Find the highest sampler unit used by each shader program.  A bit-count
    * won't work since ARB programs use the texture unit number as the sampler
    * index.
    */
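   /* _mesa_fls() returns one plus the index of the most significant set bit
    * (and 0 when no bits are set), i.e. the number of sampler slots needed
    * to cover the highest sampler in use.
    */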
   brw->wm.base.sampler_count =
      _mesa_fls(ctx->FragmentProgram._Current->Base.SamplersUsed);
   brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
      _mesa_fls(ctx->GeometryProgram._Current->Base.SamplersUsed) : 0;
   brw->vs.base.sampler_count =
      _mesa_fls(ctx->VertexProgram._Current->Base.SamplersUsed);

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   intel_prepare_render(brw);

   /* This workaround has to happen outside of brw_upload_state() because it
    * may flush the batchbuffer for a blit, affecting the state flags.
    */
   brw_workaround_depthstencil_alignment(brw, 0);

   /* Resolves must occur after updating renderbuffers, updating context state,
    * and finalizing textures but before setting up any hardware state for
    * this draw call.
    */
   brw_predraw_resolve_buffers(brw);

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   for (i = 0; i < nr_prims; i++) {
      int estimated_max_prim_size;

      estimated_max_prim_size = 512; /* batchbuffer commands */
      estimated_max_prim_size += (BRW_MAX_TEX_UNIT *
                                  (sizeof(struct brw_sampler_state) +
                                   sizeof(struct gen5_sampler_default_color)));
      estimated_max_prim_size += 1024; /* gen6 VS push constants */
      estimated_max_prim_size += 1024; /* gen6 WM push constants */
      estimated_max_prim_size += 512; /* misc. pad */

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.
       */
      intel_batchbuffer_require_space(brw, estimated_max_prim_size, false);
      intel_batchbuffer_save_state(brw);

      if (brw->num_instances != prims[i].num_instances) {
         brw->num_instances = prims[i].num_instances;
         brw->state.dirty.brw |= BRW_NEW_VERTICES;
         brw_merge_inputs(brw, arrays);
      }
      if (brw->basevertex != prims[i].basevertex) {
         brw->basevertex = prims[i].basevertex;
         brw->state.dirty.brw |= BRW_NEW_VERTICES;
         brw_merge_inputs(brw, arrays);
      }
      if (brw->gen < 6)
         brw_set_prim(brw, &prims[i]);
      else
         gen6_set_prim(brw, &prims[i]);

   retry:
      /* Note that before the loop, brw->state.dirty.brw was set to != 0, and
       * that the only state updated inside the loop but outside this block
       * comes from *_set_prim or intel_batchbuffer_flush(), both of which
       * only affect brw->state.dirty.brw.
       */
      if (brw->state.dirty.brw) {
         brw->no_batch_wrap = true;
         brw_upload_state(brw);
      }

      brw_emit_prim(brw, &prims[i], brw->primitive);

      brw->no_batch_wrap = false;

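      /* If the kernel can't fit the batch's buffers into the aperture, roll
       * back to the state saved above, flush what we have, and retry the
       * primitive once in a fresh batch.  If even a lone primitive doesn't
       * fit, flush anyway and warn once.
       */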
      if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
         if (!fail_next) {
            intel_batchbuffer_reset_to_saved(brw);
            intel_batchbuffer_flush(brw);
            fail_next = true;
            goto retry;
         } else {
            if (intel_batchbuffer_flush(brw) == -ENOSPC) {
               static bool warned = false;

               if (!warned) {
                  fprintf(stderr, "i965: Single primitive emit exceeded "
                          "available aperture space\n");
                  warned = true;
               }

               retval = false;
            }
         }
      }
   }

   if (brw->always_flush_batch)
      intel_batchbuffer_flush(brw);

   brw_state_cache_check_size(brw);
   brw_postdraw_set_buffers_need_resolve(brw);

   return retval;
}

void brw_draw_prims( struct gl_context *ctx,
                     const struct _mesa_prim *prims,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
                     GLboolean index_bounds_valid,
                     GLuint min_index,
                     GLuint max_index,
                     struct gl_transform_feedback_object *unused_tfb_object)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gl_client_array **arrays = ctx->Array._DrawArrays;

   assert(unused_tfb_object == NULL);

   if (!_mesa_check_conditional_render(ctx))
      return;

   /* Handle primitive restart if needed */
   if (brw_handle_primitive_restart(ctx, prims, nr_prims, ib)) {
      /* The draw was handled, so we can exit now */
      return;
   }

   /* If we're going to have to upload any of the user's vertex arrays, then
    * get the minimum and maximum of their index buffer so we know what range
    * to upload.
    */
   if (!vbo_all_varyings_in_vbos(arrays) && !index_bounds_valid) {
      perf_debug("Scanning index buffer to compute index buffer bounds. "
                 "Use glDrawRangeElements() to avoid this.\n");
      vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims);
   }

   /* Do GL_SELECT and GL_FEEDBACK rendering using swrast, even though it
    * won't support all the extensions we support.
    */
   if (ctx->RenderMode != GL_RENDER) {
      perf_debug("%s render mode not supported in hardware\n",
                 _mesa_lookup_enum_by_nr(ctx->RenderMode));
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index);
      return;
   }

   /* Try drawing with the hardware, but don't do anything else if we can't
    * manage it.  swrast doesn't support our featureset, so we can't fall back
    * to it.
    */
   brw_try_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index);
}

void brw_draw_init( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   int i;

   /* Register our drawing function:
    */
   vbo->draw_prims = brw_draw_prims;

   for (i = 0; i < VERT_ATTRIB_MAX; i++)
      brw->vb.inputs[i].buffer = -1;
   brw->vb.nr_buffers = 0;
   brw->vb.nr_enabled = 0;
}

void brw_draw_destroy( struct brw_context *brw )
{
   int i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   drm_intel_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}