src/mesa/drivers/dri/i965/gen8_depth_state.c
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_fbo.h"
#include "intel_resolve_map.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"

/**
 * Helper function to emit depth-related command packets.
 */
static void
emit_depth_packets(struct brw_context *brw,
                   struct intel_mipmap_tree *depth_mt,
                   uint32_t depthbuffer_format,
                   uint32_t depth_surface_type,
                   bool depth_writable,
                   struct intel_mipmap_tree *stencil_mt,
                   bool stencil_writable,
                   bool hiz,
                   uint32_t width,
                   uint32_t height,
                   uint32_t depth,
                   uint32_t lod,
                   uint32_t min_array_element)
{
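   /* Pick the write-back MOCS (memory object control state) value for this
    * generation; Skylake uses a different encoding than Broadwell.
    */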
   uint32_t mocs_wb = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;

   /* Skip repeated NULL depth/stencil emits (think 2D rendering). */
   if (!depth_mt && !stencil_mt && brw->no_depth_or_stencil) {
      assert(brw->hw_ctx);
      return;
   }

   brw_emit_depth_stall_flushes(brw);

   /* _NEW_BUFFERS, _NEW_DEPTH, _NEW_STENCIL */
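   /* The DWords below pack, in order: the surface type, write enables, HiZ
    * enable, format, and pitch; the 64-bit surface address; width, height,
    * and LOD; depth, minimum array element, and MOCS; and finally the render
    * target view extent plus the surface QPitch, which is programmed in
    * units of 4 rows (hence the >> 2).
    */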
   BEGIN_BATCH(8);
   OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER << 16 | (8 - 2));
   OUT_BATCH(depth_surface_type << 29 |
             (depth_writable ? (1 << 28) : 0) |
             (stencil_mt != NULL && stencil_writable) << 27 |
             (hiz ? 1 : 0) << 22 |
             depthbuffer_format << 18 |
             (depth_mt ? depth_mt->pitch - 1 : 0));
   if (depth_mt) {
      OUT_RELOC64(depth_mt->bo,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
   } else {
      OUT_BATCH(0);
      OUT_BATCH(0);
   }
   OUT_BATCH(((width - 1) << 4) | ((height - 1) << 18) | lod);
   OUT_BATCH(((depth - 1) << 21) | (min_array_element << 10) | mocs_wb);
   OUT_BATCH(0);
   OUT_BATCH(((depth - 1) << 21) | (depth_mt ? depth_mt->qpitch >> 2 : 0));
   ADVANCE_BATCH();

   if (!hiz) {
      BEGIN_BATCH(5);
      OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(5);
      OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER << 16 | (5 - 2));
      OUT_BATCH((depth_mt->hiz_buf->pitch - 1) | mocs_wb << 25);
      OUT_RELOC64(depth_mt->hiz_buf->bo,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
      OUT_BATCH(depth_mt->hiz_buf->qpitch >> 2);
      ADVANCE_BATCH();
   }

   if (stencil_mt == NULL) {
      BEGIN_BATCH(5);
      OUT_BATCH(GEN7_3DSTATE_STENCIL_BUFFER << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(5);
      OUT_BATCH(GEN7_3DSTATE_STENCIL_BUFFER << 16 | (5 - 2));
      /* The stencil buffer has quirky pitch requirements. From the Graphics
       * BSpec: vol2a.11 3D Pipeline Windower > Early Depth/Stencil Processing
       * > Depth/Stencil Buffer State > 3DSTATE_STENCIL_BUFFER [DevIVB+],
       * field "Surface Pitch":
       *
       *    The pitch must be set to 2x the value computed based on width, as
       *    the stencil buffer is stored with two rows interleaved.
       *
       * (Note that it is not 100% clear whether this was intended to apply to
       * Gen7; the BSpec flags this comment as "DevILK,DevSNB" (which would
       * imply that it doesn't), however the comment appears on a "DevIVB+"
       * page (which would imply that it does). Experiments with the hardware
       * indicate that it does.)
       */
      OUT_BATCH(HSW_STENCIL_ENABLED | mocs_wb << 22 |
                (2 * stencil_mt->pitch - 1));
      OUT_RELOC64(stencil_mt->bo,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
      OUT_BATCH(stencil_mt ? stencil_mt->qpitch >> 2 : 0);
      ADVANCE_BATCH();
   }

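   /* 3DSTATE_CLEAR_PARAMS: DW1 holds the depth clear value, and the low bit
    * of the final DWord marks that value as valid.
    */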
   BEGIN_BATCH(3);
   OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS << 16 | (3 - 2));
   OUT_BATCH(depth_mt ? depth_mt->depth_clear_value : 0);
   OUT_BATCH(1);
   ADVANCE_BATCH();

   brw->no_depth_or_stencil = !depth_mt && !stencil_mt;
}

/* Awful vtable-compatible function; should be cleaned up in the future. */
void
gen8_emit_depth_stencil_hiz(struct brw_context *brw,
                            struct intel_mipmap_tree *depth_mt,
                            uint32_t depth_offset,
                            uint32_t depthbuffer_format,
                            uint32_t depth_surface_type,
                            struct intel_mipmap_tree *stencil_mt,
                            bool hiz, bool separate_stencil,
                            uint32_t width, uint32_t height,
                            uint32_t tile_x, uint32_t tile_y)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   uint32_t surftype;
   unsigned int depth = 1;
   unsigned int min_array_element;
   GLenum gl_target = GL_TEXTURE_2D;
   unsigned int lod;
   const struct intel_mipmap_tree *mt = depth_mt ? depth_mt : stencil_mt;
   const struct intel_renderbuffer *irb = NULL;
   const struct gl_renderbuffer *rb = NULL;

   irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   if (!irb)
      irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   rb = (struct gl_renderbuffer *) irb;

   if (rb) {
      depth = MAX2(irb->layer_count, 1);
      if (rb->TexImage)
         gl_target = rb->TexImage->TexObject->Target;
   }

   switch (gl_target) {
   case GL_TEXTURE_CUBE_MAP_ARRAY:
   case GL_TEXTURE_CUBE_MAP:
      /* The PRM claims that we should use BRW_SURFACE_CUBE for this
       * situation, but experiments show that gl_Layer doesn't work when we do
       * this. So we use BRW_SURFACE_2D, since for rendering purposes this is
       * equivalent.
       */
      surftype = BRW_SURFACE_2D;
      depth *= 6;
      break;
   case GL_TEXTURE_3D:
      assert(mt);
      depth = MAX2(mt->logical_depth0, 1);
      surftype = translate_tex_target(gl_target);
      break;
   case GL_TEXTURE_1D_ARRAY:
   case GL_TEXTURE_1D:
      if (brw->gen >= 9) {
         /* WaDisable1DDepthStencil. Skylake+ doesn't support 1D depth
          * textures but it does allow pretending it's a 2D texture
          * instead.
          */
         surftype = BRW_SURFACE_2D;
         break;
      }
      /* fallthrough */
   default:
      surftype = translate_tex_target(gl_target);
      break;
   }

   min_array_element = irb ? irb->mt_layer : 0;

   lod = irb ? irb->mt_level - irb->mt->first_level : 0;

   if (mt) {
      width = mt->logical_width0;
      height = mt->logical_height0;
   }

   emit_depth_packets(brw, depth_mt, brw_depthbuffer_format(brw), surftype,
                      ctx->Depth.Mask != 0,
                      stencil_mt, ctx->Stencil._WriteEnabled,
                      hiz, width, height, depth, lod, min_array_element);
}

/**
 * Should we set the PMA FIX ENABLE bit?
 *
 * To avoid unnecessary depth-related stalls, we need to set this bit.
 * However, there is a very complicated formula which governs when it
 * is legal to do so. This function computes that.
 *
 * See the documentation for the CACHE_MODE_1 register, bit 11.
 */
static bool
pma_fix_enable(const struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;
   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);

   /* 3DSTATE_WM::ForceThreadDispatch is never used. */
   const bool wm_force_thread_dispatch = false;

   /* 3DSTATE_RASTER::ForceSampleCount is never used. */
   const bool raster_force_sample_count_nonzero = false;

   /* _NEW_BUFFERS:
    * 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
    * 3DSTATE_DEPTH_BUFFER::HIZ Enable
    */
   const bool hiz_enabled = depth_irb && intel_renderbuffer_has_hiz(depth_irb);

   /* BRW_NEW_FS_PROG_DATA:
    * 3DSTATE_WM::Early Depth/Stencil Control != EDSC_PREPS (2).
    */
   const bool edsc_not_preps = !brw->wm.prog_data->early_fragment_tests;

   /* 3DSTATE_PS_EXTRA::PixelShaderValid is always true. */
   const bool pixel_shader_valid = true;

   /* !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    *   3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    *   3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    *   3DSTATE_WM_HZ_OP::StencilBufferClear)
    *
    * HiZ operations are done outside of the normal state upload, so they're
    * definitely not happening now.
    */
   const bool in_hiz_op = false;

   /* _NEW_DEPTH:
    * DEPTH_STENCIL_STATE::DepthTestEnable
    */
   const bool depth_test_enabled = depth_irb && ctx->Depth.Test;

   /* _NEW_DEPTH:
    * 3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
    * 3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE.
    */
   const bool depth_writes_enabled = ctx->Depth.Mask;

   /* _NEW_STENCIL:
    * !DEPTH_STENCIL_STATE::Stencil Buffer Write Enable ||
    * !3DSTATE_DEPTH_BUFFER::Stencil Buffer Enable ||
    * !3DSTATE_STENCIL_BUFFER::Stencil Buffer Enable
    */
   const bool stencil_writes_enabled = ctx->Stencil._WriteEnabled;

   /* BRW_NEW_FS_PROG_DATA:
    * 3DSTATE_PS_EXTRA::Pixel Shader Computed Depth Mode != PSCDEPTH_OFF
    */
   const bool ps_computes_depth =
      brw->wm.prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF;

   /* BRW_NEW_FS_PROG_DATA: 3DSTATE_PS_EXTRA::PixelShaderKillsPixels
    * BRW_NEW_FS_PROG_DATA: 3DSTATE_PS_EXTRA::oMask Present to RenderTarget
    * _NEW_MULTISAMPLE:     3DSTATE_PS_BLEND::AlphaToCoverageEnable
    * _NEW_COLOR:           3DSTATE_PS_BLEND::AlphaTestEnable
    *
    * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable is always false.
    * 3DSTATE_WM::ForceKillPix != ForceOff is always true.
    */
   const bool kill_pixel =
      brw->wm.prog_data->uses_kill ||
      brw->wm.prog_data->uses_omask ||
      (ctx->Multisample._Enabled && ctx->Multisample.SampleAlphaToCoverage) ||
      ctx->Color.AlphaEnabled;

   /* The big formula in CACHE_MODE_1::NP PMA FIX ENABLE. */
   return !wm_force_thread_dispatch &&
          !raster_force_sample_count_nonzero &&
          hiz_enabled &&
          edsc_not_preps &&
          pixel_shader_valid &&
          !in_hiz_op &&
          depth_test_enabled &&
          (ps_computes_depth ||
           (kill_pixel && (depth_writes_enabled || stencil_writes_enabled)));
}

static void
write_pma_stall_bits(struct brw_context *brw, uint32_t pma_stall_bits)
{
   struct gl_context *ctx = &brw->ctx;

   /* If we haven't actually changed the value, bail now to avoid unnecessary
    * pipeline stalls and register writes.
    */
   if (brw->pma_stall_bits == pma_stall_bits)
      return;

   brw->pma_stall_bits = pma_stall_bits;

   /* According to the PIPE_CONTROL documentation, software should emit a
    * PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set prior
    * to the LRI. If stencil buffer writes are enabled, then a Render Cache
    * Flush is also necessary.
    */
   const uint32_t render_cache_flush =
      ctx->Stencil._WriteEnabled ? PIPE_CONTROL_RENDER_TARGET_FLUSH : 0;
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_CS_STALL |
                               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                               render_cache_flush);

   /* CACHE_MODE_1 is a non-privileged register. */
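   /* It is also a masked register: GEN8_HIZ_PMA_MASK_BITS sits in the upper
    * 16 bits and acts as a per-bit write enable, so this LRI only touches
    * the PMA-related bits.
    */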
   BEGIN_BATCH(3);
   OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
   OUT_BATCH(GEN7_CACHE_MODE_1);
   OUT_BATCH(GEN8_HIZ_PMA_MASK_BITS | pma_stall_bits);
   ADVANCE_BATCH();

   /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
    * Flush bits set is often necessary, so we emit it unconditionally rather
    * than tracking exactly when it is required. The render cache flush is
    * also necessary if stencil writes are enabled.
    */
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_DEPTH_STALL |
                               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                               render_cache_flush);
}

static void
gen8_emit_pma_stall_workaround(struct brw_context *brw)
{
   uint32_t bits = 0;

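   /* The NP PMA fix is only programmed on Gen8; Gen9+ takes the early return
    * below and leaves CACHE_MODE_1 untouched.
    */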
   if (brw->gen >= 9)
      return;

   if (pma_fix_enable(brw))
      bits |= GEN8_HIZ_NP_PMA_FIX_ENABLE | GEN8_HIZ_NP_EARLY_Z_FAILS_DISABLE;

   write_pma_stall_bits(brw, bits);
}

const struct brw_tracked_state gen8_pma_fix = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR |
              _NEW_DEPTH |
              _NEW_MULTISAMPLE |
              _NEW_STENCIL,
      .brw = BRW_NEW_FS_PROG_DATA,
   },
   .emit = gen8_emit_pma_stall_workaround
};

/**
 * Emit packets to perform a depth/HiZ resolve or fast depth/stencil clear.
 *
 * See the "Optimized Depth Buffer Clear and/or Stencil Buffer Clear" section
 * of the hardware documentation for details.
 */
void
gen8_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
              unsigned int level, unsigned int layer, enum gen6_hiz_op op)
{
   if (op == GEN6_HIZ_OP_NONE)
      return;

   /* Disable the PMA stall fix since we're about to do a HiZ operation. */
   if (brw->gen == 8)
      write_pma_stall_bits(brw, 0);

   assert(mt->first_level == 0);
   assert(mt->logical_depth0 >= 1);

   /* If we're operating on LOD 0, align to 8x4 to meet the alignment
    * requirements for most HiZ operations. Otherwise, use the actual size
    * to allow the hardware to calculate the miplevel offsets correctly.
    */
   uint32_t surface_width = ALIGN(mt->logical_width0, level == 0 ? 8 : 1);
   uint32_t surface_height = ALIGN(mt->logical_height0, level == 0 ? 4 : 1);

   /* From the documentation for 3DSTATE_WM_HZ_OP: "3DSTATE_MULTISAMPLE packet
    * must be used prior to this packet to change the Number of Multisamples.
    * This packet must not be used to change Number of Multisamples in a
    * rendering sequence."
    */
   if (brw->num_samples != mt->num_samples) {
      gen8_emit_3dstate_multisample(brw, mt->num_samples);
      brw->NewGLState |= _NEW_MULTISAMPLE;
   }

   /* The basic algorithm is:
    * - If needed, emit 3DSTATE_{DEPTH,HIER_DEPTH,STENCIL}_BUFFER and
    *   3DSTATE_CLEAR_PARAMS packets to set up the relevant buffers.
    * - If needed, emit 3DSTATE_DRAWING_RECTANGLE.
    * - Emit 3DSTATE_WM_HZ_OP with a bit set for the particular operation.
    * - Do a special PIPE_CONTROL to trigger an implicit rectangle primitive.
    * - Emit 3DSTATE_WM_HZ_OP with no bits set to return to normal rendering.
    */
   emit_depth_packets(brw, mt,
                      brw_depth_format(brw, mt->format),
                      BRW_SURFACE_2D,
                      true, /* depth writes */
                      NULL, false, /* no stencil for now */
                      true, /* hiz */
                      surface_width,
                      surface_height,
                      mt->logical_depth0,
                      level,
                      layer); /* min_array_element */

   /* Depth buffer clears and HiZ resolves must use an 8x4 aligned rectangle.
    * Note that intel_miptree_level_enable_hiz disables HiZ for miplevels > 0
    * which aren't 8x4 aligned, so expanding the size is safe - it'll just
    * draw into empty padding space.
    */
   unsigned rect_width = ALIGN(minify(mt->logical_width0, level), 8);
   unsigned rect_height = ALIGN(minify(mt->logical_height0, level), 4);

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_DRAWING_RECTANGLE << 16 | (4 - 2));
   OUT_BATCH(0);
   OUT_BATCH(((rect_width - 1) & 0xffff) | ((rect_height - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();

   /* Emit 3DSTATE_WM_HZ_OP to override pipeline state for the particular
    * resolve or clear operation we want to perform.
    */
   uint32_t dw1 = 0;

   switch (op) {
   case GEN6_HIZ_OP_DEPTH_RESOLVE:
      dw1 |= GEN8_WM_HZ_DEPTH_RESOLVE;
      break;
   case GEN6_HIZ_OP_HIZ_RESOLVE:
      dw1 |= GEN8_WM_HZ_HIZ_RESOLVE;
      break;
   case GEN6_HIZ_OP_DEPTH_CLEAR:
      dw1 |= GEN8_WM_HZ_DEPTH_CLEAR;
      break;
   case GEN6_HIZ_OP_NONE:
      unreachable("Should not get here.");
   }

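   /* The NUM_SAMPLES field takes log2 of the sample count; for power-of-two
    * counts, ffs(n) - 1 computes exactly that.
    */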
   if (mt->num_samples > 0)
      dw1 |= SET_FIELD(ffs(mt->num_samples) - 1, GEN8_WM_HZ_NUM_SAMPLES);

   BEGIN_BATCH(5);
   OUT_BATCH(_3DSTATE_WM_HZ_OP << 16 | (5 - 2));
   OUT_BATCH(dw1);
   OUT_BATCH(0);
   OUT_BATCH(SET_FIELD(rect_width, GEN8_WM_HZ_CLEAR_RECTANGLE_X_MAX) |
             SET_FIELD(rect_height, GEN8_WM_HZ_CLEAR_RECTANGLE_Y_MAX));
   OUT_BATCH(SET_FIELD(0xFFFF, GEN8_WM_HZ_SAMPLE_MASK));
   ADVANCE_BATCH();

   /* Emit a PIPE_CONTROL with "Post-Sync Operation" set to "Write Immediate
    * Data", and no other bits set. This causes 3DSTATE_WM_HZ_OP's state to
    * take effect, and spawns a rectangle primitive.
    */
   brw_emit_pipe_control_write(brw,
                               PIPE_CONTROL_WRITE_IMMEDIATE,
                               brw->workaround_bo, 0, 0, 0);

   /* Emit 3DSTATE_WM_HZ_OP again to disable the state overrides. */
   BEGIN_BATCH(5);
   OUT_BATCH(_3DSTATE_WM_HZ_OP << 16 | (5 - 2));
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();

   /* Mark this buffer as needing a TC flush, as we've rendered to it. */
   brw_render_cache_set_add_bo(brw, mt->bo);

   /* We've clobbered all of the depth packets, and the drawing rectangle,
    * so we need to ensure those packets are re-emitted before the next
    * primitive.
    *
    * Setting _NEW_DEPTH and _NEW_BUFFERS covers it, but is rather overkill.
    */
   brw->NewGLState |= _NEW_DEPTH | _NEW_BUFFERS;
}