i965: Move PSCDEPTH calculations from draw time to compile time.
[mesa.git] / src / mesa / drivers / dri / i965 / gen8_depth_state.c
1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "intel_batchbuffer.h"
25 #include "intel_mipmap_tree.h"
26 #include "intel_fbo.h"
27 #include "intel_resolve_map.h"
28 #include "brw_context.h"
29 #include "brw_state.h"
30 #include "brw_defines.h"
31 #include "brw_wm.h"
32
33 /**
34 * Helper function to emit depth related command packets.
35 */
36 static void
37 emit_depth_packets(struct brw_context *brw,
38 struct intel_mipmap_tree *depth_mt,
39 uint32_t depthbuffer_format,
40 uint32_t depth_surface_type,
41 bool depth_writable,
42 struct intel_mipmap_tree *stencil_mt,
43 bool stencil_writable,
44 uint32_t stencil_offset,
45 bool hiz,
46 uint32_t width,
47 uint32_t height,
48 uint32_t depth,
49 uint32_t lod,
50 uint32_t min_array_element)
51 {
52 uint32_t mocs_wb = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
53
54 /* Skip repeated NULL depth/stencil emits (think 2D rendering). */
55 if (!depth_mt && !stencil_mt && brw->no_depth_or_stencil) {
56 assert(brw->hw_ctx);
57 return;
58 }
59
60 intel_emit_depth_stall_flushes(brw);
61
62 /* _NEW_BUFFERS, _NEW_DEPTH, _NEW_STENCIL */
63 BEGIN_BATCH(8);
64 OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER << 16 | (8 - 2));
65 OUT_BATCH(depth_surface_type << 29 |
66 (depth_writable ? (1 << 28) : 0) |
67 (stencil_mt != NULL && stencil_writable) << 27 |
68 (hiz ? 1 : 0) << 22 |
69 depthbuffer_format << 18 |
70 (depth_mt ? depth_mt->pitch - 1 : 0));
71 if (depth_mt) {
72 OUT_RELOC64(depth_mt->bo,
73 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
74 } else {
75 OUT_BATCH(0);
76 OUT_BATCH(0);
77 }
78 OUT_BATCH(((width - 1) << 4) | ((height - 1) << 18) | lod);
79 OUT_BATCH(((depth - 1) << 21) | (min_array_element << 10) | mocs_wb);
80 OUT_BATCH(0);
81 OUT_BATCH(((depth - 1) << 21) | (depth_mt ? depth_mt->qpitch >> 2 : 0));
82 ADVANCE_BATCH();
83
84 if (!hiz) {
85 BEGIN_BATCH(5);
86 OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER << 16 | (5 - 2));
87 OUT_BATCH(0);
88 OUT_BATCH(0);
89 OUT_BATCH(0);
90 OUT_BATCH(0);
91 ADVANCE_BATCH();
92 } else {
93 BEGIN_BATCH(5);
94 OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER << 16 | (5 - 2));
95 OUT_BATCH((depth_mt->hiz_mt->pitch - 1) | mocs_wb << 25);
96 OUT_RELOC64(depth_mt->hiz_mt->bo,
97 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
98 OUT_BATCH(depth_mt->hiz_mt->qpitch >> 2);
99 ADVANCE_BATCH();
100 }
101
102 if (stencil_mt == NULL) {
103 BEGIN_BATCH(5);
104 OUT_BATCH(GEN7_3DSTATE_STENCIL_BUFFER << 16 | (5 - 2));
105 OUT_BATCH(0);
106 OUT_BATCH(0);
107 OUT_BATCH(0);
108 OUT_BATCH(0);
109 ADVANCE_BATCH();
110 } else {
111 BEGIN_BATCH(5);
112 OUT_BATCH(GEN7_3DSTATE_STENCIL_BUFFER << 16 | (5 - 2));
113 /* The stencil buffer has quirky pitch requirements. From the Graphics
114 * BSpec: vol2a.11 3D Pipeline Windower > Early Depth/Stencil Processing
115 * > Depth/Stencil Buffer State > 3DSTATE_STENCIL_BUFFER [DevIVB+],
116 * field "Surface Pitch":
117 *
118 * The pitch must be set to 2x the value computed based on width, as
119 * the stencil buffer is stored with two rows interleaved.
120 *
121 * (Note that it is not 100% clear whether this intended to apply to
122 * Gen7; the BSpec flags this comment as "DevILK,DevSNB" (which would
123 * imply that it doesn't), however the comment appears on a "DevIVB+"
124 * page (which would imply that it does). Experiments with the hardware
125 * indicate that it does.
126 */
127 OUT_BATCH(HSW_STENCIL_ENABLED | mocs_wb << 22 |
128 (2 * stencil_mt->pitch - 1));
129 OUT_RELOC64(stencil_mt->bo,
130 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
131 stencil_offset);
132 OUT_BATCH(stencil_mt ? stencil_mt->qpitch >> 2 : 0);
133 ADVANCE_BATCH();
134 }
135
136 BEGIN_BATCH(3);
137 OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS << 16 | (3 - 2));
138 OUT_BATCH(depth_mt ? depth_mt->depth_clear_value : 0);
139 OUT_BATCH(1);
140 ADVANCE_BATCH();
141
142 brw->no_depth_or_stencil = !depth_mt && !stencil_mt;
143 }
144
145 /* Awful vtable-compatible function; should be cleaned up in the future. */
146 void
147 gen8_emit_depth_stencil_hiz(struct brw_context *brw,
148 struct intel_mipmap_tree *depth_mt,
149 uint32_t depth_offset,
150 uint32_t depthbuffer_format,
151 uint32_t depth_surface_type,
152 struct intel_mipmap_tree *stencil_mt,
153 bool hiz, bool separate_stencil,
154 uint32_t width, uint32_t height,
155 uint32_t tile_x, uint32_t tile_y)
156 {
157 struct gl_context *ctx = &brw->ctx;
158 struct gl_framebuffer *fb = ctx->DrawBuffer;
159 uint32_t surftype;
160 unsigned int depth = 1;
161 unsigned int min_array_element;
162 GLenum gl_target = GL_TEXTURE_2D;
163 unsigned int lod;
164 const struct intel_mipmap_tree *mt = depth_mt ? depth_mt : stencil_mt;
165 const struct intel_renderbuffer *irb = NULL;
166 const struct gl_renderbuffer *rb = NULL;
167
168 irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
169 if (!irb)
170 irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
171 rb = (struct gl_renderbuffer *) irb;
172
173 if (rb) {
174 depth = MAX2(irb->layer_count, 1);
175 if (rb->TexImage)
176 gl_target = rb->TexImage->TexObject->Target;
177 }
178
179 switch (gl_target) {
180 case GL_TEXTURE_CUBE_MAP_ARRAY:
181 case GL_TEXTURE_CUBE_MAP:
182 /* The PRM claims that we should use BRW_SURFACE_CUBE for this
183 * situation, but experiments show that gl_Layer doesn't work when we do
184 * this. So we use BRW_SURFACE_2D, since for rendering purposes this is
185 * equivalent.
186 */
187 surftype = BRW_SURFACE_2D;
188 depth *= 6;
189 break;
190 case GL_TEXTURE_3D:
191 assert(mt);
192 depth = MAX2(mt->logical_depth0, 1);
193 /* fallthrough */
194 default:
195 surftype = translate_tex_target(gl_target);
196 break;
197 }
198
199 min_array_element = irb ? irb->mt_layer : 0;
200
201 lod = irb ? irb->mt_level - irb->mt->first_level : 0;
202
203 if (mt) {
204 width = mt->logical_width0;
205 height = mt->logical_height0;
206 }
207
208 emit_depth_packets(brw, depth_mt, brw_depthbuffer_format(brw), surftype,
209 ctx->Depth.Mask != 0,
210 stencil_mt, ctx->Stencil._WriteEnabled,
211 brw->depthstencil.stencil_offset,
212 hiz, width, height, depth, lod, min_array_element);
213 }
214
215 /**
216 * Should we set the PMA FIX ENABLE bit?
217 *
218 * To avoid unnecessary depth related stalls, we need to set this bit.
219 * However, there is a very complicated formula which governs when it
220 * is legal to do so. This function computes that.
221 *
222 * See the documenation for the CACHE_MODE_1 register, bit 11.
223 */
224 static bool
225 pma_fix_enable(const struct brw_context *brw)
226 {
227 const struct gl_context *ctx = &brw->ctx;
228 /* _NEW_BUFFERS */
229 struct intel_renderbuffer *depth_irb =
230 intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
231
232 /* 3DSTATE_WM::ForceThreadDispatch is never used. */
233 const bool wm_force_thread_dispatch = false;
234
235 /* 3DSTATE_RASTER::ForceSampleCount is never used. */
236 const bool raster_force_sample_count_nonzero = false;
237
238 /* _NEW_BUFFERS:
239 * 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
240 * 3DSTATE_DEPTH_BUFFER::HIZ Enable
241 */
242 const bool hiz_enabled = depth_irb && intel_renderbuffer_has_hiz(depth_irb);
243
244 /* 3DSTATE_WM::Early Depth/Stencil Control != EDSC_PREPS (2).
245 * We always leave this set to EDSC_NORMAL (0).
246 */
247 const bool edsc_not_preps = true;
248
249 /* 3DSTATE_PS_EXTRA::PixelShaderValid is always true. */
250 const bool pixel_shader_valid = true;
251
252 /* !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
253 * 3DSTATE_WM_HZ_OP::DepthBufferResolve ||
254 * 3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
255 * 3DSTATE_WM_HZ_OP::StencilBufferClear)
256 *
257 * HiZ operations are done outside of the normal state upload, so they're
258 * definitely not happening now.
259 */
260 const bool in_hiz_op = false;
261
262 /* _NEW_DEPTH:
263 * DEPTH_STENCIL_STATE::DepthTestEnable
264 */
265 const bool depth_test_enabled = depth_irb && ctx->Depth.Test;
266
267 /* _NEW_DEPTH:
268 * 3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
269 * 3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE.
270 */
271 const bool depth_writes_enabled = ctx->Depth.Mask;
272
273 /* _NEW_STENCIL:
274 * !DEPTH_STENCIL_STATE::Stencil Buffer Write Enable ||
275 * !3DSTATE_DEPTH_BUFFER::Stencil Buffer Enable ||
276 * !3DSTATE_STENCIL_BUFFER::Stencil Buffer Enable
277 */
278 const bool stencil_writes_enabled = ctx->Stencil._WriteEnabled;
279
280 /* BRW_NEW_FS_PROG_DATA:
281 * 3DSTATE_PS_EXTRA::Pixel Shader Computed Depth Mode != PSCDEPTH_OFF
282 */
283 const bool ps_computes_depth =
284 brw->wm.prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF;
285
286 /* BRW_NEW_FS_PROG_DATA: 3DSTATE_PS_EXTRA::PixelShaderKillsPixels
287 * BRW_NEW_FS_PROG_DATA: 3DSTATE_PS_EXTRA::oMask Present to RenderTarget
288 * _NEW_MULTISAMPLE: 3DSTATE_PS_BLEND::AlphaToCoverageEnable
289 * _NEW_COLOR: 3DSTATE_PS_BLEND::AlphaTestEnable
290 *
291 * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable is always false.
292 * 3DSTATE_WM::ForceKillPix != ForceOff is always true.
293 */
294 const bool kill_pixel =
295 brw->wm.prog_data->uses_kill ||
296 brw->wm.prog_data->uses_omask ||
297 (ctx->Multisample._Enabled && ctx->Multisample.SampleAlphaToCoverage) ||
298 ctx->Color.AlphaEnabled;
299
300 /* The big formula in CACHE_MODE_1::NP PMA FIX ENABLE. */
301 return !wm_force_thread_dispatch &&
302 !raster_force_sample_count_nonzero &&
303 hiz_enabled &&
304 edsc_not_preps &&
305 pixel_shader_valid &&
306 !in_hiz_op &&
307 depth_test_enabled &&
308 (ps_computes_depth ||
309 (kill_pixel && (depth_writes_enabled || stencil_writes_enabled)));
310 }
311
/**
 * Write the PMA stall control bits into CACHE_MODE_1.
 *
 * Performs the MI_LOAD_REGISTER_IMM write bracketed by the PIPE_CONTROL
 * flushes the documentation requires, and caches the last-written value
 * in brw->pma_stall_bits so redundant (and expensive) writes are skipped.
 */
static void
write_pma_stall_bits(struct brw_context *brw, uint32_t pma_stall_bits)
{
   struct gl_context *ctx = &brw->ctx;

   /* If we haven't actually changed the value, bail now to avoid unnecessary
    * pipeline stalls and register writes.
    */
   if (brw->pma_stall_bits == pma_stall_bits)
      return;

   brw->pma_stall_bits = pma_stall_bits;

   /* According to the PIPE_CONTROL documentation, software should emit a
    * PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set prior
    * to the LRI. If stencil buffer writes are enabled, then a Render Cache
    * Flush is also necessary.
    */
   const uint32_t render_cache_flush =
      ctx->Stencil._WriteEnabled ? PIPE_CONTROL_WRITE_FLUSH : 0;
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_CS_STALL |
                               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                               render_cache_flush);

   /* CACHE_MODE_1 is a non-privileged register. */
   BEGIN_BATCH(3);
   OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
   OUT_BATCH(GEN7_CACHE_MODE_1);
   /* GEN8_HIZ_PMA_MASK_BITS supplies the register's mask dword so only the
    * PMA-related bits are modified by this write.
    */
   OUT_BATCH(GEN8_HIZ_PMA_MASK_BITS | pma_stall_bits);
   ADVANCE_BATCH();

   /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
    * Flush bits is often necessary. We do it regardless because it's easier.
    * The render cache flush is also necessary if stencil writes are enabled.
    */
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_DEPTH_STALL |
                               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                               render_cache_flush);

}
354
355 static void
356 gen8_emit_pma_stall_workaround(struct brw_context *brw)
357 {
358 uint32_t bits = 0;
359 if (pma_fix_enable(brw))
360 bits |= GEN8_HIZ_NP_PMA_FIX_ENABLE | GEN8_HIZ_NP_EARLY_Z_FAILS_DISABLE;
361
362 write_pma_stall_bits(brw, bits);
363 }
364
/* Tracked-state atom that re-evaluates the PMA fix whenever any state
 * feeding the pma_fix_enable() formula may have changed (framebuffer,
 * alpha test, depth/stencil state, multisample state, or the fragment
 * shader's compiled program data).
 */
const struct brw_tracked_state gen8_pma_fix = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR |
              _NEW_DEPTH |
              _NEW_MULTISAMPLE |
              _NEW_STENCIL,
      .brw = BRW_NEW_FS_PROG_DATA,
   },
   .emit = gen8_emit_pma_stall_workaround
};
376
/**
 * Emit packets to perform a depth/HiZ resolve or fast depth/stencil clear.
 *
 * See the "Optimized Depth Buffer Clear and/or Stencil Buffer Clear" section
 * of the hardware documentation for details.
 */
void
gen8_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
              unsigned int level, unsigned int layer, enum gen6_hiz_op op)
{
   if (op == GEN6_HIZ_OP_NONE)
      return;

   /* Disable the PMA stall fix since we're about to do a HiZ operation. */
   write_pma_stall_bits(brw, 0);

   assert(mt->first_level == 0);
   assert(mt->logical_depth0 >= 1);

   /* If we're operating on LOD 0, align to 8x4 to meet the alignment
    * requirements for most HiZ operations.  Otherwise, use the actual size
    * to allow the hardware to calculate the miplevel offsets correctly.
    */
   uint32_t surface_width = ALIGN(mt->logical_width0, level == 0 ? 8 : 1);
   uint32_t surface_height = ALIGN(mt->logical_height0, level == 0 ? 4 : 1);

   /* The basic algorithm is:
    * - If needed, emit 3DSTATE_{DEPTH,HIER_DEPTH,STENCIL}_BUFFER and
    *   3DSTATE_CLEAR_PARAMS packets to set up the relevant buffers.
    * - If needed, emit 3DSTATE_DRAWING_RECTANGLE.
    * - Emit 3DSTATE_WM_HZ_OP with a bit set for the particular operation.
    * - Do a special PIPE_CONTROL to trigger an implicit rectangle primitive.
    * - Emit 3DSTATE_WM_HZ_OP with no bits set to return to normal rendering.
    */
   emit_depth_packets(brw, mt,
                      brw_depth_format(brw, mt->format),
                      BRW_SURFACE_2D,
                      true, /* depth writes */
                      NULL, false, 0, /* no stencil for now */
                      true, /* hiz */
                      surface_width,
                      surface_height,
                      mt->logical_depth0,
                      level,
                      layer); /* min_array_element */

   /* Depth buffer clears and HiZ resolves must use an 8x4 aligned rectangle.
    * Note that intel_miptree_level_enable_hiz disables HiZ for miplevels > 0
    * which aren't 8x4 aligned, so expanding the size is safe - it'll just
    * draw into empty padding space.
    */
   unsigned rect_width = ALIGN(minify(mt->logical_width0, level), 8);
   unsigned rect_height = ALIGN(minify(mt->logical_height0, level), 4);

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_DRAWING_RECTANGLE << 16 | (4 - 2));
   OUT_BATCH(0);
   OUT_BATCH(((rect_width - 1) & 0xffff) | ((rect_height - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();

   /* Emit 3DSTATE_WM_HZ_OP to override pipeline state for the particular
    * resolve or clear operation we want to perform.
    */
   uint32_t dw1 = 0;

   switch (op) {
   case GEN6_HIZ_OP_DEPTH_RESOLVE:
      dw1 |= GEN8_WM_HZ_DEPTH_RESOLVE;
      break;
   case GEN6_HIZ_OP_HIZ_RESOLVE:
      dw1 |= GEN8_WM_HZ_HIZ_RESOLVE;
      break;
   case GEN6_HIZ_OP_DEPTH_CLEAR:
      dw1 |= GEN8_WM_HZ_DEPTH_CLEAR;
      break;
   case GEN6_HIZ_OP_NONE:
      unreachable("Should not get here.");
   }

   /* NOTE(review): ffs(num_samples) - 1 is log2(num_samples) when the
    * sample count is a power of two, which is presumably guaranteed by
    * the miptree code — confirm against intel_mipmap_tree creation.
    */
   if (mt->num_samples > 0)
      dw1 |= SET_FIELD(ffs(mt->num_samples) - 1, GEN8_WM_HZ_NUM_SAMPLES);

   BEGIN_BATCH(5);
   OUT_BATCH(_3DSTATE_WM_HZ_OP << 16 | (5 - 2));
   OUT_BATCH(dw1);
   OUT_BATCH(0);
   OUT_BATCH(SET_FIELD(rect_width, GEN8_WM_HZ_CLEAR_RECTANGLE_X_MAX) |
             SET_FIELD(rect_height, GEN8_WM_HZ_CLEAR_RECTANGLE_Y_MAX));
   OUT_BATCH(SET_FIELD(0xFFFF, GEN8_WM_HZ_SAMPLE_MASK));
   ADVANCE_BATCH();

   /* Emit a PIPE_CONTROL with "Post-Sync Operation" set to "Write Immediate
    * Data", and no other bits set.  This causes 3DSTATE_WM_HZ_OP's state to
    * take effect, and spawns a rectangle primitive.
    */
   brw_emit_pipe_control_write(brw,
                               PIPE_CONTROL_WRITE_IMMEDIATE,
                               brw->batch.workaround_bo, 0, 0, 0);

   /* Emit 3DSTATE_WM_HZ_OP again to disable the state overrides. */
   BEGIN_BATCH(5);
   OUT_BATCH(_3DSTATE_WM_HZ_OP << 16 | (5 - 2));
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();

   /* Mark this buffer as needing a TC flush, as we've rendered to it. */
   brw_render_cache_set_add_bo(brw, mt->bo);

   /* We've clobbered all of the depth packets, and the drawing rectangle,
    * so we need to ensure those packets are re-emitted before the next
    * primitive.
    *
    * Setting _NEW_DEPTH and _NEW_BUFFERS covers it, but is rather overkill.
    */
   brw->state.dirty.mesa |= _NEW_DEPTH | _NEW_BUFFERS;
}