i965/gen4: Simplify depth/stencil invalidate check
[mesa.git] / src / mesa / drivers / dri / i965 / brw_misc_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32
33
34 #include "intel_batchbuffer.h"
35 #include "intel_fbo.h"
36 #include "intel_mipmap_tree.h"
37
38 #include "brw_context.h"
39 #include "brw_state.h"
40 #include "brw_defines.h"
41 #include "compiler/brw_eu_defines.h"
42
43 #include "main/framebuffer.h"
44 #include "main/fbobject.h"
45 #include "main/format_utils.h"
46 #include "main/glformats.h"
47
48 /**
49 * Upload pointers to the per-stage state.
50 *
51 * The state pointers in this packet are all relative to the general state
52 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
53 */
54 static void
55 upload_pipelined_state_pointers(struct brw_context *brw)
56 {
57 if (brw->gen == 5) {
58 /* Need to flush before changing clip max threads for errata. */
59 BEGIN_BATCH(1);
60 OUT_BATCH(MI_FLUSH);
61 ADVANCE_BATCH();
62 }
63
64 BEGIN_BATCH(7);
65 OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
66 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
67 brw->vs.base.state_offset);
68 if (brw->ff_gs.prog_active)
69 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
70 brw->ff_gs.state_offset | 1);
71 else
72 OUT_BATCH(0);
73 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
74 brw->clip.state_offset | 1);
75 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
76 brw->sf.state_offset);
77 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
78 brw->wm.base.state_offset);
79 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
80 brw->cc.state_offset);
81 ADVANCE_BATCH();
82
83 brw->ctx.NewDriverState |= BRW_NEW_PSP;
84 }
85
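/**
 * Emit the pipelined state pointers together with the URB fence and CS URB
 * state, which are emitted as a group.
 */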
86 static void
87 upload_psp_urb_cbs(struct brw_context *brw)
88 {
89 upload_pipelined_state_pointers(brw);
90 brw_upload_urb_fence(brw);
91 brw_upload_cs_urb_state(brw);
92 }
93
94 const struct brw_tracked_state brw_psp_urb_cbs = {
95 .dirty = {
96 .mesa = 0,
97 .brw = BRW_NEW_BATCH |
98 BRW_NEW_BLORP |
99 BRW_NEW_FF_GS_PROG_DATA |
100 BRW_NEW_GEN4_UNIT_STATE |
101 BRW_NEW_STATE_BASE_ADDRESS |
102 BRW_NEW_URB_FENCE,
103 },
104 .emit = upload_psp_urb_cbs,
105 };
106
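/**
 * Return the BRW_DEPTHFORMAT_* value for the current draw buffer.
 *
 * If only a packed depth/stencil renderbuffer is bound (as the stencil
 * attachment, with no separate stencil miptree), its format is used.
 * With no usable attachment, fall back to D32_FLOAT.
 */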
107 uint32_t
108 brw_depthbuffer_format(struct brw_context *brw)
109 {
110 struct gl_context *ctx = &brw->ctx;
111 struct gl_framebuffer *fb = ctx->DrawBuffer;
112 struct intel_renderbuffer *drb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
113 struct intel_renderbuffer *srb;
114
115 if (!drb &&
116 (srb = intel_get_renderbuffer(fb, BUFFER_STENCIL)) &&
117 !srb->mt->stencil_mt &&
118 (intel_rb_format(srb) == MESA_FORMAT_Z24_UNORM_S8_UINT ||
119 intel_rb_format(srb) == MESA_FORMAT_Z32_FLOAT_S8X24_UINT)) {
120 drb = srb;
121 }
122
123 if (!drb)
124 return BRW_DEPTHFORMAT_D32_FLOAT;
125
126 return brw_depth_format(brw, drb->mt->format);
127 }
128
129 /**
130 * Returns the mask of how many bits of x and y must be handled through the
131 * depthbuffer's draw offset x and y fields.
132 *
133 * The draw offset x/y field of the depthbuffer packet is unfortunately shared
134 * between the depth, hiz, and stencil buffers. Because it can be hard to get
135 * all 3 to agree on this value, we want to do as much drawing offset
136 * adjustment as possible by moving the base offset of the 3 buffers, which is
137 * restricted to tile boundaries.
138 *
139 * For each buffer, the remainder must be applied through the x/y draw offset.
140 * This returns the worst-case mask of the low bits that have to go into the
141 * packet. If the 3 buffers don't agree on the drawing offset ANDed with this
142 * mask, then we're in trouble.
143 */
144 static void
145 brw_get_depthstencil_tile_masks(struct intel_mipmap_tree *depth_mt,
146 uint32_t depth_level,
147 uint32_t depth_layer,
148 struct intel_mipmap_tree *stencil_mt,
149 uint32_t *out_tile_mask_x,
150 uint32_t *out_tile_mask_y)
151 {
152 uint32_t tile_mask_x = 0, tile_mask_y = 0;
153
154 if (depth_mt) {
155 intel_get_tile_masks(depth_mt->tiling,
156 depth_mt->cpp,
157 &tile_mask_x, &tile_mask_y);
158 assert(!intel_miptree_level_has_hiz(depth_mt, depth_level));
159 }
160
161 if (stencil_mt) {
162 if (stencil_mt->stencil_mt)
163 stencil_mt = stencil_mt->stencil_mt;
164
165 if (stencil_mt->format == MESA_FORMAT_S_UINT8) {
166 /* Separate stencil buffer uses 64x64 tiles. */
167 tile_mask_x |= 63;
168 tile_mask_y |= 63;
169 } else {
170 uint32_t stencil_tile_mask_x, stencil_tile_mask_y;
171 intel_get_tile_masks(stencil_mt->tiling,
172 stencil_mt->cpp,
173 &stencil_tile_mask_x,
174 &stencil_tile_mask_y);
175
176 tile_mask_x |= stencil_tile_mask_x;
177 tile_mask_y |= stencil_tile_mask_y;
178 }
179 }
180
181 *out_tile_mask_x = tile_mask_x;
182 *out_tile_mask_y = tile_mask_y;
183 }
184
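/**
 * Return the miptree that actually holds the stencil data for a
 * renderbuffer: the separate stencil miptree if there is one, otherwise the
 * renderbuffer's own miptree (or NULL if there is no renderbuffer).
 */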
185 static struct intel_mipmap_tree *
186 get_stencil_miptree(struct intel_renderbuffer *irb)
187 {
188 if (!irb)
189 return NULL;
190 if (irb->mt->stencil_mt)
191 return irb->mt->stencil_mt;
192 return irb->mt;
193 }
194
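/**
 * Pre-Gen6 workaround: the depth/stencil draw offsets must agree across the
 * bound buffers and be 8-pixel aligned.  Rebase misaligned renderbuffers
 * into temporary miptrees as needed and record the resulting miptrees and
 * tile offsets in brw->depthstencil for the depth buffer packet emission.
 */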
195 void
196 brw_workaround_depthstencil_alignment(struct brw_context *brw,
197 GLbitfield clear_mask)
198 {
199 struct gl_context *ctx = &brw->ctx;
200 struct gl_framebuffer *fb = ctx->DrawBuffer;
201 bool rebase_depth = false;
202 bool rebase_stencil = false;
203 struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
204 struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
205 struct intel_mipmap_tree *depth_mt = NULL;
206 struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
207 uint32_t tile_x = 0, tile_y = 0, stencil_tile_x = 0, stencil_tile_y = 0;
208 uint32_t stencil_draw_x = 0, stencil_draw_y = 0;
209 bool invalidate_depth = clear_mask & BUFFER_BIT_DEPTH;
210 bool invalidate_stencil = clear_mask & BUFFER_BIT_STENCIL;
211
212 if (depth_irb)
213 depth_mt = depth_irb->mt;
214
215 /* Initialize brw->depthstencil to 'nop' workaround state.
216 */
217 brw->depthstencil.tile_x = 0;
218 brw->depthstencil.tile_y = 0;
219 brw->depthstencil.depth_offset = 0;
220 brw->depthstencil.depth_mt = NULL;
221 brw->depthstencil.stencil_mt = NULL;
222 if (depth_irb)
223 brw->depthstencil.depth_mt = depth_mt;
224 if (stencil_irb)
225 brw->depthstencil.stencil_mt = get_stencil_miptree(stencil_irb);
226
227 /* Gen6+ doesn't require the workarounds, since we always program the
228 * surface state at the start of the whole surface.
229 */
230 if (brw->gen >= 6)
231 return;
232
233 /* Check if depth buffer is in depth/stencil format. If so, then it's only
234 * safe to invalidate it if we're also clearing stencil.
235 */
236 if (depth_irb && invalidate_depth &&
237 _mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL)
238 invalidate_depth = invalidate_stencil && stencil_irb;
239
240 uint32_t tile_mask_x, tile_mask_y;
241 brw_get_depthstencil_tile_masks(depth_mt,
242 depth_mt ? depth_irb->mt_level : 0,
243 depth_mt ? depth_irb->mt_layer : 0,
244 stencil_mt,
245 &tile_mask_x, &tile_mask_y);
246
247 if (depth_irb) {
248 tile_x = depth_irb->draw_x & tile_mask_x;
249 tile_y = depth_irb->draw_y & tile_mask_y;
250
251 /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
252 * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
253 * Coordinate Offset X/Y":
254 *
255 * "The 3 LSBs of both offsets must be zero to ensure correct
256 * alignment"
257 */
258 if (tile_x & 7 || tile_y & 7)
259 rebase_depth = true;
260
261 /* We didn't even have intra-tile offsets before g45. */
262 if (!brw->has_surface_tile_offset) {
263 if (tile_x || tile_y)
264 rebase_depth = true;
265 }
266
267 if (rebase_depth) {
268 perf_debug("HW workaround: blitting depth level %d to a temporary "
269 "to fix alignment (depth tile offset %d,%d)\n",
270 depth_irb->mt_level, tile_x, tile_y);
271 intel_renderbuffer_move_to_temp(brw, depth_irb, invalidate_depth);
272 /* In the case of stencil_irb being the same packed depth/stencil
273 * texture but not the same rb, make it point at our rebased mt, too.
274 */
275 if (stencil_irb &&
276 stencil_irb != depth_irb &&
277 stencil_irb->mt == depth_mt) {
278 intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
279 intel_renderbuffer_set_draw_offset(stencil_irb);
280 }
281
282 stencil_mt = get_stencil_miptree(stencil_irb);
283
284 tile_x = depth_irb->draw_x & tile_mask_x;
285 tile_y = depth_irb->draw_y & tile_mask_y;
286 }
287
288 if (stencil_irb) {
289 assert(stencil_irb->mt == depth_irb->mt);
290 assert(stencil_irb->mt_level == depth_irb->mt_level);
291 assert(stencil_irb->mt_layer == depth_irb->mt_layer);
292 }
293 }
294
295 /* If we have (just) stencil, check it for ignored low bits as well */
296 if (!depth_irb && stencil_irb) {
297 intel_miptree_get_image_offset(stencil_mt,
298 stencil_irb->mt_level,
299 stencil_irb->mt_layer,
300 &stencil_draw_x, &stencil_draw_y);
301 stencil_tile_x = stencil_draw_x & tile_mask_x;
302 stencil_tile_y = stencil_draw_y & tile_mask_y;
303
304 if (stencil_tile_x & 7 || stencil_tile_y & 7)
305 rebase_stencil = true;
306
307 if (!brw->has_surface_tile_offset) {
308 if (stencil_tile_x || stencil_tile_y)
309 rebase_stencil = true;
310 }
311 }
312
313 if (rebase_stencil) {
314 /* If the stencil buffer needs a rebase, there is no depth attachment and
315 * the combined depth/stencil miptree is used for stencil only. Otherwise,
316 * when a depth attachment is present, both stencil and depth point at the
317 * same miptree; rebasing the depth buffer is handled first and updates the
318 * stencil attachment accordingly, so stencil is rebased here only when
319 * there is no depth attachment.
320 */
321 assert(!depth_irb);
322 perf_debug("HW workaround: blitting stencil level %d to a temporary "
323 "to fix alignment (stencil tile offset %d,%d)\n",
324 stencil_irb->mt_level, stencil_tile_x, stencil_tile_y);
325
326 intel_renderbuffer_move_to_temp(brw, stencil_irb, invalidate_stencil);
327 stencil_mt = get_stencil_miptree(stencil_irb);
328
329 intel_miptree_get_image_offset(stencil_mt,
330 stencil_irb->mt_level,
331 stencil_irb->mt_layer,
332 &stencil_draw_x, &stencil_draw_y);
333 stencil_tile_x = stencil_draw_x & tile_mask_x;
334 stencil_tile_y = stencil_draw_y & tile_mask_y;
335 }
336
337 if (!depth_irb) {
338 tile_x = stencil_tile_x;
339 tile_y = stencil_tile_y;
340 }
341
342 /* While we just tried to get everything aligned, we may have failed to do
343 * so in the case of rendering to array or 3D textures, where nonzero faces
344 * will still have an offset post-rebase. At least give an informative
345 * warning.
346 */
347 WARN_ONCE((tile_x & 7) || (tile_y & 7),
348 "Depth/stencil buffer needs alignment to 8-pixel boundaries.\n"
349 "Truncating offset, bad rendering may occur.\n");
350 tile_x &= ~7;
351 tile_y &= ~7;
352
353 /* Now, after rebasing, save off the new depthstencil state so the hardware
354 * packets can just dereference that without re-calculating tile offsets.
355 */
356 brw->depthstencil.tile_x = tile_x;
357 brw->depthstencil.tile_y = tile_y;
358 if (depth_irb) {
359 depth_mt = depth_irb->mt;
360 brw->depthstencil.depth_mt = depth_mt;
361 brw->depthstencil.depth_offset =
362 intel_miptree_get_aligned_offset(depth_mt,
363 depth_irb->draw_x & ~tile_mask_x,
364 depth_irb->draw_y & ~tile_mask_y);
365 assert(!intel_renderbuffer_has_hiz(depth_irb));
366 }
367 if (stencil_irb) {
368 stencil_mt = get_stencil_miptree(stencil_irb);
369
370 brw->depthstencil.stencil_mt = stencil_mt;
371 assert(stencil_mt->format != MESA_FORMAT_S_UINT8);
372
373 if (!depth_irb) {
374 brw->depthstencil.depth_offset =
375 intel_miptree_get_aligned_offset(
376 stencil_mt,
377 stencil_irb->draw_x & ~tile_mask_x,
378 stencil_irb->draw_y & ~tile_mask_y);
379 }
380 }
381 }
382
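/**
 * Emit the depth/stencil buffer state for the current framebuffer, using the
 * miptrees and tile offsets saved by brw_workaround_depthstencil_alignment(),
 * via the per-generation emit_depth_stencil_hiz hook.
 */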
383 void
384 brw_emit_depthbuffer(struct brw_context *brw)
385 {
386 struct gl_context *ctx = &brw->ctx;
387 struct gl_framebuffer *fb = ctx->DrawBuffer;
388 /* _NEW_BUFFERS */
389 struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
390 struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
391 struct intel_mipmap_tree *depth_mt = brw->depthstencil.depth_mt;
392 struct intel_mipmap_tree *stencil_mt = brw->depthstencil.stencil_mt;
393 uint32_t tile_x = brw->depthstencil.tile_x;
394 uint32_t tile_y = brw->depthstencil.tile_y;
395 bool hiz = depth_irb && intel_renderbuffer_has_hiz(depth_irb);
396 bool separate_stencil = false;
397 uint32_t depth_surface_type = BRW_SURFACE_NULL;
398 uint32_t depthbuffer_format = BRW_DEPTHFORMAT_D32_FLOAT;
399 uint32_t depth_offset = 0;
400 uint32_t width = 1, height = 1;
401
402 if (stencil_mt) {
403 separate_stencil = stencil_mt->format == MESA_FORMAT_S_UINT8;
404
405 /* Gen7+ supports only separate stencil */
406 assert(separate_stencil || brw->gen < 7);
407 }
408
409 /* If there's a packed depth/stencil bound to stencil only, we need to
410 * emit the packed depth/stencil buffer packet.
411 */
412 if (!depth_irb && stencil_irb && !separate_stencil) {
413 depth_irb = stencil_irb;
414 depth_mt = stencil_mt;
415 }
416
417 if (depth_irb && depth_mt) {
418 /* When 3DSTATE_DEPTH_BUFFER.Separate_Stencil_Enable is set, then
419 * 3DSTATE_DEPTH_BUFFER.Surface_Format is not permitted to be a packed
420 * depthstencil format.
421 *
422 * Gens prior to 7 require that HiZ_Enable and Separate_Stencil_Enable be
423 * set to the same value. Gen7 and later implicitly always set
424 * Separate_Stencil_Enable; software cannot disable it.
425 */
426 if ((brw->gen < 7 && hiz) || brw->gen >= 7) {
427 assert(!_mesa_is_format_packed_depth_stencil(depth_mt->format));
428 }
429
430 /* Prior to Gen7, if using separate stencil, hiz must be enabled. */
431 assert(brw->gen >= 7 || !separate_stencil || hiz);
432
433 assert(brw->gen < 6 || depth_mt->tiling == I915_TILING_Y);
434 assert(!hiz || depth_mt->tiling == I915_TILING_Y);
435
436 depthbuffer_format = brw_depthbuffer_format(brw);
437 depth_surface_type = BRW_SURFACE_2D;
438 depth_offset = brw->depthstencil.depth_offset;
439 width = depth_irb->Base.Base.Width;
440 height = depth_irb->Base.Base.Height;
441 } else if (separate_stencil) {
442 /*
443 * There exists a separate stencil buffer but no depth buffer.
444 *
445 * The stencil buffer inherits most of its fields from
446 * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
447 * height.
448 *
449 * The tiled bit must be set. From the Sandybridge PRM, Volume 2, Part 1,
450 * Section 7.5.5.1.1 3DSTATE_DEPTH_BUFFER, Bit 1.27 Tiled Surface:
451 * [DevGT+]: This field must be set to TRUE.
452 */
453 assert(brw->has_separate_stencil);
454
455 depth_surface_type = BRW_SURFACE_2D;
456 width = stencil_irb->Base.Base.Width;
457 height = stencil_irb->Base.Base.Height;
458 }
459
460 if (depth_mt)
461 brw_render_cache_set_check_flush(brw, depth_mt->bo);
462 if (stencil_mt)
463 brw_render_cache_set_check_flush(brw, stencil_mt->bo);
464
465 brw->vtbl.emit_depth_stencil_hiz(brw, depth_mt, depth_offset,
466 depthbuffer_format, depth_surface_type,
467 stencil_mt, hiz, separate_stencil,
468 width, height, tile_x, tile_y);
469 }
470
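/**
 * Convert a floating-point depth value to the bit representation used by the
 * given depth buffer format.
 */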
471 uint32_t
472 brw_convert_depth_value(mesa_format format, float value)
473 {
474 switch (format) {
475 case MESA_FORMAT_Z_FLOAT32:
476 return float_as_int(value);
477 case MESA_FORMAT_Z_UNORM16:
478 return value * ((1u << 16) - 1);
479 case MESA_FORMAT_Z24_UNORM_X8_UINT:
480 return value * ((1u << 24) - 1);
481 default:
482 unreachable("Invalid depth format");
483 }
484 }
485
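/**
 * Emit 3DSTATE_DEPTH_BUFFER for the simple case: neither HiZ nor a separate
 * stencil buffer is in use (both are asserted off below).
 */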
486 void
487 brw_emit_depth_stencil_hiz(struct brw_context *brw,
488 struct intel_mipmap_tree *depth_mt,
489 uint32_t depth_offset, uint32_t depthbuffer_format,
490 uint32_t depth_surface_type,
491 struct intel_mipmap_tree *stencil_mt,
492 bool hiz, bool separate_stencil,
493 uint32_t width, uint32_t height,
494 uint32_t tile_x, uint32_t tile_y)
495 {
496 (void)hiz;
497 (void)separate_stencil;
498 (void)stencil_mt;
499
500 assert(!hiz);
501 assert(!separate_stencil);
502
503 const unsigned len = (brw->is_g4x || brw->gen == 5) ? 6 : 5;
504
505 BEGIN_BATCH(len);
506 OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
507 OUT_BATCH((depth_mt ? depth_mt->pitch - 1 : 0) |
508 (depthbuffer_format << 18) |
509 (BRW_TILEWALK_YMAJOR << 26) |
510 ((depth_mt ? depth_mt->tiling != I915_TILING_NONE : 1)
511 << 27) |
512 (depth_surface_type << 29));
513
514 if (depth_mt) {
515 OUT_RELOC(depth_mt->bo,
516 I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
517 depth_offset);
518 } else {
519 OUT_BATCH(0);
520 }
521
522 OUT_BATCH(((width + tile_x - 1) << 6) |
523 ((height + tile_y - 1) << 19));
524 OUT_BATCH(0);
525
526 if (brw->is_g4x || brw->gen >= 5)
527 OUT_BATCH(tile_x | (tile_y << 16));
528 else
529 assert(tile_x == 0 && tile_y == 0);
530
531 if (brw->gen >= 6)
532 OUT_BATCH(0);
533
534 ADVANCE_BATCH();
535 }
536
537 const struct brw_tracked_state brw_depthbuffer = {
538 .dirty = {
539 .mesa = _NEW_BUFFERS,
540 .brw = BRW_NEW_BATCH |
541 BRW_NEW_BLORP,
542 },
543 .emit = brw_emit_depthbuffer,
544 };
545
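/**
 * Emit PIPELINE_SELECT to switch between the 3D and GPGPU (compute)
 * pipelines, surrounded by the flushes and workarounds the different
 * generations require.
 */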
546 void
547 brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
548 {
549 const bool is_965 = brw->gen == 4 && !brw->is_g4x;
550 const uint32_t _3DSTATE_PIPELINE_SELECT =
551 is_965 ? CMD_PIPELINE_SELECT_965 : CMD_PIPELINE_SELECT_GM45;
552
553 if (brw->gen >= 8 && brw->gen < 10) {
554 /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
555 *
556 * Software must clear the COLOR_CALC_STATE Valid field in
557 * 3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
558 * with Pipeline Select set to GPGPU.
559 *
560 * The internal hardware docs recommend the same workaround for Gen9
561 * hardware too.
562 */
563 if (pipeline == BRW_COMPUTE_PIPELINE) {
564 BEGIN_BATCH(2);
565 OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
566 OUT_BATCH(0);
567 ADVANCE_BATCH();
568
569 brw->ctx.NewDriverState |= BRW_NEW_CC_STATE;
570 }
571 }
572
573 if (brw->gen >= 6) {
574 /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
575 * PIPELINE_SELECT [DevBWR+]":
576 *
577 * Project: DEVSNB+
578 *
579 * Software must ensure all the write caches are flushed through a
580 * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
581 * command to invalidate read only caches prior to programming
582 * MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
583 */
584 const unsigned dc_flush =
585 brw->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;
586
587 brw_emit_pipe_control_flush(brw,
588 PIPE_CONTROL_RENDER_TARGET_FLUSH |
589 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
590 dc_flush |
591 PIPE_CONTROL_NO_WRITE |
592 PIPE_CONTROL_CS_STALL);
593
594 brw_emit_pipe_control_flush(brw,
595 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
596 PIPE_CONTROL_CONST_CACHE_INVALIDATE |
597 PIPE_CONTROL_STATE_CACHE_INVALIDATE |
598 PIPE_CONTROL_INSTRUCTION_INVALIDATE |
599 PIPE_CONTROL_NO_WRITE);
600
601 } else {
602 /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
603 * PIPELINE_SELECT [DevBWR+]":
604 *
605 * Project: PRE-DEVSNB
606 *
607 * Software must ensure the current pipeline is flushed via an
608 * MI_FLUSH or PIPE_CONTROL prior to the execution of PIPELINE_SELECT.
609 */
610 BEGIN_BATCH(1);
611 OUT_BATCH(MI_FLUSH);
612 ADVANCE_BATCH();
613 }
614
615 /* Select the pipeline */
616 BEGIN_BATCH(1);
617 OUT_BATCH(_3DSTATE_PIPELINE_SELECT << 16 |
618 (brw->gen >= 9 ? (3 << 8) : 0) |
619 (pipeline == BRW_COMPUTE_PIPELINE ? 2 : 0));
620 ADVANCE_BATCH();
621
622 if (brw->gen == 7 && !brw->is_haswell &&
623 pipeline == BRW_RENDER_PIPELINE) {
624 /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
625 * PIPELINE_SELECT [DevBWR+]":
626 *
627 * Project: DEVIVB, DEVHSW:GT3:A0
628 *
629 * Software must send a pipe_control with a CS stall and a post sync
630 * operation and then a dummy DRAW after every MI_SET_CONTEXT and
631 * after any PIPELINE_SELECT that is enabling 3D mode.
632 */
633 gen7_emit_cs_stall_flush(brw);
634
635 BEGIN_BATCH(7);
636 OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
637 OUT_BATCH(_3DPRIM_POINTLIST);
638 OUT_BATCH(0);
639 OUT_BATCH(0);
640 OUT_BATCH(0);
641 OUT_BATCH(0);
642 OUT_BATCH(0);
643 ADVANCE_BATCH();
644 }
645 }
646
647 /**
648 * Misc invariant state packets
649 */
650 void
651 brw_upload_invariant_state(struct brw_context *brw)
652 {
653 const bool is_965 = brw->gen == 4 && !brw->is_g4x;
654
655 brw_emit_select_pipeline(brw, BRW_RENDER_PIPELINE);
656 brw->last_pipeline = BRW_RENDER_PIPELINE;
657
658 if (brw->gen >= 8) {
659 BEGIN_BATCH(3);
660 OUT_BATCH(CMD_STATE_SIP << 16 | (3 - 2));
661 OUT_BATCH(0);
662 OUT_BATCH(0);
663 ADVANCE_BATCH();
664 } else {
665 BEGIN_BATCH(2);
666 OUT_BATCH(CMD_STATE_SIP << 16 | (2 - 2));
667 OUT_BATCH(0);
668 ADVANCE_BATCH();
669 }
670
671 /* Original Gen4 doesn't have 3DSTATE_AA_LINE_PARAMETERS. */
672 if (!is_965) {
673 BEGIN_BATCH(3);
674 OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
675 /* use legacy aa line coverage computation */
676 OUT_BATCH(0);
677 OUT_BATCH(0);
678 ADVANCE_BATCH();
679 }
680
681 const uint32_t _3DSTATE_VF_STATISTICS =
682 is_965 ? GEN4_3DSTATE_VF_STATISTICS : GM45_3DSTATE_VF_STATISTICS;
683 BEGIN_BATCH(1);
684 OUT_BATCH(_3DSTATE_VF_STATISTICS << 16 | 1);
685 ADVANCE_BATCH();
686 }
687
688 const struct brw_tracked_state brw_invariant_state = {
689 .dirty = {
690 .mesa = 0,
691 .brw = BRW_NEW_BLORP |
692 BRW_NEW_CONTEXT,
693 },
694 .emit = brw_upload_invariant_state
695 };
696
697 /**
698 * Define the base addresses which some state is referenced from.
699 *
700 * This allows us to avoid having to emit relocations for the objects,
701 * and is actually required for binding table pointers on gen6.
702 *
703 * Surface state base address covers binding table pointers and
704 * surface state objects, but not the surfaces that the surface state
705 * objects point to.
706 */
707 void
708 brw_upload_state_base_address(struct brw_context *brw)
709 {
710 if (brw->batch.state_base_address_emitted)
711 return;
712
713 /* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of
714 * vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be
715 * programmed prior to STATE_BASE_ADDRESS.
716 *
717 * However, given that the instruction SBA (general state base
718 * address) on this chipset is always set to 0 across X and GL,
719 * maybe this isn't required for us in particular.
720 */
721
722 if (brw->gen >= 6) {
723 const unsigned dc_flush =
724 brw->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;
725
726 /* Emit a render target cache flush.
727 *
728 * This isn't documented anywhere in the PRM. However, it seems to be
729 * necessary prior to changing the surface state base address. We've
730 * seen issues in Vulkan where we get GPU hangs when using multi-level
731 * command buffers which clear depth, reset state base address, and then
732 * go render stuff.
733 *
734 * Normally, in GL, we would trust the kernel to do sufficient stalls
735 * and flushes prior to executing our batch. However, it doesn't seem
736 * as if the kernel's flushing is always sufficient and we don't want to
737 * rely on it.
738 *
739 * We make this an end-of-pipe sync instead of a normal flush because we
740 * do not know the current status of the GPU. On Haswell at least,
741 * having a fast-clear operation in flight at the same time as a normal
742 * rendering operation can cause hangs. Since the kernel's flushing is
743 * insufficient, we need to ensure that any rendering operations from
744 * other processes are definitely complete before we try to do our own
745 * rendering. It's a bit of a big hammer but it appears to work.
746 */
747 brw_emit_end_of_pipe_sync(brw,
748 PIPE_CONTROL_RENDER_TARGET_FLUSH |
749 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
750 dc_flush);
751 }
752
753 if (brw->gen >= 8) {
754 uint32_t mocs_wb = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
755 int pkt_len = brw->gen >= 9 ? 19 : 16;
756
757 BEGIN_BATCH(pkt_len);
758 OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (pkt_len - 2));
759 /* General state base address: stateless DP read/write requests */
760 OUT_BATCH(mocs_wb << 4 | 1);
761 OUT_BATCH(0);
762 OUT_BATCH(mocs_wb << 16);
763 /* Surface state base address: */
764 OUT_RELOC64(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
765 mocs_wb << 4 | 1);
766 /* Dynamic state base address: */
767 OUT_RELOC64(brw->batch.bo,
768 I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION, 0,
769 mocs_wb << 4 | 1);
770 /* Indirect object base address: MEDIA_OBJECT data */
771 OUT_BATCH(mocs_wb << 4 | 1);
772 OUT_BATCH(0);
773 /* Instruction base address: shader kernels (incl. SIP) */
774 OUT_RELOC64(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
775 mocs_wb << 4 | 1);
776
777 /* General state buffer size */
778 OUT_BATCH(0xfffff001);
779 /* Dynamic state buffer size */
780 OUT_BATCH(ALIGN(brw->batch.bo->size, 4096) | 1);
781 /* Indirect object upper bound */
782 OUT_BATCH(0xfffff001);
783 /* Instruction access upper bound */
784 OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
785 if (brw->gen >= 9) {
786 OUT_BATCH(1);
787 OUT_BATCH(0);
788 OUT_BATCH(0);
789 }
790 ADVANCE_BATCH();
791 } else if (brw->gen >= 6) {
792 uint8_t mocs = brw->gen == 7 ? GEN7_MOCS_L3 : 0;
793
794 BEGIN_BATCH(10);
795 OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
796 OUT_BATCH(mocs << 8 | /* General State Memory Object Control State */
797 mocs << 4 | /* Stateless Data Port Access Memory Object Control State */
798 1); /* General State Base Address Modify Enable */
799 /* Surface state base address:
800 * BINDING_TABLE_STATE
801 * SURFACE_STATE
802 */
803 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0, 1);
804 /* Dynamic state base address:
805 * SAMPLER_STATE
806 * SAMPLER_BORDER_COLOR_STATE
807 * CLIP, SF, WM/CC viewport state
808 * COLOR_CALC_STATE
809 * DEPTH_STENCIL_STATE
810 * BLEND_STATE
811 * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
812 * Disable is clear, which we rely on)
813 */
814 OUT_RELOC(brw->batch.bo, (I915_GEM_DOMAIN_RENDER |
815 I915_GEM_DOMAIN_INSTRUCTION), 0, 1);
816
817 OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */
818 OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
819 1); /* Instruction base address: shader kernels (incl. SIP) */
820
821 OUT_BATCH(1); /* General state upper bound */
822 /* Dynamic state upper bound. Although the documentation says that
823 * programming it to zero will cause it to be ignored, that is a lie.
824 * If this isn't programmed to a real bound, the sampler border color
825 * pointer is rejected, causing border color to mysteriously fail.
826 */
827 OUT_BATCH(0xfffff001);
828 OUT_BATCH(1); /* Indirect object upper bound */
829 OUT_BATCH(1); /* Instruction access upper bound */
830 ADVANCE_BATCH();
831 } else if (brw->gen == 5) {
832 BEGIN_BATCH(8);
833 OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
834 OUT_BATCH(1); /* General state base address */
835 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
836 1); /* Surface state base address */
837 OUT_BATCH(1); /* Indirect object base address */
838 OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
839 1); /* Instruction base address */
840 OUT_BATCH(0xfffff001); /* General state upper bound */
841 OUT_BATCH(1); /* Indirect object upper bound */
842 OUT_BATCH(1); /* Instruction access upper bound */
843 ADVANCE_BATCH();
844 } else {
845 BEGIN_BATCH(6);
846 OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
847 OUT_BATCH(1); /* General state base address */
848 OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
849 1); /* Surface state base address */
850 OUT_BATCH(1); /* Indirect object base address */
851 OUT_BATCH(1); /* General state upper bound */
852 OUT_BATCH(1); /* Indirect object upper bound */
853 ADVANCE_BATCH();
854 }
855
856 if (brw->gen >= 6) {
857 brw_emit_pipe_control_flush(brw,
858 PIPE_CONTROL_INSTRUCTION_INVALIDATE |
859 PIPE_CONTROL_STATE_CACHE_INVALIDATE |
860 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
861 }
862
863 /* According to section 3.6.1 of VOL1 of the 965 PRM,
864 * STATE_BASE_ADDRESS updates require a reissue of:
865 *
866 * 3DSTATE_PIPELINED_POINTERS
867 * 3DSTATE_BINDING_TABLE_POINTERS
868 * MEDIA_STATE_POINTERS
869 *
870 * and this continues through Ironlake. The Sandy Bridge PRM, vol
871 * 1 part 1 says that the following packets must be reissued:
872 *
873 * 3DSTATE_CC_POINTERS
874 * 3DSTATE_BINDING_TABLE_POINTERS
875 * 3DSTATE_SAMPLER_STATE_POINTERS
876 * 3DSTATE_VIEWPORT_STATE_POINTERS
877 * MEDIA_STATE_POINTERS
878 *
879 * Those are always reissued following SBA updates anyway (new
880 * batch time), except in the case of the program cache BO
881 * changing. Having a separate state flag makes the sequence more
882 * obvious.
883 */
884
885 brw->ctx.NewDriverState |= BRW_NEW_STATE_BASE_ADDRESS;
886 brw->batch.state_base_address_emitted = true;
887 }