i965/gen4: Remove redundant check for depth when rebasing stencil
src/mesa/drivers/dri/i965/brw_misc_state.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */



#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "compiler/brw_eu_defines.h"

#include "main/framebuffer.h"
#include "main/fbobject.h"
#include "main/format_utils.h"
#include "main/glformats.h"

/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void
upload_pipelined_state_pointers(struct brw_context *brw)
{
   if (brw->gen == 5) {
      /* Need to flush before changing clip max threads for errata. */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   BEGIN_BATCH(7);
   OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->vs.base.state_offset);
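   /* Bit 0 of the GS and clip unit pointers below is that unit's enable
    * bit; the clip unit is always enabled.
    */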
   if (brw->ff_gs.prog_active)
      OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                brw->ff_gs.state_offset | 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->clip.state_offset | 1);
   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->sf.state_offset);
   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->wm.base.state_offset);
   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->cc.state_offset);
   ADVANCE_BATCH();

   brw->ctx.NewDriverState |= BRW_NEW_PSP;
}

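/* Emit the pipelined state pointers together with the URB fence and CS URB
 * state, which are all tracked by the single state atom below.
 */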
static void
upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FF_GS_PROG_DATA |
             BRW_NEW_GEN4_UNIT_STATE |
             BRW_NEW_STATE_BASE_ADDRESS |
             BRW_NEW_URB_FENCE,
   },
   .emit = upload_psp_urb_cbs,
};

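/**
 * Determine the 3DSTATE_DEPTH_BUFFER format for the current draw buffer,
 * falling back to the stencil attachment when a packed depth/stencil
 * buffer is bound to stencil only, and to D32_FLOAT when no depth data
 * is bound at all.
 */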
uint32_t
brw_depthbuffer_format(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *drb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *srb;

   if (!drb &&
       (srb = intel_get_renderbuffer(fb, BUFFER_STENCIL)) &&
       !srb->mt->stencil_mt &&
       (intel_rb_format(srb) == MESA_FORMAT_Z24_UNORM_S8_UINT ||
        intel_rb_format(srb) == MESA_FORMAT_Z32_FLOAT_S8X24_UINT)) {
      drb = srb;
   }

   if (!drb)
      return BRW_DEPTHFORMAT_D32_FLOAT;

   return brw_depth_format(brw, drb->mt->format);
}

/**
 * Returns the mask of how many bits of x and y must be handled through the
 * depthbuffer's draw offset x and y fields.
 *
 * The draw offset x/y field of the depthbuffer packet is unfortunately shared
 * between the depth, hiz, and stencil buffers. Because it can be hard to get
 * all 3 to agree on this value, we want to do as much drawing offset
 * adjustment as possible by moving the base offset of the 3 buffers, which is
 * restricted to tile boundaries.
 *
 * For each buffer, the remainder must be applied through the x/y draw offset.
 * This returns the worst-case mask of the low bits that have to go into the
 * packet. If the 3 buffers don't agree on the drawing offset ANDed with this
 * mask, then we're in trouble.
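 *
 * As an illustration (not an additional hardware constraint): a Y-tiled
 * buffer with 4 bytes per pixel uses 128-byte-wide, 32-row tiles, i.e.
 * 32x32 pixels, so intel_get_tile_masks() would report a tile mask of 31
 * in both x and y for it.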
 */
static void
brw_get_depthstencil_tile_masks(struct intel_mipmap_tree *depth_mt,
                                uint32_t depth_level,
                                uint32_t depth_layer,
                                struct intel_mipmap_tree *stencil_mt,
                                uint32_t *out_tile_mask_x,
                                uint32_t *out_tile_mask_y)
{
   uint32_t tile_mask_x = 0, tile_mask_y = 0;

   if (depth_mt) {
      intel_get_tile_masks(depth_mt->tiling,
                           depth_mt->cpp,
                           &tile_mask_x, &tile_mask_y);
      assert(!intel_miptree_level_has_hiz(depth_mt, depth_level));
   }

   if (stencil_mt) {
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;

      if (stencil_mt->format == MESA_FORMAT_S_UINT8) {
         /* Separate stencil buffer uses 64x64 tiles. */
         tile_mask_x |= 63;
         tile_mask_y |= 63;
      } else {
         uint32_t stencil_tile_mask_x, stencil_tile_mask_y;
         intel_get_tile_masks(stencil_mt->tiling,
                              stencil_mt->cpp,
                              &stencil_tile_mask_x,
                              &stencil_tile_mask_y);

         tile_mask_x |= stencil_tile_mask_x;
         tile_mask_y |= stencil_tile_mask_y;
      }
   }

   *out_tile_mask_x = tile_mask_x;
   *out_tile_mask_y = tile_mask_y;
}

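/* Return the miptree that actually holds the stencil bits for a
 * renderbuffer: the separate stencil miptree when one exists, otherwise
 * the renderbuffer's own miptree.
 */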
static struct intel_mipmap_tree *
get_stencil_miptree(struct intel_renderbuffer *irb)
{
   if (!irb)
      return NULL;
   if (irb->mt->stencil_mt)
      return irb->mt->stencil_mt;
   return irb->mt;
}

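/**
 * On Gen4/5, rebase depth and/or stencil miptrees whose draw offsets
 * violate the hardware's alignment restrictions, moving the affected
 * levels to temporary miptrees so that the remaining intra-tile offsets
 * fit in 3DSTATE_DEPTH_BUFFER.
 */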
void
brw_workaround_depthstencil_alignment(struct brw_context *brw,
                                      GLbitfield clear_mask)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   bool rebase_depth = false;
   bool rebase_stencil = false;
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL;
   struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
   uint32_t tile_x = 0, tile_y = 0, stencil_tile_x = 0, stencil_tile_y = 0;
   uint32_t stencil_draw_x = 0, stencil_draw_y = 0;
   bool invalidate_depth = clear_mask & BUFFER_BIT_DEPTH;
   bool invalidate_stencil = clear_mask & BUFFER_BIT_STENCIL;

   if (depth_irb)
      depth_mt = depth_irb->mt;

   /* Initialize brw->depthstencil to 'nop' workaround state.
    */
   brw->depthstencil.tile_x = 0;
   brw->depthstencil.tile_y = 0;
   brw->depthstencil.depth_offset = 0;
   brw->depthstencil.depth_mt = NULL;
   brw->depthstencil.stencil_mt = NULL;
   if (depth_irb)
      brw->depthstencil.depth_mt = depth_mt;
   if (stencil_irb)
      brw->depthstencil.stencil_mt = get_stencil_miptree(stencil_irb);

   /* Gen6+ doesn't require the workarounds, since we always program the
    * surface state at the start of the whole surface.
    */
   if (brw->gen >= 6)
      return;

   /* Check if depth buffer is in depth/stencil format. If so, then it's only
    * safe to invalidate it if we're also clearing stencil, and both depth_irb
    * and stencil_irb point to the same miptree.
    *
    * Note: it's not sufficient to check for the case where
    * _mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL,
    * because this fails to catch depth/stencil buffers on hardware that uses
    * separate stencil. To catch that case, we check whether
    * depth_mt->stencil_mt is non-NULL.
    */
   if (depth_irb && invalidate_depth &&
       (_mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL ||
        depth_mt->stencil_mt)) {
      invalidate_depth = invalidate_stencil && depth_irb && stencil_irb
         && depth_irb->mt == stencil_irb->mt;
   }

   uint32_t tile_mask_x, tile_mask_y;
   brw_get_depthstencil_tile_masks(depth_mt,
                                   depth_mt ? depth_irb->mt_level : 0,
                                   depth_mt ? depth_irb->mt_layer : 0,
                                   stencil_mt,
                                   &tile_mask_x, &tile_mask_y);

   if (depth_irb) {
      tile_x = depth_irb->draw_x & tile_mask_x;
      tile_y = depth_irb->draw_y & tile_mask_y;

      /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
       * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
       * Coordinate Offset X/Y":
       *
       *   "The 3 LSBs of both offsets must be zero to ensure correct
       *   alignment"
       */
      if (tile_x & 7 || tile_y & 7)
         rebase_depth = true;

      /* We didn't even have intra-tile offsets before g45. */
      if (!brw->has_surface_tile_offset) {
         if (tile_x || tile_y)
            rebase_depth = true;
      }

      if (rebase_depth) {
         perf_debug("HW workaround: blitting depth level %d to a temporary "
                    "to fix alignment (depth tile offset %d,%d)\n",
                    depth_irb->mt_level, tile_x, tile_y);
         intel_renderbuffer_move_to_temp(brw, depth_irb, invalidate_depth);
         /* In the case of stencil_irb being the same packed depth/stencil
          * texture but not the same rb, make it point at our rebased mt, too.
          */
         if (stencil_irb &&
             stencil_irb != depth_irb &&
             stencil_irb->mt == depth_mt) {
            intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
            intel_renderbuffer_set_draw_offset(stencil_irb);
         }

         stencil_mt = get_stencil_miptree(stencil_irb);

         tile_x = depth_irb->draw_x & tile_mask_x;
         tile_y = depth_irb->draw_y & tile_mask_y;
      }

      if (stencil_irb) {
         assert(stencil_irb->mt == depth_irb->mt);
         assert(stencil_irb->mt_level == depth_irb->mt_level);
         assert(stencil_irb->mt_layer == depth_irb->mt_layer);
      }
   }

   /* If we have (just) stencil, check it for ignored low bits as well */
   if (!depth_irb && stencil_irb) {
      intel_miptree_get_image_offset(stencil_mt,
                                     stencil_irb->mt_level,
                                     stencil_irb->mt_layer,
                                     &stencil_draw_x, &stencil_draw_y);
      stencil_tile_x = stencil_draw_x & tile_mask_x;
      stencil_tile_y = stencil_draw_y & tile_mask_y;

      if (stencil_tile_x & 7 || stencil_tile_y & 7)
         rebase_stencil = true;

      if (!brw->has_surface_tile_offset) {
         if (stencil_tile_x || stencil_tile_y)
            rebase_stencil = true;
      }
   }

   if (rebase_stencil) {
      /* If stencil needs a rebase, there is no depth attachment and the
       * combined depth-stencil buffer is being used for stencil only.
       * Otherwise, when a depth attachment is present, both stencil and
       * depth point to the same miptree, and the depth rebase above is
       * handled first, updating the stencil attachment accordingly. Hence
       * stencil is rebased here only when there is no depth attachment.
       */
      assert(!depth_irb);
      perf_debug("HW workaround: blitting stencil level %d to a temporary "
                 "to fix alignment (stencil tile offset %d,%d)\n",
                 stencil_irb->mt_level, stencil_tile_x, stencil_tile_y);

      intel_renderbuffer_move_to_temp(brw, stencil_irb, invalidate_stencil);
      stencil_mt = get_stencil_miptree(stencil_irb);

      intel_miptree_get_image_offset(stencil_mt,
                                     stencil_irb->mt_level,
                                     stencil_irb->mt_layer,
                                     &stencil_draw_x, &stencil_draw_y);
      stencil_tile_x = stencil_draw_x & tile_mask_x;
      stencil_tile_y = stencil_draw_y & tile_mask_y;
   }

   if (!depth_irb) {
      tile_x = stencil_tile_x;
      tile_y = stencil_tile_y;
   }

   /* While we just tried to get everything aligned, we may have failed to do
    * so in the case of rendering to array or 3D textures, where nonzero faces
    * will still have an offset post-rebase. At least give an informative
    * warning.
    */
   WARN_ONCE((tile_x & 7) || (tile_y & 7),
             "Depth/stencil buffer needs alignment to 8-pixel boundaries.\n"
             "Truncating offset, bad rendering may occur.\n");
   tile_x &= ~7;
   tile_y &= ~7;

   /* Now, after rebasing, save off the new depthstencil state so the
    * hardware packets can just dereference that without re-calculating tile
    * offsets.
    */
   brw->depthstencil.tile_x = tile_x;
   brw->depthstencil.tile_y = tile_y;
   if (depth_irb) {
      depth_mt = depth_irb->mt;
      brw->depthstencil.depth_mt = depth_mt;
      brw->depthstencil.depth_offset =
         intel_miptree_get_aligned_offset(depth_mt,
                                          depth_irb->draw_x & ~tile_mask_x,
                                          depth_irb->draw_y & ~tile_mask_y);
      assert(!intel_renderbuffer_has_hiz(depth_irb));
   }
   if (stencil_irb) {
      stencil_mt = get_stencil_miptree(stencil_irb);

      brw->depthstencil.stencil_mt = stencil_mt;
      assert(stencil_mt->format != MESA_FORMAT_S_UINT8);

      if (!depth_irb) {
         brw->depthstencil.depth_offset =
            intel_miptree_get_aligned_offset(
               stencil_mt,
               stencil_irb->draw_x & ~tile_mask_x,
               stencil_irb->draw_y & ~tile_mask_y);
      }
   }
}

void
brw_emit_depthbuffer(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = brw->depthstencil.depth_mt;
   struct intel_mipmap_tree *stencil_mt = brw->depthstencil.stencil_mt;
   uint32_t tile_x = brw->depthstencil.tile_x;
   uint32_t tile_y = brw->depthstencil.tile_y;
   bool hiz = depth_irb && intel_renderbuffer_has_hiz(depth_irb);
   bool separate_stencil = false;
   uint32_t depth_surface_type = BRW_SURFACE_NULL;
   uint32_t depthbuffer_format = BRW_DEPTHFORMAT_D32_FLOAT;
   uint32_t depth_offset = 0;
   uint32_t width = 1, height = 1;

   if (stencil_mt) {
      separate_stencil = stencil_mt->format == MESA_FORMAT_S_UINT8;

      /* Gen7 supports only separate stencil */
      assert(separate_stencil || brw->gen < 7);
   }

   /* If there's a packed depth/stencil bound to stencil only, we need to
    * emit the packed depth/stencil buffer packet.
    */
   if (!depth_irb && stencil_irb && !separate_stencil) {
      depth_irb = stencil_irb;
      depth_mt = stencil_mt;
   }

   if (depth_irb && depth_mt) {
      /* When 3DSTATE_DEPTH_BUFFER.Separate_Stencil_Enable is set, then
       * 3DSTATE_DEPTH_BUFFER.Surface_Format is not permitted to be a packed
       * depthstencil format.
       *
       * Gens prior to 7 require that HiZ_Enable and Separate_Stencil_Enable
       * be set to the same value. Gen 7 and later implicitly always set
       * Separate_Stencil_Enable; software cannot disable it.
       */
      if ((brw->gen < 7 && hiz) || brw->gen >= 7) {
         assert(!_mesa_is_format_packed_depth_stencil(depth_mt->format));
      }

      /* Prior to Gen7, if using separate stencil, hiz must be enabled. */
      assert(brw->gen >= 7 || !separate_stencil || hiz);

      assert(brw->gen < 6 || depth_mt->tiling == I915_TILING_Y);
      assert(!hiz || depth_mt->tiling == I915_TILING_Y);

      depthbuffer_format = brw_depthbuffer_format(brw);
      depth_surface_type = BRW_SURFACE_2D;
      depth_offset = brw->depthstencil.depth_offset;
      width = depth_irb->Base.Base.Width;
      height = depth_irb->Base.Base.Height;
   } else if (separate_stencil) {
      /*
       * There exists a separate stencil buffer but no depth buffer.
       *
       * The stencil buffer inherits most of its fields from
       * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
       * height.
       *
       * The tiled bit must be set. From the Sandybridge PRM, Volume 2, Part 1,
       * Section 7.5.5.1.1 3DSTATE_DEPTH_BUFFER, Bit 1.27 Tiled Surface:
       *     [DevGT+]: This field must be set to TRUE.
       */
      assert(brw->has_separate_stencil);

      depth_surface_type = BRW_SURFACE_2D;
      width = stencil_irb->Base.Base.Width;
      height = stencil_irb->Base.Base.Height;
   }

   if (depth_mt)
      brw_render_cache_set_check_flush(brw, depth_mt->bo);
   if (stencil_mt)
      brw_render_cache_set_check_flush(brw, stencil_mt->bo);

   brw->vtbl.emit_depth_stencil_hiz(brw, depth_mt, depth_offset,
                                    depthbuffer_format, depth_surface_type,
                                    stencil_mt, hiz, separate_stencil,
                                    width, height, tile_x, tile_y);
}

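/**
 * Convert a floating-point depth clear value in [0,1] to the packed
 * integer representation of the given depth format; e.g. 1.0f becomes
 * 0xffff for MESA_FORMAT_Z_UNORM16.
 */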
uint32_t
brw_convert_depth_value(mesa_format format, float value)
{
   switch (format) {
   case MESA_FORMAT_Z_FLOAT32:
      return float_as_int(value);
   case MESA_FORMAT_Z_UNORM16:
      return value * ((1u << 16) - 1);
   case MESA_FORMAT_Z24_UNORM_X8_UINT:
      return value * ((1u << 24) - 1);
   default:
      unreachable("Invalid depth format");
   }
}

void
brw_emit_depth_stencil_hiz(struct brw_context *brw,
                           struct intel_mipmap_tree *depth_mt,
                           uint32_t depth_offset, uint32_t depthbuffer_format,
                           uint32_t depth_surface_type,
                           struct intel_mipmap_tree *stencil_mt,
                           bool hiz, bool separate_stencil,
                           uint32_t width, uint32_t height,
                           uint32_t tile_x, uint32_t tile_y)
{
   (void)hiz;
   (void)separate_stencil;
   (void)stencil_mt;

   assert(!hiz);
   assert(!separate_stencil);

   const unsigned len = (brw->is_g4x || brw->gen == 5) ? 6 : 5;

   BEGIN_BATCH(len);
   OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
   OUT_BATCH((depth_mt ? depth_mt->pitch - 1 : 0) |
             (depthbuffer_format << 18) |
             (BRW_TILEWALK_YMAJOR << 26) |
             ((depth_mt ? depth_mt->tiling != I915_TILING_NONE : 1)
              << 27) |
             (depth_surface_type << 29));

   if (depth_mt) {
      OUT_RELOC(depth_mt->bo,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                depth_offset);
   } else {
      OUT_BATCH(0);
   }

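   /* Width and Height are programmed as (dimension - 1), with the
    * intra-tile offset folded in so that offset + size still covers the
    * whole slice.
    */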
   OUT_BATCH(((width + tile_x - 1) << 6) |
             ((height + tile_y - 1) << 19));
   OUT_BATCH(0);

   if (brw->is_g4x || brw->gen >= 5)
      OUT_BATCH(tile_x | (tile_y << 16));
   else
      assert(tile_x == 0 && tile_y == 0);

   if (brw->gen >= 6)
      OUT_BATCH(0);

   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = brw_emit_depthbuffer,
};

void
brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
{
   const bool is_965 = brw->gen == 4 && !brw->is_g4x;
   const uint32_t _3DSTATE_PIPELINE_SELECT =
      is_965 ? CMD_PIPELINE_SELECT_965 : CMD_PIPELINE_SELECT_GM45;

   if (brw->gen >= 8 && brw->gen < 10) {
      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
       *
       *   Software must clear the COLOR_CALC_STATE Valid field in
       *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
       *   with Pipeline Select set to GPGPU.
       *
       * The internal hardware docs recommend the same workaround for Gen9
       * hardware too.
       */
      if (pipeline == BRW_COMPUTE_PIPELINE) {
         BEGIN_BATCH(2);
         OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
         OUT_BATCH(0);
         ADVANCE_BATCH();

         brw->ctx.NewDriverState |= BRW_NEW_CC_STATE;
      }
   }

   if (brw->gen >= 6) {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: DEVSNB+
       *
       *   Software must ensure all the write caches are flushed through a
       *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
       *   command to invalidate read only caches prior to programming
       *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
       */
      const unsigned dc_flush =
         brw->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  dc_flush |
                                  PIPE_CONTROL_NO_WRITE |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                  PIPE_CONTROL_NO_WRITE);

   } else {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: PRE-DEVSNB
       *
       *   Software must ensure the current pipeline is flushed via an
       *   MI_FLUSH or PIPE_CONTROL prior to the execution of PIPELINE_SELECT.
       */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   /* Select the pipeline */
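   /* On Gen9+, bits 9:8 of PIPELINE_SELECT are mask bits which must be set
    * for the pipeline-select bits 1:0 to take effect.
    */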
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_PIPELINE_SELECT << 16 |
             (brw->gen >= 9 ? (3 << 8) : 0) |
             (pipeline == BRW_COMPUTE_PIPELINE ? 2 : 0));
   ADVANCE_BATCH();

   if (brw->gen == 7 && !brw->is_haswell &&
       pipeline == BRW_RENDER_PIPELINE) {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: DEVIVB, DEVHSW:GT3:A0
       *
       *   Software must send a pipe_control with a CS stall and a post sync
       *   operation and then a dummy DRAW after every MI_SET_CONTEXT and
       *   after any PIPELINE_SELECT that is enabling 3D mode.
       */
      gen7_emit_cs_stall_flush(brw);

      BEGIN_BATCH(7);
      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
      OUT_BATCH(_3DPRIM_POINTLIST);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

/**
 * Misc invariant state packets
 */
void
brw_upload_invariant_state(struct brw_context *brw)
{
   const bool is_965 = brw->gen == 4 && !brw->is_g4x;

   brw_emit_select_pipeline(brw, BRW_RENDER_PIPELINE);
   brw->last_pipeline = BRW_RENDER_PIPELINE;

   if (brw->gen >= 8) {
      BEGIN_BATCH(3);
      OUT_BATCH(CMD_STATE_SIP << 16 | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(2);
      OUT_BATCH(CMD_STATE_SIP << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* Original Gen4 doesn't have 3DSTATE_AA_LINE_PARAMETERS. */
   if (!is_965) {
      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
      /* use legacy aa line coverage computation */
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   const uint32_t _3DSTATE_VF_STATISTICS =
      is_965 ? GEN4_3DSTATE_VF_STATISTICS : GM45_3DSTATE_VF_STATISTICS;
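   /* Bit 0 of the packet's single DWord is the Statistics Enable bit. */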
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_VF_STATISTICS << 16 | 1);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_invariant_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT,
   },
   .emit = brw_upload_invariant_state
};

/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations for the objects,
 * and is actually required for binding table pointers on gen6.
 *
 * Surface state base address covers binding table pointers and
 * surface state objects, but not the surfaces that the surface state
 * objects point to.
 */
void
brw_upload_state_base_address(struct brw_context *brw)
{
   if (brw->batch.state_base_address_emitted)
      return;

   /* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of
    * vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be
    * programmed prior to STATE_BASE_ADDRESS.
    *
    * However, given that the instruction SBA (general state base
    * address) on this chipset is always set to 0 across X and GL,
    * maybe this isn't required for us in particular.
    */

   if (brw->gen >= 6) {
      const unsigned dc_flush =
         brw->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;

      /* Emit a render target cache flush.
       *
       * This isn't documented anywhere in the PRM. However, it seems to be
       * necessary prior to changing the surface state base address. We've
       * seen issues in Vulkan where we get GPU hangs when using multi-level
       * command buffers which clear depth, reset state base address, and then
       * go render stuff.
       *
       * Normally, in GL, we would trust the kernel to do sufficient stalls
       * and flushes prior to executing our batch. However, it doesn't seem
       * as if the kernel's flushing is always sufficient and we don't want to
       * rely on it.
       *
       * We make this an end-of-pipe sync instead of a normal flush because we
       * do not know the current status of the GPU. On Haswell at least,
       * having a fast-clear operation in flight at the same time as a normal
       * rendering operation can cause hangs. Since the kernel's flushing is
       * insufficient, we need to ensure that any rendering operations from
       * other processes are definitely complete before we try to do our own
       * rendering. It's a bit of a big hammer but it appears to work.
       */
      brw_emit_end_of_pipe_sync(brw,
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                dc_flush);
   }

   if (brw->gen >= 8) {
      uint32_t mocs_wb = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
      int pkt_len = brw->gen >= 9 ? 19 : 16;

      BEGIN_BATCH(pkt_len);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (pkt_len - 2));
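      /* Bit 0 of each base-address DWord below is that address's Modify
       * Enable bit; a bare value of 1 programs a zero base address.
       */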
      /* General state base address: stateless DP read/write requests */
      OUT_BATCH(mocs_wb << 4 | 1);
      OUT_BATCH(0);
      OUT_BATCH(mocs_wb << 16);
      /* Surface state base address: */
      OUT_RELOC64(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
                  mocs_wb << 4 | 1);
      /* Dynamic state base address: */
      OUT_RELOC64(brw->batch.bo,
                  I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION, 0,
                  mocs_wb << 4 | 1);
      /* Indirect object base address: MEDIA_OBJECT data */
      OUT_BATCH(mocs_wb << 4 | 1);
      OUT_BATCH(0);
      /* Instruction base address: shader kernels (incl. SIP) */
      OUT_RELOC64(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                  mocs_wb << 4 | 1);

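      /* The buffer-size DWords hold the size in 4 KB pages in bits 31:12,
       * with bit 0 as the modify-enable bit; 0xfffff001 thus programs the
       * maximum bound.
       */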
      /* General state buffer size */
      OUT_BATCH(0xfffff001);
      /* Dynamic state buffer size */
      OUT_BATCH(ALIGN(brw->batch.bo->size, 4096) | 1);
      /* Indirect object upper bound */
      OUT_BATCH(0xfffff001);
      /* Instruction access upper bound */
      OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
      if (brw->gen >= 9) {
         OUT_BATCH(1);
         OUT_BATCH(0);
         OUT_BATCH(0);
      }
      ADVANCE_BATCH();
   } else if (brw->gen >= 6) {
      uint8_t mocs = brw->gen == 7 ? GEN7_MOCS_L3 : 0;

      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
      OUT_BATCH(mocs << 8 | /* General State Memory Object Control State */
                mocs << 4 | /* Stateless Data Port Access Memory Object Control State */
                1); /* General State Base Address Modify Enable */
      /* Surface state base address:
       * BINDING_TABLE_STATE
       * SURFACE_STATE
       */
      OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0, 1);
      /* Dynamic state base address:
       * SAMPLER_STATE
       * SAMPLER_BORDER_COLOR_STATE
       * CLIP, SF, WM/CC viewport state
       * COLOR_CALC_STATE
       * DEPTH_STENCIL_STATE
       * BLEND_STATE
       * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
       * Disable is clear, which we rely on)
       */
      OUT_RELOC(brw->batch.bo, (I915_GEM_DOMAIN_RENDER |
                                I915_GEM_DOMAIN_INSTRUCTION), 0, 1);

      OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */
      OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                1); /* Instruction base address: shader kernels (incl. SIP) */

      OUT_BATCH(1); /* General state upper bound */
      /* Dynamic state upper bound. Although the documentation says that
       * programming it to zero will cause it to be ignored, that is a lie.
       * If this isn't programmed to a real bound, the sampler border color
       * pointer is rejected, causing border color to mysteriously fail.
       */
      OUT_BATCH(0xfffff001);
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else if (brw->gen == 5) {
      BEGIN_BATCH(8);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                1); /* Instruction base address */
      OUT_BATCH(0xfffff001); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }

   if (brw->gen >= 6) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }

   /* According to section 3.6.1 of VOL1 of the 965 PRM,
    * STATE_BASE_ADDRESS updates require a reissue of:
    *
    * 3DSTATE_PIPELINE_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * and this continues through Ironlake. The Sandy Bridge PRM, vol
    * 1 part 1 says that the following packets must be reissued:
    *
    * 3DSTATE_CC_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * 3DSTATE_SAMPLER_STATE_POINTERS
    * 3DSTATE_VIEWPORT_STATE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * Those are always reissued following SBA updates anyway (new
    * batch time), except in the case of the program cache BO
    * changing. Having a separate state flag makes the sequence more
    * obvious.
    */

   brw->ctx.NewDriverState |= BRW_NEW_STATE_BASE_ADDRESS;
   brw->batch.state_base_address_emitted = true;
}