i965: Move brw_emit_depth_stencil_hiz higher up in the file
[mesa.git] / src/mesa/drivers/dri/i965/brw_misc_state.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */



#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "compiler/brw_eu_defines.h"

#include "main/framebuffer.h"
#include "main/fbobject.h"
#include "main/format_utils.h"
#include "main/glformats.h"

/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void
upload_pipelined_state_pointers(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen == 5) {
      /* Need to flush before changing clip max threads for errata. */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   BEGIN_BATCH(7);
   OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->batch.state.bo, 0, brw->vs.base.state_offset);
   if (brw->ff_gs.prog_active)
      OUT_RELOC(brw->batch.state.bo, 0, brw->ff_gs.state_offset | 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->batch.state.bo, 0, brw->clip.state_offset | 1);
   OUT_RELOC(brw->batch.state.bo, 0, brw->sf.state_offset);
   OUT_RELOC(brw->batch.state.bo, 0, brw->wm.base.state_offset);
   OUT_RELOC(brw->batch.state.bo, 0, brw->cc.state_offset);
   ADVANCE_BATCH();

   brw->ctx.NewDriverState |= BRW_NEW_PSP;
}

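/* Re-emit the pipelined state pointers together with the URB fence and CS
 * URB state, since all of them reference the Gen4 fixed-function unit state
 * offsets and must stay in sync.
 */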
static void
upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FF_GS_PROG_DATA |
             BRW_NEW_GEN4_UNIT_STATE |
             BRW_NEW_STATE_BASE_ADDRESS |
             BRW_NEW_URB_FENCE,
   },
   .emit = upload_psp_urb_cbs,
};

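/* Pick the 3DSTATE_DEPTH_BUFFER format for the current drawbuffer.  If only
 * a packed depth/stencil renderbuffer (with no separate stencil miptree) is
 * bound to the stencil attachment, the depth format is derived from it; with
 * no usable depth attachment at all we fall back to D32_FLOAT.
 */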
uint32_t
brw_depthbuffer_format(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *drb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *srb;

   if (!drb &&
       (srb = intel_get_renderbuffer(fb, BUFFER_STENCIL)) &&
       !srb->mt->stencil_mt &&
       (intel_rb_format(srb) == MESA_FORMAT_Z24_UNORM_S8_UINT ||
        intel_rb_format(srb) == MESA_FORMAT_Z32_FLOAT_S8X24_UINT)) {
      drb = srb;
   }

   if (!drb)
      return BRW_DEPTHFORMAT_D32_FLOAT;

   return brw_depth_format(brw, drb->mt->format);
}

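/* Return the miptree holding stencil data for a renderbuffer: the separate
 * stencil miptree if one exists, otherwise the renderbuffer's own miptree.
 */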
static struct intel_mipmap_tree *
get_stencil_miptree(struct intel_renderbuffer *irb)
{
   if (!irb)
      return NULL;
   if (irb->mt->stencil_mt)
      return irb->mt->stencil_mt;
   return intel_renderbuffer_get_mt(irb);
}

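/* Check whether the renderbuffer's intra-tile offsets satisfy the pre-Gen6
 * depth coordinate alignment rules, and if not, blit the level to a
 * temporary single-slice miptree so rendering starts at offset (0, 0).
 * Returns true if the renderbuffer was moved.
 */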
static bool
rebase_depth_stencil(struct brw_context *brw, struct intel_renderbuffer *irb,
                     bool invalidate)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   uint32_t tile_mask_x = 0, tile_mask_y = 0;

   intel_get_tile_masks(irb->mt->surf.tiling, irb->mt->cpp,
                        &tile_mask_x, &tile_mask_y);
   assert(!intel_miptree_level_has_hiz(irb->mt, irb->mt_level));

   uint32_t tile_x = irb->draw_x & tile_mask_x;
   uint32_t tile_y = irb->draw_y & tile_mask_y;

   /* According to the Sandy Bridge PRM, volume 2 part 1, pp. 326-327
    * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
    * Coordinate Offset X/Y":
    *
    *   "The 3 LSBs of both offsets must be zero to ensure correct
    *   alignment"
    */
   bool rebase = tile_x & 7 || tile_y & 7;

   /* We didn't even have intra-tile offsets before g45. */
   rebase |= (!devinfo->has_surface_tile_offset && (tile_x || tile_y));

   if (rebase) {
      perf_debug("HW workaround: blitting depth level %d to a temporary "
                 "to fix alignment (depth tile offset %d,%d)\n",
                 irb->mt_level, tile_x, tile_y);
      intel_renderbuffer_move_to_temp(brw, irb, invalidate);

      /* There is now only a single-slice miptree. */
      brw->depthstencil.tile_x = 0;
      brw->depthstencil.tile_y = 0;
      brw->depthstencil.depth_offset = 0;
      return true;
   }

   /* While we just tried to get everything aligned, we may have failed to do
    * so in the case of rendering to array or 3D textures, where nonzero faces
    * will still have an offset post-rebase.  At least give an informative
    * warning.
    */
   WARN_ONCE((tile_x & 7) || (tile_y & 7),
             "Depth/stencil buffer needs alignment to 8-pixel boundaries.\n"
             "Truncating offset (%u:%u), bad rendering may occur.\n",
             tile_x, tile_y);
   tile_x &= ~7;
   tile_y &= ~7;

   brw->depthstencil.tile_x = tile_x;
   brw->depthstencil.tile_y = tile_y;
   brw->depthstencil.depth_offset = intel_miptree_get_aligned_offset(
                                       irb->mt,
                                       irb->draw_x & ~tile_mask_x,
                                       irb->draw_y & ~tile_mask_y);

   return false;
}

void
brw_workaround_depthstencil_alignment(struct brw_context *brw,
                                      GLbitfield clear_mask)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL;
   bool invalidate_depth = clear_mask & BUFFER_BIT_DEPTH;
   bool invalidate_stencil = clear_mask & BUFFER_BIT_STENCIL;

   if (depth_irb)
      depth_mt = depth_irb->mt;

   /* Initialize brw->depthstencil to 'nop' workaround state. */
   brw->depthstencil.tile_x = 0;
   brw->depthstencil.tile_y = 0;
   brw->depthstencil.depth_offset = 0;

   /* Gen6+ doesn't require the workarounds, since we always program the
    * surface state at the start of the whole surface.
    */
   if (devinfo->gen >= 6)
      return;

   /* Check if depth buffer is in depth/stencil format.  If so, then it's only
    * safe to invalidate it if we're also clearing stencil.
    */
   if (depth_irb && invalidate_depth &&
       _mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL)
      invalidate_depth = invalidate_stencil && stencil_irb;

   if (depth_irb) {
      if (rebase_depth_stencil(brw, depth_irb, invalidate_depth)) {
         /* In the case of stencil_irb being the same packed depth/stencil
          * texture but not the same rb, make it point at our rebased mt, too.
          */
         if (stencil_irb &&
             stencil_irb != depth_irb &&
             stencil_irb->mt == depth_mt) {
            intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
            intel_renderbuffer_set_draw_offset(stencil_irb);
         }
      }

      if (stencil_irb) {
         assert(stencil_irb->mt == depth_irb->mt);
         assert(stencil_irb->mt_level == depth_irb->mt_level);
         assert(stencil_irb->mt_layer == depth_irb->mt_layer);
      }
   }

   /* If there is no depth attachment, consider if stencil needs rebase. */
   if (!depth_irb && stencil_irb)
      rebase_depth_stencil(brw, stencil_irb, invalidate_stencil);
}

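/* Emit 3DSTATE_DEPTH_BUFFER for Gen4/Gen5.  HiZ and separate stencil are
 * never used on these generations, so the hiz, separate_stencil, and
 * stencil_mt parameters exist only to match the shared call signature and
 * are asserted to be off.
 */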
static void
brw_emit_depth_stencil_hiz(struct brw_context *brw,
                           struct intel_mipmap_tree *depth_mt,
                           uint32_t depth_offset, uint32_t depthbuffer_format,
                           uint32_t depth_surface_type,
                           struct intel_mipmap_tree *stencil_mt,
                           bool hiz, bool separate_stencil,
                           uint32_t width, uint32_t height,
                           uint32_t tile_x, uint32_t tile_y)
{
   (void)hiz;
   (void)separate_stencil;
   (void)stencil_mt;

   assert(!hiz);
   assert(!separate_stencil);

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const unsigned len = (devinfo->is_g4x || devinfo->gen == 5) ? 6 : 5;

   BEGIN_BATCH(len);
   OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
   OUT_BATCH((depth_mt ? depth_mt->surf.row_pitch - 1 : 0) |
             (depthbuffer_format << 18) |
             (BRW_TILEWALK_YMAJOR << 26) |
             (1 << 27) |
             (depth_surface_type << 29));

   if (depth_mt) {
      OUT_RELOC(depth_mt->bo, RELOC_WRITE, depth_offset);
   } else {
      OUT_BATCH(0);
   }

   OUT_BATCH(((width + tile_x - 1) << 6) |
             ((height + tile_y - 1) << 19));
   OUT_BATCH(0);

   if (devinfo->is_g4x || devinfo->gen >= 5)
      OUT_BATCH(tile_x | (tile_y << 16));
   else
      assert(tile_x == 0 && tile_y == 0);

   if (devinfo->gen >= 6)
      OUT_BATCH(0);

   ADVANCE_BATCH();
}

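/* Emit the depth/stencil buffer state for the current framebuffer.  On
 * Gen4/5 this goes through brw_emit_depth_stencil_hiz() above; on Gen6+
 * the packets are packed by isl from the bound depth, stencil, and HiZ
 * surfaces.
 */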
void
brw_emit_depthbuffer(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = intel_renderbuffer_get_mt(depth_irb);
   struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
   uint32_t tile_x = brw->depthstencil.tile_x;
   uint32_t tile_y = brw->depthstencil.tile_y;
   bool hiz = depth_irb && intel_renderbuffer_has_hiz(depth_irb);
   bool separate_stencil = false;
   uint32_t depth_surface_type = BRW_SURFACE_NULL;
   uint32_t depthbuffer_format = BRW_DEPTHFORMAT_D32_FLOAT;
   uint32_t depth_offset = 0;
   uint32_t width = 1, height = 1;

   if (stencil_mt) {
      separate_stencil = stencil_mt->format == MESA_FORMAT_S_UINT8;

      /* Gen7 and later support only separate stencil. */
      assert(separate_stencil || devinfo->gen < 7);
   }

   /* If there's a packed depth/stencil bound to stencil only, we need to
    * emit the packed depth/stencil buffer packet.
    */
   if (!depth_irb && stencil_irb && !separate_stencil) {
      depth_irb = stencil_irb;
      depth_mt = stencil_mt;
   }

   if (depth_irb && depth_mt) {
      /* When 3DSTATE_DEPTH_BUFFER.Separate_Stencil_Enable is set, then
       * 3DSTATE_DEPTH_BUFFER.Surface_Format is not permitted to be a packed
       * depthstencil format.
       *
       * Gens prior to 7 require that HiZ_Enable and Separate_Stencil_Enable
       * be set to the same value.  Gen 7 and later implicitly always set
       * Separate_Stencil_Enable; software cannot disable it.
       */
      if ((devinfo->gen < 7 && hiz) || devinfo->gen >= 7) {
         assert(!_mesa_is_format_packed_depth_stencil(depth_mt->format));
      }

      /* Prior to Gen7, if using separate stencil, hiz must be enabled. */
      assert(devinfo->gen >= 7 || !separate_stencil || hiz);

      assert(devinfo->gen < 6 || depth_mt->surf.tiling == ISL_TILING_Y0);
      assert(!hiz || depth_mt->surf.tiling == ISL_TILING_Y0);

      depthbuffer_format = brw_depthbuffer_format(brw);
      depth_surface_type = BRW_SURFACE_2D;
      depth_offset = brw->depthstencil.depth_offset;
      width = depth_irb->Base.Base.Width;
      height = depth_irb->Base.Base.Height;
   } else if (separate_stencil) {
      /*
       * There exists a separate stencil buffer but no depth buffer.
       *
       * The stencil buffer inherits most of its fields from
       * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
       * height.
       *
       * The tiled bit must be set.  From the Sandybridge PRM, Volume 2, Part 1,
       * Section 7.5.5.1.1 3DSTATE_DEPTH_BUFFER, Bit 1.27 Tiled Surface:
       *
       *     [DevGT+]: This field must be set to TRUE.
       */
      assert(brw->has_separate_stencil);

      depth_surface_type = BRW_SURFACE_2D;
      width = stencil_irb->Base.Base.Width;
      height = stencil_irb->Base.Base.Height;
   }

   if (depth_mt)
      brw_cache_flush_for_depth(brw, depth_mt->bo);
   if (stencil_mt)
      brw_cache_flush_for_depth(brw, stencil_mt->bo);

   if (devinfo->gen < 6) {
      brw_emit_depth_stencil_hiz(brw, depth_mt, depth_offset,
                                 depthbuffer_format, depth_surface_type,
                                 stencil_mt, hiz, separate_stencil,
                                 width, height, tile_x, tile_y);
      return;
   }

   /* Skip repeated NULL depth/stencil emits (think 2D rendering). */
   if (!depth_mt && !stencil_mt && brw->no_depth_or_stencil) {
      assert(brw->hw_ctx);
      return;
   }

   brw_emit_depth_stall_flushes(brw);

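   /* Reserve space for the combined depth/stencil/hiz packets in the batch
    * and note its offset, so the relocations below can point at the right
    * dwords; isl packs the actual packets into this space at the end.
    */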
   const unsigned ds_dwords = brw->isl_dev.ds.size / 4;
   intel_batchbuffer_begin(brw, ds_dwords, RENDER_RING);
   uint32_t *ds_map = brw->batch.map_next;
   const uint32_t ds_offset = (char *)ds_map - (char *)brw->batch.batch.map;

   struct isl_view view = {
      /* Some nice defaults */
      .base_level = 0,
      .levels = 1,
      .base_array_layer = 0,
      .array_len = 1,
      .swizzle = ISL_SWIZZLE_IDENTITY,
   };

   struct isl_depth_stencil_hiz_emit_info info = {
      .view = &view,
   };

   if (depth_mt) {
      view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
      info.depth_surf = &depth_mt->surf;

      info.depth_address =
         brw_batch_reloc(&brw->batch,
                         ds_offset + brw->isl_dev.ds.depth_offset,
                         depth_mt->bo, depth_mt->offset, RELOC_WRITE);

      info.mocs = brw_get_bo_mocs(devinfo, depth_mt->bo);
      view.base_level = depth_irb->mt_level - depth_irb->mt->first_level;
      view.base_array_layer = depth_irb->mt_layer;
      view.array_len = MAX2(depth_irb->layer_count, 1);
      view.format = depth_mt->surf.format;

      info.hiz_usage = depth_mt->aux_usage;
      if (!intel_renderbuffer_has_hiz(depth_irb)) {
         /* Just because a miptree has ISL_AUX_USAGE_HIZ does not mean that
          * all miplevels of that miptree are guaranteed to support HiZ.  See
          * intel_miptree_level_enable_hiz for details.
          */
         info.hiz_usage = ISL_AUX_USAGE_NONE;
      }

      if (info.hiz_usage == ISL_AUX_USAGE_HIZ) {
         info.hiz_surf = &depth_mt->aux_buf->surf;

         uint32_t hiz_offset = 0;
         if (devinfo->gen == 6) {
            /* HiZ surfaces on Sandy Bridge technically don't support
             * mip-mapping.  However, we can fake it by offsetting to the
             * first slice of LOD0 in the HiZ surface.
             */
            isl_surf_get_image_offset_B_tile_sa(&depth_mt->aux_buf->surf,
                                                view.base_level, 0, 0,
                                                &hiz_offset, NULL, NULL);
         }

         info.hiz_address =
            brw_batch_reloc(&brw->batch,
                            ds_offset + brw->isl_dev.ds.hiz_offset,
                            depth_mt->aux_buf->bo,
                            depth_mt->aux_buf->offset + hiz_offset,
                            RELOC_WRITE);
      }

      info.depth_clear_value = depth_mt->fast_clear_color.f32[0];
   }

   if (stencil_mt) {
      view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
      info.stencil_surf = &stencil_mt->surf;

      if (!depth_mt) {
         info.mocs = brw_get_bo_mocs(devinfo, stencil_mt->bo);
         view.base_level = stencil_irb->mt_level - stencil_irb->mt->first_level;
         view.base_array_layer = stencil_irb->mt_layer;
         view.array_len = MAX2(stencil_irb->layer_count, 1);
         view.format = stencil_mt->surf.format;
      }

      uint32_t stencil_offset = 0;
      if (devinfo->gen == 6) {
         /* Stencil surfaces on Sandy Bridge technically don't support
          * mip-mapping.  However, we can fake it by offsetting to the
          * first slice of LOD0 in the stencil surface.
          */
         isl_surf_get_image_offset_B_tile_sa(&stencil_mt->surf,
                                             view.base_level, 0, 0,
                                             &stencil_offset, NULL, NULL);
      }

      info.stencil_address =
         brw_batch_reloc(&brw->batch,
                         ds_offset + brw->isl_dev.ds.stencil_offset,
                         stencil_mt->bo,
                         stencil_mt->offset + stencil_offset,
                         RELOC_WRITE);
   }

   isl_emit_depth_stencil_hiz_s(&brw->isl_dev, ds_map, &info);

   brw->batch.map_next += ds_dwords;
   intel_batchbuffer_advance(brw);

   brw->no_depth_or_stencil = !depth_mt && !stencil_mt;
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_AUX_STATE |
             BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = brw_emit_depthbuffer,
};

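/* Switch between the 3D and GPGPU (compute) pipelines with PIPELINE_SELECT,
 * applying the surrounding flushes and per-generation workarounds the
 * hardware requires around the mode change.
 */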
void
brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool is_965 = devinfo->gen == 4 && !devinfo->is_g4x;
   const uint32_t _3DSTATE_PIPELINE_SELECT =
      is_965 ? CMD_PIPELINE_SELECT_965 : CMD_PIPELINE_SELECT_GM45;

   if (devinfo->gen >= 8 && devinfo->gen < 10) {
      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
       *
       *   Software must clear the COLOR_CALC_STATE Valid field in
       *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
       *   with Pipeline Select set to GPGPU.
       *
       * The internal hardware docs recommend the same workaround for Gen9
       * hardware too.
       */
      if (pipeline == BRW_COMPUTE_PIPELINE) {
         BEGIN_BATCH(2);
         OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
         OUT_BATCH(0);
         ADVANCE_BATCH();

         brw->ctx.NewDriverState |= BRW_NEW_CC_STATE;
      }
   }

   if (devinfo->gen >= 6) {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: DEVSNB+
       *
       *   Software must ensure all the write caches are flushed through a
       *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
       *   command to invalidate read only caches prior to programming
       *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
       */
      const unsigned dc_flush =
         devinfo->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  dc_flush |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE);

   } else {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: PRE-DEVSNB
       *
       *   Software must ensure the current pipeline is flushed via an
       *   MI_FLUSH or PIPE_CONTROL prior to the execution of PIPELINE_SELECT.
       */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   /* Select the pipeline */
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_PIPELINE_SELECT << 16 |
             (devinfo->gen >= 9 ? (3 << 8) : 0) |
             (pipeline == BRW_COMPUTE_PIPELINE ? 2 : 0));
   ADVANCE_BATCH();

   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       pipeline == BRW_RENDER_PIPELINE) {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: DEVIVB, DEVHSW:GT3:A0
       *
       *   Software must send a pipe_control with a CS stall and a post sync
       *   operation and then a dummy DRAW after every MI_SET_CONTEXT and
       *   after any PIPELINE_SELECT that is enabling 3D mode.
       */
      gen7_emit_cs_stall_flush(brw);

      BEGIN_BATCH(7);
      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
      OUT_BATCH(_3DPRIM_POINTLIST);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   if (devinfo->is_geminilake) {
      /* Project: DevGLK
       *
       * "This chicken bit works around a hardware issue with barrier logic
       *  encountered when switching between GPGPU and 3D pipelines.  To
       *  workaround the issue, this mode bit should be set after a pipeline
       *  is selected."
       */
      const unsigned barrier_mode =
         pipeline == BRW_RENDER_PIPELINE ? GLK_SCEC_BARRIER_MODE_3D_HULL
                                         : GLK_SCEC_BARRIER_MODE_GPGPU;
      brw_load_register_imm32(brw, SLICE_COMMON_ECO_CHICKEN1,
                              barrier_mode | GLK_SCEC_BARRIER_MODE_MASK);
   }
}

/**
 * Misc invariant state packets
 */
void
brw_upload_invariant_state(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool is_965 = devinfo->gen == 4 && !devinfo->is_g4x;

   brw_emit_select_pipeline(brw, BRW_RENDER_PIPELINE);
   brw->last_pipeline = BRW_RENDER_PIPELINE;

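   /* Emit STATE_SIP with a zeroed System Instruction Pointer; we don't
    * install a SIP kernel, so exceptions are effectively left unhandled.
    * Gen8+ takes a 64-bit pointer and thus a 3-dword packet.
    */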
   if (devinfo->gen >= 8) {
      BEGIN_BATCH(3);
      OUT_BATCH(CMD_STATE_SIP << 16 | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(2);
      OUT_BATCH(CMD_STATE_SIP << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* Original Gen4 doesn't have 3DSTATE_AA_LINE_PARAMETERS. */
   if (!is_965) {
      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
      /* use legacy aa line coverage computation */
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

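   /* The low bit of the 3DSTATE_VF_STATISTICS header enables the vertex
    * fetch statistics counters; the opcode differs between original Gen4
    * and GM45+.
    */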
   const uint32_t _3DSTATE_VF_STATISTICS =
      is_965 ? GEN4_3DSTATE_VF_STATISTICS : GM45_3DSTATE_VF_STATISTICS;
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_VF_STATISTICS << 16 | 1);
   ADVANCE_BATCH();
}

/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations for the objects,
 * and is actually required for binding table pointers on gen6.
 *
 * Surface state base address covers binding table pointers and
 * surface state objects, but not the surfaces that the surface state
 * objects point to.
 */
void
brw_upload_state_base_address(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (brw->batch.state_base_address_emitted)
      return;

   /* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of
    * vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be
    * programmed prior to STATE_BASE_ADDRESS.
    *
    * However, given that the instruction SBA (general state base
    * address) on this chipset is always set to 0 across X and GL,
    * maybe this isn't required for us in particular.
    */

   if (devinfo->gen >= 6) {
      const unsigned dc_flush =
         devinfo->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;

      /* Emit a render target cache flush.
       *
       * This isn't documented anywhere in the PRM.  However, it seems to be
       * necessary prior to changing the surface state base address.  We've
       * seen issues in Vulkan where we get GPU hangs when using multi-level
       * command buffers which clear depth, reset state base address, and then
       * go render stuff.
       *
       * Normally, in GL, we would trust the kernel to do sufficient stalls
       * and flushes prior to executing our batch.  However, it doesn't seem
       * as if the kernel's flushing is always sufficient and we don't want to
       * rely on it.
       *
       * We make this an end-of-pipe sync instead of a normal flush because we
       * do not know the current status of the GPU.  On Haswell at least,
       * having a fast-clear operation in flight at the same time as a normal
       * rendering operation can cause hangs.  Since the kernel's flushing is
       * insufficient, we need to ensure that any rendering operations from
       * other processes are definitely complete before we try to do our own
       * rendering.  It's a bit of a big hammer but it appears to work.
       */
      brw_emit_end_of_pipe_sync(brw,
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                dc_flush);
   }

   if (devinfo->gen >= 8) {
      /* STATE_BASE_ADDRESS has issues with 48-bit address spaces.  If the
       * address + size as seen by STATE_BASE_ADDRESS overflows 48 bits,
       * the GPU appears to treat all accesses to the buffer as being out
       * of bounds and returns zero.  To work around this, we pin all SBAs
       * to the bottom 4GB.
       */
      uint32_t mocs_wb = devinfo->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
      int pkt_len = devinfo->gen >= 9 ? 19 : 16;

      BEGIN_BATCH(pkt_len);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (pkt_len - 2));
      /* General state base address: stateless DP read/write requests */
      OUT_BATCH(mocs_wb << 4 | 1);
      OUT_BATCH(0);
      OUT_BATCH(mocs_wb << 16);
      /* Surface state base address: */
      OUT_RELOC64(brw->batch.state.bo, RELOC_32BIT, mocs_wb << 4 | 1);
      /* Dynamic state base address: */
      OUT_RELOC64(brw->batch.state.bo, RELOC_32BIT, mocs_wb << 4 | 1);
      /* Indirect object base address: MEDIA_OBJECT data */
      OUT_BATCH(mocs_wb << 4 | 1);
      OUT_BATCH(0);
      /* Instruction base address: shader kernels (incl. SIP) */
      OUT_RELOC64(brw->cache.bo, RELOC_32BIT, mocs_wb << 4 | 1);
      /* General state buffer size */
      OUT_BATCH(0xfffff001);
      /* Dynamic state buffer size */
      OUT_BATCH(ALIGN(MAX_STATE_SIZE, 4096) | 1);
      /* Indirect object upper bound */
      OUT_BATCH(0xfffff001);
      /* Instruction access upper bound */
      OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
      if (devinfo->gen >= 9) {
         OUT_BATCH(1);
         OUT_BATCH(0);
         OUT_BATCH(0);
      }
      ADVANCE_BATCH();
   } else if (devinfo->gen >= 6) {
      uint8_t mocs = devinfo->gen == 7 ? GEN7_MOCS_L3 : 0;

      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
      OUT_BATCH(mocs << 8 | /* General State Memory Object Control State */
                mocs << 4 | /* Stateless Data Port Access Memory Object Control State */
                1); /* General State Base Address Modify Enable */
      /* Surface state base address:
       * BINDING_TABLE_STATE
       * SURFACE_STATE
       */
      OUT_RELOC(brw->batch.state.bo, 0, 1);
      /* Dynamic state base address:
       * SAMPLER_STATE
       * SAMPLER_BORDER_COLOR_STATE
       * CLIP, SF, WM/CC viewport state
       * COLOR_CALC_STATE
       * DEPTH_STENCIL_STATE
       * BLEND_STATE
       * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
       * Disable is clear, which we rely on)
       */
      OUT_RELOC(brw->batch.state.bo, 0, 1);

      OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */

      /* Instruction base address: shader kernels (incl. SIP) */
      OUT_RELOC(brw->cache.bo, 0, 1);

      OUT_BATCH(1); /* General state upper bound */
      /* Dynamic state upper bound.  Although the documentation says that
       * programming it to zero will cause it to be ignored, that is a lie.
       * If this isn't programmed to a real bound, the sampler border color
       * pointer is rejected, causing border color to mysteriously fail.
       */
      OUT_BATCH(0xfffff001);
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else if (devinfo->gen == 5) {
      BEGIN_BATCH(8);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_RELOC(brw->cache.bo, 0, 1); /* Instruction base address */
      OUT_BATCH(0xfffff001); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }

   if (devinfo->gen >= 6) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }

   /* According to section 3.6.1 of VOL1 of the 965 PRM,
    * STATE_BASE_ADDRESS updates require a reissue of:
    *
    * 3DSTATE_PIPELINE_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * and this continues through Ironlake.  The Sandy Bridge PRM, vol
    * 1 part 1 says that the following packets must be reissued:
    *
    * 3DSTATE_CC_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * 3DSTATE_SAMPLER_STATE_POINTERS
    * 3DSTATE_VIEWPORT_STATE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * Those are always reissued following SBA updates anyway (new
    * batch time), except in the case of the program cache BO
    * changing.  Having a separate state flag makes the sequence more
    * obvious.
    */

   brw->ctx.NewDriverState |= BRW_NEW_STATE_BASE_ADDRESS;
   brw->batch.state_base_address_emitted = true;
}