i965: drop brw->gen in favor of devinfo->gen
[mesa.git] / src / mesa / drivers / dri / i965 / brw_misc_state.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "compiler/brw_eu_defines.h"

#include "main/framebuffer.h"
#include "main/fbobject.h"
#include "main/format_utils.h"
#include "main/glformats.h"

/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void
upload_pipelined_state_pointers(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen == 5) {
      /* Need to flush before changing clip max threads for errata. */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   BEGIN_BATCH(7);
   OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->batch.bo, 0, brw->vs.base.state_offset);
   if (brw->ff_gs.prog_active)
      OUT_RELOC(brw->batch.bo, 0, brw->ff_gs.state_offset | 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->batch.bo, 0, brw->clip.state_offset | 1);
   OUT_RELOC(brw->batch.bo, 0, brw->sf.state_offset);
   OUT_RELOC(brw->batch.bo, 0, brw->wm.base.state_offset);
   OUT_RELOC(brw->batch.bo, 0, brw->cc.state_offset);
   ADVANCE_BATCH();

   brw->ctx.NewDriverState |= BRW_NEW_PSP;
}

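/* Emit the pipelined state pointers together with the URB fence and CS URB
 * state.  The dirty bits below group them so that all three are re-emitted
 * as a unit whenever any of the state they depend on changes.
 */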
static void
upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FF_GS_PROG_DATA |
             BRW_NEW_GEN4_UNIT_STATE |
             BRW_NEW_STATE_BASE_ADDRESS |
             BRW_NEW_URB_FENCE,
   },
   .emit = upload_psp_urb_cbs,
};

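/**
 * Return the BRW_DEPTHFORMAT_* value for the current depth attachment, or
 * for a packed depth/stencil buffer that is bound as stencil-only.  Falls
 * back to D32_FLOAT when no depth buffer is bound at all.
 */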
uint32_t
brw_depthbuffer_format(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *drb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *srb;

   if (!drb &&
       (srb = intel_get_renderbuffer(fb, BUFFER_STENCIL)) &&
       !srb->mt->stencil_mt &&
       (intel_rb_format(srb) == MESA_FORMAT_Z24_UNORM_S8_UINT ||
        intel_rb_format(srb) == MESA_FORMAT_Z32_FLOAT_S8X24_UINT)) {
      drb = srb;
   }

   if (!drb)
      return BRW_DEPTHFORMAT_D32_FLOAT;

   return brw_depth_format(brw, drb->mt->format);
}

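/* Return the miptree holding the renderbuffer's stencil data, preferring
 * the separate stencil miptree when one exists.
 */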
static struct intel_mipmap_tree *
get_stencil_miptree(struct intel_renderbuffer *irb)
{
   if (!irb)
      return NULL;
   if (irb->mt->stencil_mt)
      return irb->mt->stencil_mt;
   return intel_renderbuffer_get_mt(irb);
}

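/**
 * Check whether the renderbuffer's intra-tile offsets satisfy the hardware's
 * alignment requirements and, if not, blit the level to a temporary
 * single-slice miptree ("rebase" it) so that rendering starts at offset
 * (0, 0).  Returns true if the buffer was rebased.
 */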
static bool
rebase_depth_stencil(struct brw_context *brw, struct intel_renderbuffer *irb,
                     bool invalidate)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t tile_mask_x = 0, tile_mask_y = 0;

   intel_get_tile_masks(irb->mt->surf.tiling, irb->mt->cpp,
                        &tile_mask_x, &tile_mask_y);
   assert(!intel_miptree_level_has_hiz(irb->mt, irb->mt_level));

   uint32_t tile_x = irb->draw_x & tile_mask_x;
   uint32_t tile_y = irb->draw_y & tile_mask_y;

   /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
    * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
    * Coordinate Offset X/Y":
    *
    *   "The 3 LSBs of both offsets must be zero to ensure correct
    *   alignment"
    */
   bool rebase = tile_x & 7 || tile_y & 7;

   /* We didn't even have intra-tile offsets before g45. */
   rebase |= (!brw->has_surface_tile_offset && (tile_x || tile_y));

   if (rebase) {
      perf_debug("HW workaround: blitting depth level %d to a temporary "
                 "to fix alignment (depth tile offset %d,%d)\n",
                 irb->mt_level, tile_x, tile_y);
      intel_renderbuffer_move_to_temp(brw, irb, invalidate);

      /* There is now only a single-slice miptree. */
      brw->depthstencil.tile_x = 0;
      brw->depthstencil.tile_y = 0;
      brw->depthstencil.depth_offset = 0;
      return true;
   }

   /* While we just tried to get everything aligned, we may have failed to do
    * so in the case of rendering to array or 3D textures, where nonzero faces
    * will still have an offset post-rebase.  At least give an informative
    * warning.
    */
   WARN_ONCE((tile_x & 7) || (tile_y & 7),
             "Depth/stencil buffer needs alignment to 8-pixel boundaries.\n"
             "Truncating offset (%u:%u), bad rendering may occur.\n",
             tile_x, tile_y);
   tile_x &= ~7;
   tile_y &= ~7;

   brw->depthstencil.tile_x = tile_x;
   brw->depthstencil.tile_y = tile_y;
   brw->depthstencil.depth_offset = intel_miptree_get_aligned_offset(
                                       irb->mt,
                                       irb->draw_x & ~tile_mask_x,
                                       irb->draw_y & ~tile_mask_y);

   return false;
}

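/**
 * On Gen4-5, rebase the depth and/or stencil renderbuffers so that the
 * hardware's tile-offset alignment rules are satisfied before drawing or
 * clearing, recording the resulting tile offsets in brw->depthstencil.
 * clear_mask names the buffers about to be cleared, which may therefore be
 * invalidated rather than copied during the rebase.
 */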
void
brw_workaround_depthstencil_alignment(struct brw_context *brw,
                                      GLbitfield clear_mask)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL;
   bool invalidate_depth = clear_mask & BUFFER_BIT_DEPTH;
   bool invalidate_stencil = clear_mask & BUFFER_BIT_STENCIL;

   if (depth_irb)
      depth_mt = depth_irb->mt;

   /* Initialize brw->depthstencil to 'nop' workaround state. */
   brw->depthstencil.tile_x = 0;
   brw->depthstencil.tile_y = 0;
   brw->depthstencil.depth_offset = 0;

   /* Gen6+ doesn't require the workarounds, since we always program the
    * surface state at the start of the whole surface.
    */
   if (devinfo->gen >= 6)
      return;

   /* Check if depth buffer is in depth/stencil format.  If so, then it's only
    * safe to invalidate it if we're also clearing stencil.
    */
   if (depth_irb && invalidate_depth &&
       _mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL)
      invalidate_depth = invalidate_stencil && stencil_irb;

   if (depth_irb) {
      if (rebase_depth_stencil(brw, depth_irb, invalidate_depth)) {
         /* In the case of stencil_irb being the same packed depth/stencil
          * texture but not the same rb, make it point at our rebased mt, too.
          */
         if (stencil_irb &&
             stencil_irb != depth_irb &&
             stencil_irb->mt == depth_mt) {
            intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
            intel_renderbuffer_set_draw_offset(stencil_irb);
         }
      }

      if (stencil_irb) {
         assert(stencil_irb->mt == depth_irb->mt);
         assert(stencil_irb->mt_level == depth_irb->mt_level);
         assert(stencil_irb->mt_layer == depth_irb->mt_layer);
      }
   }

   /* If there is no depth attachment, consider if stencil needs rebase. */
   if (!depth_irb && stencil_irb)
      rebase_depth_stencil(brw, stencil_irb, invalidate_stencil);
}

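/**
 * Gather the current depth, stencil, and HiZ state from the draw buffer and
 * hand it to the per-generation 3DSTATE_DEPTH_BUFFER emitter via
 * brw->vtbl.emit_depth_stencil_hiz().
 */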
void
brw_emit_depthbuffer(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = intel_renderbuffer_get_mt(depth_irb);
   struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
   uint32_t tile_x = brw->depthstencil.tile_x;
   uint32_t tile_y = brw->depthstencil.tile_y;
   bool hiz = depth_irb && intel_renderbuffer_has_hiz(depth_irb);
   bool separate_stencil = false;
   uint32_t depth_surface_type = BRW_SURFACE_NULL;
   uint32_t depthbuffer_format = BRW_DEPTHFORMAT_D32_FLOAT;
   uint32_t depth_offset = 0;
   uint32_t width = 1, height = 1;

   if (stencil_mt) {
      separate_stencil = stencil_mt->format == MESA_FORMAT_S_UINT8;

      /* Gen7+ supports only separate stencil. */
      assert(separate_stencil || devinfo->gen < 7);
   }

   /* If there's a packed depth/stencil bound to stencil only, we need to
    * emit the packed depth/stencil buffer packet.
    */
   if (!depth_irb && stencil_irb && !separate_stencil) {
      depth_irb = stencil_irb;
      depth_mt = stencil_mt;
   }

   if (depth_irb && depth_mt) {
      /* When 3DSTATE_DEPTH_BUFFER.Separate_Stencil_Enable is set, then
       * 3DSTATE_DEPTH_BUFFER.Surface_Format is not permitted to be a packed
       * depthstencil format.
       *
       * Gens prior to 7 require that HiZ_Enable and Separate_Stencil_Enable
       * be set to the same value.  Gen7 and later implicitly always set
       * Separate_Stencil_Enable; software cannot disable it.
       */
      if ((devinfo->gen < 7 && hiz) || devinfo->gen >= 7) {
         assert(!_mesa_is_format_packed_depth_stencil(depth_mt->format));
      }

      /* Prior to Gen7, if using separate stencil, hiz must be enabled. */
      assert(devinfo->gen >= 7 || !separate_stencil || hiz);

      assert(devinfo->gen < 6 || depth_mt->surf.tiling == ISL_TILING_Y0);
      assert(!hiz || depth_mt->surf.tiling == ISL_TILING_Y0);

      depthbuffer_format = brw_depthbuffer_format(brw);
      depth_surface_type = BRW_SURFACE_2D;
      depth_offset = brw->depthstencil.depth_offset;
      width = depth_irb->Base.Base.Width;
      height = depth_irb->Base.Base.Height;
   } else if (separate_stencil) {
      /*
       * There exists a separate stencil buffer but no depth buffer.
       *
       * The stencil buffer inherits most of its fields from
       * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
       * height.
       *
       * The tiled bit must be set.  From the Sandybridge PRM, Volume 2, Part 1,
       * Section 7.5.5.1.1 3DSTATE_DEPTH_BUFFER, Bit 1.27 Tiled Surface:
       *
       *     [DevGT+]: This field must be set to TRUE.
       */
      assert(brw->has_separate_stencil);

      depth_surface_type = BRW_SURFACE_2D;
      width = stencil_irb->Base.Base.Width;
      height = stencil_irb->Base.Base.Height;
   }

   if (depth_mt)
      brw_render_cache_set_check_flush(brw, depth_mt->bo);
   if (stencil_mt)
      brw_render_cache_set_check_flush(brw, stencil_mt->bo);

   brw->vtbl.emit_depth_stencil_hiz(brw, depth_mt, depth_offset,
                                    depthbuffer_format, depth_surface_type,
                                    stencil_mt, hiz, separate_stencil,
                                    width, height, tile_x, tile_y);
}

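/**
 * Convert a depth value in [0, 1] to the bit pattern the hardware expects
 * for the given depth format.  For example, 1.0f yields 0xffff for
 * MESA_FORMAT_Z_UNORM16 and 0xffffff for MESA_FORMAT_Z24_UNORM_X8_UINT,
 * while MESA_FORMAT_Z_FLOAT32 simply returns the float's bit pattern.
 */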
uint32_t
brw_convert_depth_value(mesa_format format, float value)
{
   switch (format) {
   case MESA_FORMAT_Z_FLOAT32:
      return float_as_int(value);
   case MESA_FORMAT_Z_UNORM16:
      return value * ((1u << 16) - 1);
   case MESA_FORMAT_Z24_UNORM_X8_UINT:
      return value * ((1u << 24) - 1);
   default:
      unreachable("Invalid depth format");
   }
}

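/* Implementation of the emit_depth_stencil_hiz() hook for hardware without
 * HiZ or separate-stencil support (Gen4-5): both features are asserted off,
 * and only the plain 3DSTATE_DEPTH_BUFFER packet is emitted (6 dwords on
 * G4x/Gen5, 5 before that).
 */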
void
brw_emit_depth_stencil_hiz(struct brw_context *brw,
                           struct intel_mipmap_tree *depth_mt,
                           uint32_t depth_offset, uint32_t depthbuffer_format,
                           uint32_t depth_surface_type,
                           struct intel_mipmap_tree *stencil_mt,
                           bool hiz, bool separate_stencil,
                           uint32_t width, uint32_t height,
                           uint32_t tile_x, uint32_t tile_y)
{
   (void)hiz;
   (void)separate_stencil;
   (void)stencil_mt;

   assert(!hiz);
   assert(!separate_stencil);

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const unsigned len = (brw->is_g4x || devinfo->gen == 5) ? 6 : 5;

   BEGIN_BATCH(len);
   OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
   OUT_BATCH((depth_mt ? depth_mt->surf.row_pitch - 1 : 0) |
             (depthbuffer_format << 18) |
             (BRW_TILEWALK_YMAJOR << 26) |
             (1 << 27) |
             (depth_surface_type << 29));

   if (depth_mt) {
      OUT_RELOC(depth_mt->bo, RELOC_WRITE, depth_offset);
   } else {
      OUT_BATCH(0);
   }

   OUT_BATCH(((width + tile_x - 1) << 6) |
             ((height + tile_y - 1) << 19));
   OUT_BATCH(0);

   if (brw->is_g4x || devinfo->gen >= 5)
      OUT_BATCH(tile_x | (tile_y << 16));
   else
      assert(tile_x == 0 && tile_y == 0);

   if (devinfo->gen >= 6)
      OUT_BATCH(0);

   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = brw_emit_depthbuffer,
};

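/**
 * Emit a PIPELINE_SELECT switching between the 3D and GPGPU (compute)
 * pipelines, including the flush and invalidation workarounds that the
 * various generations require around the packet.
 */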
void
brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool is_965 = devinfo->gen == 4 && !brw->is_g4x;
   const uint32_t _3DSTATE_PIPELINE_SELECT =
      is_965 ? CMD_PIPELINE_SELECT_965 : CMD_PIPELINE_SELECT_GM45;

   if (devinfo->gen >= 8 && devinfo->gen < 10) {
      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
       *
       *   "Software must clear the COLOR_CALC_STATE Valid field in
       *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
       *   with Pipeline Select set to GPGPU."
       *
       * The internal hardware docs recommend the same workaround for Gen9
       * hardware too.
       */
      if (pipeline == BRW_COMPUTE_PIPELINE) {
         BEGIN_BATCH(2);
         OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
         OUT_BATCH(0);
         ADVANCE_BATCH();

         brw->ctx.NewDriverState |= BRW_NEW_CC_STATE;
      }
   }

   if (devinfo->gen >= 6) {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: DEVSNB+
       *
       *   Software must ensure all the write caches are flushed through a
       *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
       *   command to invalidate read only caches prior to programming
       *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
       */
      const unsigned dc_flush =
         devinfo->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  dc_flush |
                                  PIPE_CONTROL_NO_WRITE |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                  PIPE_CONTROL_NO_WRITE);

   } else {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: PRE-DEVSNB
       *
       *   Software must ensure the current pipeline is flushed via an
       *   MI_FLUSH or PIPE_CONTROL prior to the execution of PIPELINE_SELECT.
       */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   /* Select the pipeline */
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_PIPELINE_SELECT << 16 |
             (devinfo->gen >= 9 ? (3 << 8) : 0) |
             (pipeline == BRW_COMPUTE_PIPELINE ? 2 : 0));
   ADVANCE_BATCH();

   if (devinfo->gen == 7 && !brw->is_haswell &&
       pipeline == BRW_RENDER_PIPELINE) {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: DEVIVB, DEVHSW:GT3:A0
       *
       *   Software must send a pipe_control with a CS stall and a post sync
       *   operation and then a dummy DRAW after every MI_SET_CONTEXT and
       *   after any PIPELINE_SELECT that is enabling 3D mode.
       */
      gen7_emit_cs_stall_flush(brw);

      BEGIN_BATCH(7);
      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
      OUT_BATCH(_3DPRIM_POINTLIST);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

/**
 * Misc invariant state packets
 */
void
brw_upload_invariant_state(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool is_965 = devinfo->gen == 4 && !brw->is_g4x;

   brw_emit_select_pipeline(brw, BRW_RENDER_PIPELINE);
   brw->last_pipeline = BRW_RENDER_PIPELINE;

   if (devinfo->gen >= 8) {
      BEGIN_BATCH(3);
      OUT_BATCH(CMD_STATE_SIP << 16 | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(2);
      OUT_BATCH(CMD_STATE_SIP << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* Original Gen4 doesn't have 3DSTATE_AA_LINE_PARAMETERS. */
   if (!is_965) {
      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
      /* use legacy aa line coverage computation */
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   const uint32_t _3DSTATE_VF_STATISTICS =
      is_965 ? GEN4_3DSTATE_VF_STATISTICS : GM45_3DSTATE_VF_STATISTICS;
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_VF_STATISTICS << 16 | 1);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_invariant_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT,
   },
   .emit = brw_upload_invariant_state
};

/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations for the objects,
 * and is actually required for binding table pointers on gen6.
 *
 * Surface state base address covers binding table pointers and
 * surface state objects, but not the surfaces that the surface state
 * objects point to.
 */
void
brw_upload_state_base_address(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (brw->batch.state_base_address_emitted)
      return;

   /* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of
    * vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be
    * programmed prior to STATE_BASE_ADDRESS.
    *
    * However, given that the instruction SBA (general state base
    * address) on this chipset is always set to 0 across X and GL,
    * maybe this isn't required for us in particular.
    */

   if (devinfo->gen >= 6) {
      const unsigned dc_flush =
         devinfo->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;

      /* Emit a render target cache flush.
       *
       * This isn't documented anywhere in the PRM.  However, it seems to be
       * necessary prior to changing the surface state base address.  We've
       * seen issues in Vulkan where we get GPU hangs when using multi-level
       * command buffers which clear depth, reset state base address, and then
       * go render stuff.
       *
       * Normally, in GL, we would trust the kernel to do sufficient stalls
       * and flushes prior to executing our batch.  However, it doesn't seem
       * as if the kernel's flushing is always sufficient and we don't want to
       * rely on it.
       *
       * We make this an end-of-pipe sync instead of a normal flush because we
       * do not know the current status of the GPU.  On Haswell at least,
       * having a fast-clear operation in flight at the same time as a normal
       * rendering operation can cause hangs.  Since the kernel's flushing is
       * insufficient, we need to ensure that any rendering operations from
       * other processes are definitely complete before we try to do our own
       * rendering.  It's a bit of a big hammer but it appears to work.
       */
      brw_emit_end_of_pipe_sync(brw,
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                dc_flush);
   }

   if (devinfo->gen >= 8) {
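      /* MOCS (Memory Object Control State) selects the caching policy for
       * the addresses programmed below; the WB settings request write-back
       * caching.
       */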
      uint32_t mocs_wb = devinfo->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
      int pkt_len = devinfo->gen >= 9 ? 19 : 16;

      BEGIN_BATCH(pkt_len);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (pkt_len - 2));
      /* General state base address: stateless DP read/write requests */
      OUT_BATCH(mocs_wb << 4 | 1);
      OUT_BATCH(0);
      OUT_BATCH(mocs_wb << 16);
      /* Surface state base address: */
      OUT_RELOC64(brw->batch.bo, 0, mocs_wb << 4 | 1);
      /* Dynamic state base address: */
      OUT_RELOC64(brw->batch.bo, 0, mocs_wb << 4 | 1);
      /* Indirect object base address: MEDIA_OBJECT data */
      OUT_BATCH(mocs_wb << 4 | 1);
      OUT_BATCH(0);
      /* Instruction base address: shader kernels (incl. SIP) */
      OUT_RELOC64(brw->cache.bo, 0, mocs_wb << 4 | 1);

      /* General state buffer size */
      OUT_BATCH(0xfffff001);
      /* Dynamic state buffer size */
      OUT_BATCH(ALIGN(brw->batch.bo->size, 4096) | 1);
      /* Indirect object upper bound */
      OUT_BATCH(0xfffff001);
      /* Instruction access upper bound */
      OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
      if (devinfo->gen >= 9) {
         OUT_BATCH(1);
         OUT_BATCH(0);
         OUT_BATCH(0);
      }
      ADVANCE_BATCH();
   } else if (devinfo->gen >= 6) {
      uint8_t mocs = devinfo->gen == 7 ? GEN7_MOCS_L3 : 0;

      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
      OUT_BATCH(mocs << 8 | /* General State Memory Object Control State */
                mocs << 4 | /* Stateless Data Port Access Memory Object Control State */
                1); /* General State Base Address Modify Enable */
      /* Surface state base address:
       * BINDING_TABLE_STATE
       * SURFACE_STATE
       */
      OUT_RELOC(brw->batch.bo, 0, 1);
      /* Dynamic state base address:
       * SAMPLER_STATE
       * SAMPLER_BORDER_COLOR_STATE
       * CLIP, SF, WM/CC viewport state
       * COLOR_CALC_STATE
       * DEPTH_STENCIL_STATE
       * BLEND_STATE
       * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
       * Disable is clear, which we rely on)
       */
      OUT_RELOC(brw->batch.bo, 0, 1);

      OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */

      /* Instruction base address: shader kernels (incl. SIP) */
      OUT_RELOC(brw->cache.bo, 0, 1);

      OUT_BATCH(1); /* General state upper bound */
      /* Dynamic state upper bound.  Although the documentation says that
       * programming it to zero will cause it to be ignored, that is a lie.
       * If this isn't programmed to a real bound, the sampler border color
       * pointer is rejected, causing border color to mysteriously fail.
       */
      OUT_BATCH(0xfffff001);
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else if (devinfo->gen == 5) {
      BEGIN_BATCH(8);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.bo, 0, 1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_RELOC(brw->cache.bo, 0, 1); /* Instruction base address */
      OUT_BATCH(0xfffff001); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.bo, 0, 1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }

   if (devinfo->gen >= 6) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }

   /* According to section 3.6.1 of VOL1 of the 965 PRM,
    * STATE_BASE_ADDRESS updates require a reissue of:
    *
    * 3DSTATE_PIPELINE_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * and this continues through Ironlake.  The Sandy Bridge PRM, vol
    * 1 part 1 says that the following packets must be reissued:
    *
    * 3DSTATE_CC_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * 3DSTATE_SAMPLER_STATE_POINTERS
    * 3DSTATE_VIEWPORT_STATE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * Those are always reissued following SBA updates anyway (new
    * batch time), except in the case of the program cache BO
    * changing.  Having a separate state flag makes the sequence more
    * obvious.
    */

   brw->ctx.NewDriverState |= BRW_NEW_STATE_BASE_ADDRESS;
   brw->batch.state_base_address_emitted = true;
}