/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "compiler/brw_eu_defines.h"

#include "main/framebuffer.h"
#include "main/fbobject.h"
#include "main/format_utils.h"
#include "main/glformats.h"

/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void
upload_pipelined_state_pointers(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen == 5) {
      /* Need to flush before changing clip max threads for errata. */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   BEGIN_BATCH(7);
   OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->batch.bo, 0, brw->vs.base.state_offset);
   if (brw->ff_gs.prog_active)
      OUT_RELOC(brw->batch.bo, 0, brw->ff_gs.state_offset | 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->batch.bo, 0, brw->clip.state_offset | 1);
   OUT_RELOC(brw->batch.bo, 0, brw->sf.state_offset);
   OUT_RELOC(brw->batch.bo, 0, brw->wm.base.state_offset);
   OUT_RELOC(brw->batch.bo, 0, brw->cc.state_offset);
   ADVANCE_BATCH();

   brw->ctx.NewDriverState |= BRW_NEW_PSP;
}
static void
upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}
const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FF_GS_PROG_DATA |
             BRW_NEW_GEN4_UNIT_STATE |
             BRW_NEW_STATE_BASE_ADDRESS |
             BRW_NEW_URB_FENCE,
   },
   .emit = upload_psp_urb_cbs,
};
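
/* Note on the atom pattern: a brw_tracked_state pairs a set of dirty bits
 * with an emit callback; the state upload loop re-runs .emit whenever any
 * listed .mesa/.brw bit is flagged in brw->ctx.NewDriverState. The atom
 * above, for example, re-emits the pipelined pointers on a new batch or a
 * state base address change, since the packet's offsets are relative to
 * that base.
 */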
uint32_t
brw_depthbuffer_format(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *drb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *srb;

   if (!drb &&
       (srb = intel_get_renderbuffer(fb, BUFFER_STENCIL)) &&
       !srb->mt->stencil_mt &&
       (intel_rb_format(srb) == MESA_FORMAT_Z24_UNORM_S8_UINT ||
        intel_rb_format(srb) == MESA_FORMAT_Z32_FLOAT_S8X24_UINT)) {
      drb = srb;
   }

   if (!drb)
      return BRW_DEPTHFORMAT_D32_FLOAT;

   return brw_depth_format(brw, drb->mt->format);
}
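
/* For reference, brw_depth_format() maps the miptree format to the hardware
 * depth format enum, e.g. MESA_FORMAT_Z_UNORM16 -> BRW_DEPTHFORMAT_D16_UNORM
 * (illustrative; see the mapping in brw_depth_format() itself).
 */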
static struct intel_mipmap_tree *
get_stencil_miptree(struct intel_renderbuffer *irb)
{
   if (!irb)
      return NULL;
   if (irb->mt->stencil_mt)
      return irb->mt->stencil_mt;
   return intel_renderbuffer_get_mt(irb);
}
static bool
rebase_depth_stencil(struct brw_context *brw, struct intel_renderbuffer *irb,
                     bool invalidate)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t tile_mask_x = 0, tile_mask_y = 0;

   intel_get_tile_masks(irb->mt->surf.tiling, irb->mt->cpp,
                        &tile_mask_x, &tile_mask_y);
   assert(!intel_miptree_level_has_hiz(irb->mt, irb->mt_level));

   uint32_t tile_x = irb->draw_x & tile_mask_x;
   uint32_t tile_y = irb->draw_y & tile_mask_y;

   /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
    * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
    * Coordinate Offset X/Y":
    *
    *   "The 3 LSBs of both offsets must be zero to ensure correct
    *   alignment"
    */
   bool rebase = tile_x & 7 || tile_y & 7;

   /* We didn't even have intra-tile offsets before g45. */
   rebase |= (!brw->has_surface_tile_offset && (tile_x || tile_y));
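
   /* A worked example, assuming a Y-tiled depth miptree with 4 bytes per
    * pixel (so a 128-byte-wide Y tile spans 32 pixels and the mask would be
    * tile_mask_x = 31): a level at draw_x = 100 gives tile_x = 100 & 31 = 4,
    * and 4 & 7 != 0, so the level must be rebased. The tile geometry here is
    * illustrative; the real masks come from intel_get_tile_masks() above.
    */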
   if (rebase) {
      perf_debug("HW workaround: blitting depth level %d to a temporary "
                 "to fix alignment (depth tile offset %d,%d)\n",
                 irb->mt_level, tile_x, tile_y);
      intel_renderbuffer_move_to_temp(brw, irb, invalidate);

      /* There is now only a single-slice miptree. */
      brw->depthstencil.tile_x = 0;
      brw->depthstencil.tile_y = 0;
      brw->depthstencil.depth_offset = 0;
      return true;
   }

   /* While we just tried to get everything aligned, we may have failed to do
    * so in the case of rendering to array or 3D textures, where nonzero faces
    * will still have an offset post-rebase. At least give an informative
    * warning.
    */
   WARN_ONCE((tile_x & 7) || (tile_y & 7),
             "Depth/stencil buffer needs alignment to 8-pixel boundaries.\n"
             "Truncating offset (%u:%u), bad rendering may occur.\n",
             tile_x, tile_y);
   tile_x &= ~7;
   tile_y &= ~7;

   brw->depthstencil.tile_x = tile_x;
   brw->depthstencil.tile_y = tile_y;
   brw->depthstencil.depth_offset = intel_miptree_get_aligned_offset(
                                       irb->mt,
                                       irb->draw_x & ~tile_mask_x,
                                       irb->draw_y & ~tile_mask_y);

   return false;
}
void
brw_workaround_depthstencil_alignment(struct brw_context *brw,
                                      GLbitfield clear_mask)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL;
   bool invalidate_depth = clear_mask & BUFFER_BIT_DEPTH;
   bool invalidate_stencil = clear_mask & BUFFER_BIT_STENCIL;

   if (depth_irb)
      depth_mt = depth_irb->mt;

   /* Initialize brw->depthstencil to 'nop' workaround state. */
   brw->depthstencil.tile_x = 0;
   brw->depthstencil.tile_y = 0;
   brw->depthstencil.depth_offset = 0;

   /* Gen6+ doesn't require the workarounds, since we always program the
    * surface state at the start of the whole surface.
    */
   if (devinfo->gen >= 6)
      return;

   /* Check if depth buffer is in depth/stencil format. If so, then it's only
    * safe to invalidate it if we're also clearing stencil.
    */
   if (depth_irb && invalidate_depth &&
       _mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL)
      invalidate_depth = invalidate_stencil && stencil_irb;

   if (depth_irb) {
      if (rebase_depth_stencil(brw, depth_irb, invalidate_depth)) {
         /* In the case of stencil_irb being the same packed depth/stencil
          * texture but not the same rb, make it point at our rebased mt, too.
          */
         if (stencil_irb &&
             stencil_irb != depth_irb &&
             stencil_irb->mt == depth_mt) {
            intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
            intel_renderbuffer_set_draw_offset(stencil_irb);
         }
      }

      if (stencil_irb) {
         assert(stencil_irb->mt == depth_irb->mt);
         assert(stencil_irb->mt_level == depth_irb->mt_level);
         assert(stencil_irb->mt_layer == depth_irb->mt_layer);
      }
   }

   /* If there is no depth attachment, consider if stencil needs rebase. */
   if (!depth_irb && stencil_irb)
      rebase_depth_stencil(brw, stencil_irb, invalidate_stencil);
}
void
brw_emit_depthbuffer(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = intel_renderbuffer_get_mt(depth_irb);
   struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
   uint32_t tile_x = brw->depthstencil.tile_x;
   uint32_t tile_y = brw->depthstencil.tile_y;
   bool hiz = depth_irb && intel_renderbuffer_has_hiz(depth_irb);
   bool separate_stencil = false;
   uint32_t depth_surface_type = BRW_SURFACE_NULL;
   uint32_t depthbuffer_format = BRW_DEPTHFORMAT_D32_FLOAT;
   uint32_t depth_offset = 0;
   uint32_t width = 1, height = 1;

   if (stencil_mt) {
      separate_stencil = stencil_mt->format == MESA_FORMAT_S_UINT8;

      /* Gen7 supports only separate stencil */
      assert(separate_stencil || devinfo->gen < 7);
   }

   /* If there's a packed depth/stencil bound to stencil only, we need to
    * emit the packed depth/stencil buffer packet.
    */
   if (!depth_irb && stencil_irb && !separate_stencil) {
      depth_irb = stencil_irb;
      depth_mt = stencil_mt;
   }

   if (depth_irb && depth_mt) {
      /* When 3DSTATE_DEPTH_BUFFER.Separate_Stencil_Enable is set, then
       * 3DSTATE_DEPTH_BUFFER.Surface_Format is not permitted to be a packed
       * depthstencil format.
       *
       * Gens prior to 7 require that HiZ_Enable and Separate_Stencil_Enable be
       * set to the same value. Gens after 7 implicitly always set
       * Separate_Stencil_Enable; software cannot disable it.
       */
      if ((devinfo->gen < 7 && hiz) || devinfo->gen >= 7) {
         assert(!_mesa_is_format_packed_depth_stencil(depth_mt->format));
      }

      /* Prior to Gen7, if using separate stencil, hiz must be enabled. */
      assert(devinfo->gen >= 7 || !separate_stencil || hiz);

      assert(devinfo->gen < 6 || depth_mt->surf.tiling == ISL_TILING_Y0);
      assert(!hiz || depth_mt->surf.tiling == ISL_TILING_Y0);

      depthbuffer_format = brw_depthbuffer_format(brw);
      depth_surface_type = BRW_SURFACE_2D;
      depth_offset = brw->depthstencil.depth_offset;
      width = depth_irb->Base.Base.Width;
      height = depth_irb->Base.Base.Height;
   } else if (separate_stencil) {
      /*
       * There exists a separate stencil buffer but no depth buffer.
       *
       * The stencil buffer inherits most of its fields from
       * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
       * height.
       *
       * The tiled bit must be set. From the Sandybridge PRM, Volume 2, Part 1,
       * Section 7.5.5.1.1 3DSTATE_DEPTH_BUFFER, Bit 1.27 Tiled Surface:
       *
       *     [DevGT+]: This field must be set to TRUE.
       */
      assert(brw->has_separate_stencil);

      depth_surface_type = BRW_SURFACE_2D;
      width = stencil_irb->Base.Base.Width;
      height = stencil_irb->Base.Base.Height;
   }

   if (depth_mt)
      brw_render_cache_set_check_flush(brw, depth_mt->bo);
   if (stencil_mt)
      brw_render_cache_set_check_flush(brw, stencil_mt->bo);

   brw->vtbl.emit_depth_stencil_hiz(brw, depth_mt, depth_offset,
                                    depthbuffer_format, depth_surface_type,
                                    stencil_mt, hiz, separate_stencil,
                                    width, height, tile_x, tile_y);
}
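
/* brw->vtbl.emit_depth_stencil_hiz is a per-generation hook; the Gen4/5
 * implementation, brw_emit_depth_stencil_hiz() below, programs only the
 * packed depth path (it asserts that neither HiZ nor separate stencil is in
 * use), while later generations install their own variants elsewhere.
 */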
uint32_t
brw_convert_depth_value(mesa_format format, float value)
{
   switch (format) {
   case MESA_FORMAT_Z_FLOAT32:
      return float_as_int(value);
   case MESA_FORMAT_Z_UNORM16:
      return value * ((1u << 16) - 1);
   case MESA_FORMAT_Z24_UNORM_X8_UINT:
      return value * ((1u << 24) - 1);
   default:
      unreachable("Invalid depth format");
   }
}
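
/* Worked examples for the conversion above (the float-to-integer conversion
 * truncates toward zero):
 *
 *    brw_convert_depth_value(MESA_FORMAT_Z24_UNORM_X8_UINT, 1.0f) == 0xffffff
 *    brw_convert_depth_value(MESA_FORMAT_Z_UNORM16, 0.5f)         == 0x7fff
 *
 * i.e. a clear value in [0, 1] scales to the full range of the UNORM format,
 * while MESA_FORMAT_Z_FLOAT32 just reinterprets the float's bits.
 */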
void
brw_emit_depth_stencil_hiz(struct brw_context *brw,
                           struct intel_mipmap_tree *depth_mt,
                           uint32_t depth_offset, uint32_t depthbuffer_format,
                           uint32_t depth_surface_type,
                           struct intel_mipmap_tree *stencil_mt,
                           bool hiz, bool separate_stencil,
                           uint32_t width, uint32_t height,
                           uint32_t tile_x, uint32_t tile_y)
{
   (void)hiz;
   (void)separate_stencil;
   (void)stencil_mt;

   assert(!hiz);
   assert(!separate_stencil);

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const unsigned len = (brw->is_g4x || devinfo->gen == 5) ? 6 : 5;

   BEGIN_BATCH(len);
   OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
   OUT_BATCH((depth_mt ? depth_mt->surf.row_pitch - 1 : 0) |
             (depthbuffer_format << 18) |
             (BRW_TILEWALK_YMAJOR << 26) |
             (1 << 27) |
             (depth_surface_type << 29));

   if (depth_mt) {
      OUT_RELOC(depth_mt->bo, RELOC_WRITE, depth_offset);
   } else {
      OUT_BATCH(0);
   }

   OUT_BATCH(((width + tile_x - 1) << 6) |
             ((height + tile_y - 1) << 19));
   OUT_BATCH(0);

   if (brw->is_g4x || devinfo->gen >= 5)
      OUT_BATCH(tile_x | (tile_y << 16));
   else
      assert(tile_x == 0 && tile_y == 0);

   if (devinfo->gen >= 6)
      OUT_BATCH(0);

   ADVANCE_BATCH();
}
const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = brw_emit_depthbuffer,
};
void
brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool is_965 = devinfo->gen == 4 && !brw->is_g4x;
   const uint32_t _3DSTATE_PIPELINE_SELECT =
      is_965 ? CMD_PIPELINE_SELECT_965 : CMD_PIPELINE_SELECT_GM45;

   if (devinfo->gen >= 8 && devinfo->gen < 10) {
      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
       *
       *   "Software must clear the COLOR_CALC_STATE Valid field in
       *    3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
       *    with Pipeline Select set to GPGPU."
       *
       * The internal hardware docs recommend the same workaround for Gen9
       * hardware too.
       */
      if (pipeline == BRW_COMPUTE_PIPELINE) {
         BEGIN_BATCH(2);
         OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
         OUT_BATCH(0);
         ADVANCE_BATCH();

         brw->ctx.NewDriverState |= BRW_NEW_CC_STATE;
      }
   }

   if (devinfo->gen >= 6) {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: DEVSNB+
       *
       *   Software must ensure all the write caches are flushed through a
       *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
       *   command to invalidate read only caches prior to programming
       *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
       */
      const unsigned dc_flush =
         devinfo->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  dc_flush |
                                  PIPE_CONTROL_NO_WRITE |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                  PIPE_CONTROL_NO_WRITE);
   } else {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: PRE-DEVSNB
       *
       *   Software must ensure the current pipeline is flushed via an
       *   MI_FLUSH or PIPE_CONTROL prior to the execution of PIPELINE_SELECT.
       */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   /* Select the pipeline */
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_PIPELINE_SELECT << 16 |
             (devinfo->gen >= 9 ? (3 << 8) : 0) |
             (pipeline == BRW_COMPUTE_PIPELINE ? 2 : 0));
   ADVANCE_BATCH();

   if (devinfo->gen == 7 && !brw->is_haswell &&
       pipeline == BRW_RENDER_PIPELINE) {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: DEVIVB, DEVHSW:GT3:A0
       *
       *   Software must send a pipe_control with a CS stall and a post sync
       *   operation and then a dummy DRAW after every MI_SET_CONTEXT and
       *   after any PIPELINE_SELECT that is enabling 3D mode.
       */
      gen7_emit_cs_stall_flush(brw);

      BEGIN_BATCH(7);
      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
      OUT_BATCH(_3DPRIM_POINTLIST);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}
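
/* In the PIPELINE_SELECT packet emitted above, bits 1:0 carry the pipeline
 * (0 = 3D, 2 = GPGPU/compute, hence the "? 2 : 0"); the (3 << 8) on Gen9+
 * is, to the best of our reading of the Skylake PRM, the mask that marks
 * those two bits as valid for writing.
 */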
/**
 * Misc invariant state packets
 */
static void
brw_upload_invariant_state(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool is_965 = devinfo->gen == 4 && !brw->is_g4x;

   brw_emit_select_pipeline(brw, BRW_RENDER_PIPELINE);
   brw->last_pipeline = BRW_RENDER_PIPELINE;

   if (devinfo->gen >= 8) {
      BEGIN_BATCH(3);
      OUT_BATCH(CMD_STATE_SIP << 16 | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(2);
      OUT_BATCH(CMD_STATE_SIP << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* Original Gen4 doesn't have 3DSTATE_AA_LINE_PARAMETERS. */
   if (!is_965) {
      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
      /* use legacy aa line coverage computation */
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   const uint32_t _3DSTATE_VF_STATISTICS =
      is_965 ? GEN4_3DSTATE_VF_STATISTICS : GM45_3DSTATE_VF_STATISTICS;
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_VF_STATISTICS << 16 | 1);
   ADVANCE_BATCH();
}
const struct brw_tracked_state brw_invariant_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_CONTEXT,
   },
   .emit = brw_upload_invariant_state
};
/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations for the objects,
 * and is actually required for binding table pointers on gen6.
 *
 * Surface state base address covers binding table pointers and
 * surface state objects, but not the surfaces that the surface state
 * objects point to.
 */
void
brw_upload_state_base_address(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (brw->batch.state_base_address_emitted)
      return;

   /* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of
    * vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be
    * programmed prior to STATE_BASE_ADDRESS.
    *
    * However, given that the instruction SBA (general state base
    * address) on this chipset is always set to 0 across X and GL,
    * maybe this isn't required for us in particular.
    */

   if (devinfo->gen >= 6) {
      const unsigned dc_flush =
         devinfo->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;

      /* Emit a render target cache flush.
       *
       * This isn't documented anywhere in the PRM. However, it seems to be
       * necessary prior to changing the surface state base address. We've
       * seen issues in Vulkan where we get GPU hangs when using multi-level
       * command buffers which clear depth, reset state base address, and then
       * go render stuff.
       *
       * Normally, in GL, we would trust the kernel to do sufficient stalls
       * and flushes prior to executing our batch. However, it doesn't seem
       * as if the kernel's flushing is always sufficient and we don't want to
       * rely on it.
       *
       * We make this an end-of-pipe sync instead of a normal flush because we
       * do not know the current status of the GPU. On Haswell at least,
       * having a fast-clear operation in flight at the same time as a normal
       * rendering operation can cause hangs. Since the kernel's flushing is
       * insufficient, we need to ensure that any rendering operations from
       * other processes are definitely complete before we try to do our own
       * rendering. It's a bit of a big hammer but it appears to work.
       */
      brw_emit_end_of_pipe_sync(brw,
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                dc_flush);
   }
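
   /* In all of the STATE_BASE_ADDRESS variants below, each base address and
    * each buffer-size/upper-bound dword is OR'd with 1: bit 0 is the packet's
    * "Modify Enable" flag, without which the hardware ignores that field.
    * 0xfffff001 programs the maximum upper bound with the modify bit set.
    */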
   if (devinfo->gen >= 8) {
      uint32_t mocs_wb = devinfo->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
      int pkt_len = devinfo->gen >= 9 ? 19 : 16;

      BEGIN_BATCH(pkt_len);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (pkt_len - 2));
      /* General state base address: stateless DP read/write requests */
      OUT_BATCH(mocs_wb << 4 | 1);
      OUT_BATCH(0);
      OUT_BATCH(mocs_wb << 16);
      /* Surface state base address: */
      OUT_RELOC64(brw->batch.bo, 0, mocs_wb << 4 | 1);
      /* Dynamic state base address: */
      OUT_RELOC64(brw->batch.bo, 0, mocs_wb << 4 | 1);
      /* Indirect object base address: MEDIA_OBJECT data */
      OUT_BATCH(mocs_wb << 4 | 1);
      OUT_BATCH(0);
      /* Instruction base address: shader kernels (incl. SIP) */
      OUT_RELOC64(brw->cache.bo, 0, mocs_wb << 4 | 1);

      /* General state buffer size */
      OUT_BATCH(0xfffff001);
      /* Dynamic state buffer size */
      OUT_BATCH(ALIGN(brw->batch.bo->size, 4096) | 1);
      /* Indirect object upper bound */
      OUT_BATCH(0xfffff001);
      /* Instruction access upper bound */
      OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
      if (devinfo->gen >= 9) {
         OUT_BATCH(1);
         OUT_BATCH(0);
         OUT_BATCH(0);
      }
      ADVANCE_BATCH();
   } else if (devinfo->gen >= 6) {
      uint8_t mocs = devinfo->gen == 7 ? GEN7_MOCS_L3 : 0;

      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
      OUT_BATCH(mocs << 8 | /* General State Memory Object Control State */
                mocs << 4 | /* Stateless Data Port Access Memory Object Control State */
                1); /* General State Base Address Modify Enable */
      /* Surface state base address:
       * BINDING_TABLE_STATE
       * SURFACE_STATE
       */
      OUT_RELOC(brw->batch.bo, 0, 1);
      /* Dynamic state base address:
       * SAMPLER_STATE
       * SAMPLER_BORDER_COLOR_STATE
       * CLIP, SF, WM/CC viewport state
       * COLOR_CALC_STATE
       * DEPTH_STENCIL_STATE
       * BLEND_STATE
       * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
       * Disable is clear, which we rely on)
       */
      OUT_RELOC(brw->batch.bo, 0, 1);

      OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */

      /* Instruction base address: shader kernels (incl. SIP) */
      OUT_RELOC(brw->cache.bo, 0, 1);

      OUT_BATCH(1); /* General state upper bound */
      /* Dynamic state upper bound. Although the documentation says that
       * programming it to zero will cause it to be ignored, that is a lie.
       * If this isn't programmed to a real bound, the sampler border color
       * pointer is rejected, causing border color to mysteriously fail.
       */
      OUT_BATCH(0xfffff001);
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else if (devinfo->gen == 5) {
      BEGIN_BATCH(8);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.bo, 0, 1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_RELOC(brw->cache.bo, 0, 1); /* Instruction base address */
      OUT_BATCH(0xfffff001); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.bo, 0, 1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }
   if (devinfo->gen >= 6) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }
   /* According to section 3.6.1 of VOL1 of the 965 PRM,
    * STATE_BASE_ADDRESS updates require a reissue of:
    *
    * 3DSTATE_PIPELINE_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * and this continues through Ironlake. The Sandy Bridge PRM, vol
    * 1 part 1 says that the following packets must be reissued:
    *
    * 3DSTATE_CC_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * 3DSTATE_SAMPLER_STATE_POINTERS
    * 3DSTATE_VIEWPORT_STATE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * Those are always reissued following SBA updates anyway (new
    * batch time), except in the case of the program cache BO
    * changing. Having a separate state flag makes the sequence more
    * obvious.
    */

   brw->ctx.NewDriverState |= BRW_NEW_STATE_BASE_ADDRESS;
   brw->batch.state_base_address_emitted = true;
}