/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * ============================= GENXML CODE =============================
 *              [This file is compiled once per generation.]
 * =======================================================================
 *
 * This is the main state upload code.
 *
 * OpenGL involves frequently mutating context state, which is mirrored in
 * core Mesa by highly mutable data structures.  However, most applications
 * typically draw the same things over and over - from frame to frame, most
 * of the same objects are still visible and need to be redrawn.  So, rather
 * than inventing new state all the time, applications usually mutate to swap
 * between known states that we've seen before.
 *
 * Gallium isolates us from this mutation by tracking API state, and
 * distilling it into a set of Constant State Objects, or CSOs.  Large,
 * complex, or typically reusable state can be created once, then reused
 * multiple times.  Drivers can create and store their own associated data.
 * This create/bind model corresponds to the pipe->create_*_state() and
 * pipe->bind_*_state() driver hooks.
 *
 * Some state is cheap to create, or expected to be highly dynamic.  Rather
 * than creating and caching piles of CSOs for these, Gallium simply streams
 * them out, via the pipe->set_*_state() driver hooks.
 *
 * To reduce draw time overhead, we try to compute as much state at create
 * time as possible.  Wherever possible, we translate the Gallium pipe state
 * to 3DSTATE commands, and store those commands in the CSO.  At draw time,
 * we can simply memcpy them into a batch buffer.
 *
 * No hardware matches the abstraction perfectly, so some commands require
 * information from multiple CSOs.  In this case, we can store two copies
 * of the packet (one in each CSO), and simply | together their DWords at
 * draw time.  Sometimes the second set is trivial (one or two fields), so
 * we simply pack it at draw time.
 *
 * There are two main components in the file below.  First, the CSO hooks
 * create/bind/track state.  Second, the draw-time upload functions,
 * iris_upload_render_state() and iris_upload_compute_state(), read the
 * context state and emit the commands into the actual batch.
 */
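/*
 * A concrete sketch of the pack-once/merge-at-draw pattern described above
 * (the packet and field choices here are illustrative, and iris_emit_merge()
 * is the OR-merge helper assumed to be defined later in this file; see
 * iris_upload_render_state() for the real cases):
 *
 *    // Create time: translate pipe state and stash the packed DWords.
 *    iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
 *       sf.LineWidth = line_width;
 *    }
 *
 *    // Draw time: pack only the dynamic fields, then OR the two copies.
 *    uint32_t dynamic_sf[GENX(3DSTATE_SF_length)];
 *    iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) {
 *       sf.ViewportTransformEnable = true;
 *    }
 *    iris_emit_merge(batch, cso->sf, dynamic_sf, GENX(3DSTATE_SF_length));
 */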
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))

#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_dual_blend.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "util/u_viewport.h"
#include "util/u_memory.h"
#include "drm-uapi/i915_drm.h"
#include "intel/compiler/brw_compiler.h"
#include "intel/common/gen_aux_map.h"
#include "intel/common/gen_l3_config.h"
#include "intel/common/gen_sample_positions.h"
#include "iris_batch.h"
#include "iris_context.h"
#include "iris_defines.h"
#include "iris_pipe.h"
#include "iris_resource.h"

#include "iris_genx_macros.h"
#include "intel/common/gen_guardband.h"
/**
 * Statically assert that PIPE_* enums match the hardware packets.
 * (As long as they match, we don't need to translate them.)
 */
UNUSED static void
pipe_asserts()
{
#define PIPE_ASSERT(x) STATIC_ASSERT((int)x)

   /* pipe_logicop happens to match the hardware. */
   PIPE_ASSERT(PIPE_LOGICOP_CLEAR == LOGICOP_CLEAR);
   PIPE_ASSERT(PIPE_LOGICOP_NOR == LOGICOP_NOR);
   PIPE_ASSERT(PIPE_LOGICOP_AND_INVERTED == LOGICOP_AND_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_COPY_INVERTED == LOGICOP_COPY_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_AND_REVERSE == LOGICOP_AND_REVERSE);
   PIPE_ASSERT(PIPE_LOGICOP_INVERT == LOGICOP_INVERT);
   PIPE_ASSERT(PIPE_LOGICOP_XOR == LOGICOP_XOR);
   PIPE_ASSERT(PIPE_LOGICOP_NAND == LOGICOP_NAND);
   PIPE_ASSERT(PIPE_LOGICOP_AND == LOGICOP_AND);
   PIPE_ASSERT(PIPE_LOGICOP_EQUIV == LOGICOP_EQUIV);
   PIPE_ASSERT(PIPE_LOGICOP_NOOP == LOGICOP_NOOP);
   PIPE_ASSERT(PIPE_LOGICOP_OR_INVERTED == LOGICOP_OR_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_COPY == LOGICOP_COPY);
   PIPE_ASSERT(PIPE_LOGICOP_OR_REVERSE == LOGICOP_OR_REVERSE);
   PIPE_ASSERT(PIPE_LOGICOP_OR == LOGICOP_OR);
   PIPE_ASSERT(PIPE_LOGICOP_SET == LOGICOP_SET);

   /* pipe_blendfactor happens to match the hardware. */
   PIPE_ASSERT(PIPE_BLENDFACTOR_ONE == BLENDFACTOR_ONE);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_COLOR == BLENDFACTOR_SRC_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA == BLENDFACTOR_SRC_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_ALPHA == BLENDFACTOR_DST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_COLOR == BLENDFACTOR_DST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE == BLENDFACTOR_SRC_ALPHA_SATURATE);
   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_COLOR == BLENDFACTOR_CONST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_ALPHA == BLENDFACTOR_CONST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_COLOR == BLENDFACTOR_SRC1_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_ALPHA == BLENDFACTOR_SRC1_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_ZERO == BLENDFACTOR_ZERO);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_COLOR == BLENDFACTOR_INV_SRC_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_ALPHA == BLENDFACTOR_INV_SRC_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_ALPHA == BLENDFACTOR_INV_DST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_COLOR == BLENDFACTOR_INV_DST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_COLOR == BLENDFACTOR_INV_CONST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_ALPHA == BLENDFACTOR_INV_CONST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_COLOR == BLENDFACTOR_INV_SRC1_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_ALPHA == BLENDFACTOR_INV_SRC1_ALPHA);

   /* pipe_blend_func happens to match the hardware. */
   PIPE_ASSERT(PIPE_BLEND_ADD == BLENDFUNCTION_ADD);
   PIPE_ASSERT(PIPE_BLEND_SUBTRACT == BLENDFUNCTION_SUBTRACT);
   PIPE_ASSERT(PIPE_BLEND_REVERSE_SUBTRACT == BLENDFUNCTION_REVERSE_SUBTRACT);
   PIPE_ASSERT(PIPE_BLEND_MIN == BLENDFUNCTION_MIN);
   PIPE_ASSERT(PIPE_BLEND_MAX == BLENDFUNCTION_MAX);

   /* pipe_stencil_op happens to match the hardware. */
   PIPE_ASSERT(PIPE_STENCIL_OP_KEEP == STENCILOP_KEEP);
   PIPE_ASSERT(PIPE_STENCIL_OP_ZERO == STENCILOP_ZERO);
   PIPE_ASSERT(PIPE_STENCIL_OP_REPLACE == STENCILOP_REPLACE);
   PIPE_ASSERT(PIPE_STENCIL_OP_INCR == STENCILOP_INCRSAT);
   PIPE_ASSERT(PIPE_STENCIL_OP_DECR == STENCILOP_DECRSAT);
   PIPE_ASSERT(PIPE_STENCIL_OP_INCR_WRAP == STENCILOP_INCR);
   PIPE_ASSERT(PIPE_STENCIL_OP_DECR_WRAP == STENCILOP_DECR);
   PIPE_ASSERT(PIPE_STENCIL_OP_INVERT == STENCILOP_INVERT);

   /* pipe_sprite_coord_mode happens to match 3DSTATE_SBE */
   PIPE_ASSERT(PIPE_SPRITE_COORD_UPPER_LEFT == UPPERLEFT);
   PIPE_ASSERT(PIPE_SPRITE_COORD_LOWER_LEFT == LOWERLEFT);

#undef PIPE_ASSERT
}
static unsigned
translate_prim_type(enum pipe_prim_type prim, uint8_t verts_per_patch)
{
   static const unsigned map[] = {
      [PIPE_PRIM_POINTS]                   = _3DPRIM_POINTLIST,
      [PIPE_PRIM_LINES]                    = _3DPRIM_LINELIST,
      [PIPE_PRIM_LINE_LOOP]                = _3DPRIM_LINELOOP,
      [PIPE_PRIM_LINE_STRIP]               = _3DPRIM_LINESTRIP,
      [PIPE_PRIM_TRIANGLES]                = _3DPRIM_TRILIST,
      [PIPE_PRIM_TRIANGLE_STRIP]           = _3DPRIM_TRISTRIP,
      [PIPE_PRIM_TRIANGLE_FAN]             = _3DPRIM_TRIFAN,
      [PIPE_PRIM_QUADS]                    = _3DPRIM_QUADLIST,
      [PIPE_PRIM_QUAD_STRIP]               = _3DPRIM_QUADSTRIP,
      [PIPE_PRIM_POLYGON]                  = _3DPRIM_POLYGON,
      [PIPE_PRIM_LINES_ADJACENCY]          = _3DPRIM_LINELIST_ADJ,
      [PIPE_PRIM_LINE_STRIP_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
      [PIPE_PRIM_TRIANGLES_ADJACENCY]      = _3DPRIM_TRILIST_ADJ,
      [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
      [PIPE_PRIM_PATCHES]                  = _3DPRIM_PATCHLIST_1 - 1,
   };

   return map[prim] + (prim == PIPE_PRIM_PATCHES ? verts_per_patch : 0);
}

static unsigned
translate_compare_func(enum pipe_compare_func pipe_func)
{
   static const unsigned map[] = {
      [PIPE_FUNC_NEVER]    = COMPAREFUNCTION_NEVER,
      [PIPE_FUNC_LESS]     = COMPAREFUNCTION_LESS,
      [PIPE_FUNC_EQUAL]    = COMPAREFUNCTION_EQUAL,
      [PIPE_FUNC_LEQUAL]   = COMPAREFUNCTION_LEQUAL,
      [PIPE_FUNC_GREATER]  = COMPAREFUNCTION_GREATER,
      [PIPE_FUNC_NOTEQUAL] = COMPAREFUNCTION_NOTEQUAL,
      [PIPE_FUNC_GEQUAL]   = COMPAREFUNCTION_GEQUAL,
      [PIPE_FUNC_ALWAYS]   = COMPAREFUNCTION_ALWAYS,
   };
   return map[pipe_func];
}

static unsigned
translate_shadow_func(enum pipe_compare_func pipe_func)
{
   /* Gallium specifies the result of shadow comparisons as:
    *
    *    1 if ref <op> texel,
    *    0 otherwise.
    *
    * The hardware does:
    *
    *    0 if texel <op> ref,
    *    1 otherwise.
    *
    * So we need to flip the operator and also negate.
    */
   static const unsigned map[] = {
      [PIPE_FUNC_NEVER]    = PREFILTEROPALWAYS,
      [PIPE_FUNC_LESS]     = PREFILTEROPLEQUAL,
      [PIPE_FUNC_EQUAL]    = PREFILTEROPNOTEQUAL,
      [PIPE_FUNC_LEQUAL]   = PREFILTEROPLESS,
      [PIPE_FUNC_GREATER]  = PREFILTEROPGEQUAL,
      [PIPE_FUNC_NOTEQUAL] = PREFILTEROPEQUAL,
      [PIPE_FUNC_GEQUAL]   = PREFILTEROPGREATER,
      [PIPE_FUNC_ALWAYS]   = PREFILTEROPNEVER,
   };
   return map[pipe_func];
}

static unsigned
translate_cull_mode(unsigned pipe_face)
{
   static const unsigned map[4] = {
      [PIPE_FACE_NONE]           = CULLMODE_NONE,
      [PIPE_FACE_FRONT]          = CULLMODE_FRONT,
      [PIPE_FACE_BACK]           = CULLMODE_BACK,
      [PIPE_FACE_FRONT_AND_BACK] = CULLMODE_BOTH,
   };
   return map[pipe_face];
}

static unsigned
translate_fill_mode(unsigned pipe_polymode)
{
   static const unsigned map[4] = {
      [PIPE_POLYGON_MODE_FILL]           = FILL_MODE_SOLID,
      [PIPE_POLYGON_MODE_LINE]           = FILL_MODE_WIREFRAME,
      [PIPE_POLYGON_MODE_POINT]          = FILL_MODE_POINT,
      [PIPE_POLYGON_MODE_FILL_RECTANGLE] = FILL_MODE_SOLID,
   };
   return map[pipe_polymode];
}

static unsigned
translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
{
   static const unsigned map[] = {
      [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
      [PIPE_TEX_MIPFILTER_LINEAR]  = MIPFILTER_LINEAR,
      [PIPE_TEX_MIPFILTER_NONE]    = MIPFILTER_NONE,
   };
   return map[pipe_mip];
}

static unsigned
translate_wrap(unsigned pipe_wrap)
{
   static const unsigned map[] = {
      [PIPE_TEX_WRAP_REPEAT]                 = TCM_WRAP,
      [PIPE_TEX_WRAP_CLAMP]                  = TCM_HALF_BORDER,
      [PIPE_TEX_WRAP_CLAMP_TO_EDGE]          = TCM_CLAMP,
      [PIPE_TEX_WRAP_CLAMP_TO_BORDER]        = TCM_CLAMP_BORDER,
      [PIPE_TEX_WRAP_MIRROR_REPEAT]          = TCM_MIRROR,
      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE]   = TCM_MIRROR_ONCE,

      /* These are unsupported. */
      [PIPE_TEX_WRAP_MIRROR_CLAMP]           = -1,
      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
   };
   return map[pipe_wrap];
}
/**
 * Allocate space for some indirect state.
 *
 * Return a pointer to the map (to fill it out) and a state ref (for
 * referring to the state in GPU commands).
 */
static void *
upload_state(struct u_upload_mgr *uploader,
             struct iris_state_ref *ref,
             unsigned size,
             unsigned alignment)
{
   void *p = NULL;
   u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
   return p;
}
/**
 * Stream out temporary/short-lived state.
 *
 * This allocates space, pins the BO, and includes the BO address in the
 * returned offset (which works because all state lives in 32-bit memory
 * zones).
 */
static uint32_t *
stream_state(struct iris_batch *batch,
             struct u_upload_mgr *uploader,
             struct pipe_resource **out_res,
             unsigned size,
             unsigned alignment,
             uint32_t *out_offset)
{
   void *ptr = NULL;

   u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);

   struct iris_bo *bo = iris_resource_bo(*out_res);
   iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

   iris_record_state_size(batch->state_sizes,
                          bo->gtt_offset + *out_offset, size);

   *out_offset += iris_bo_offset_from_base_address(bo);

   return ptr;
}
/**
 * stream_state() + memcpy.
 */
static uint32_t
emit_state(struct iris_batch *batch,
           struct u_upload_mgr *uploader,
           struct pipe_resource **out_res,
           const void *data,
           unsigned size,
           unsigned alignment)
{
   uint32_t offset = 0;
   uint32_t *map =
      stream_state(batch, uploader, out_res, size, alignment, &offset);

   if (map)
      memcpy(map, data, size);

   return offset;
}
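/*
 * A minimal usage sketch (the state ref and sizes here are illustrative,
 * not a real caller): allocate indirect state with upload_state(), fill
 * out the returned map, and later point a state-pointer packet at
 * ref.offset.
 *
 *    struct iris_state_ref ref = {0};
 *    uint32_t *vp_map = upload_state(ice->state.dynamic_uploader, &ref,
 *                                    4 * GENX(CC_VIEWPORT_length), 32);
 *    // ... pack CC_VIEWPORT DWords into vp_map, then emit a
 *    // 3DSTATE_VIEWPORT_STATE_POINTERS_CC packet pointing at ref.offset ...
 */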
/**
 * Did field 'x' change between 'old_cso' and 'new_cso'?
 *
 * (If so, we may want to set some dirty flags.)
 */
#define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
#define cso_changed_memcmp(x) \
   (!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
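/*
 * For example, a bind hook with 'old_cso' and 'new_cso' in scope can flag
 * dirty bits only when a field actually changed (see iris_bind_zsa_state()
 * below for the real pattern):
 *
 *    if (cso_changed(alpha.enabled))
 *       ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;
 */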
static void
flush_before_state_base_change(struct iris_batch *batch)
{
   const struct gen_device_info *devinfo = &batch->screen->devinfo;

   /* Flush before emitting STATE_BASE_ADDRESS.
    *
    * This isn't documented anywhere in the PRM.  However, it seems to be
    * necessary prior to changing the surface state base address.  We've
    * seen issues in Vulkan where we get GPU hangs when using multi-level
    * command buffers which clear depth, reset state base address, and then
    * go render stuff.
    *
    * Normally, in GL, we would trust the kernel to do sufficient stalls
    * and flushes prior to executing our batch.  However, it doesn't seem
    * as if the kernel's flushing is always sufficient and we don't want to
    * rely on it.
    *
    * We make this an end-of-pipe sync instead of a normal flush because we
    * do not know the current status of the GPU.  On Haswell at least,
    * having a fast-clear operation in flight at the same time as a normal
    * rendering operation can cause hangs.  Since the kernel's flushing is
    * insufficient, we need to ensure that any rendering operations from
    * other processes are definitely complete before we try to do our own
    * rendering.  It's a bit of a big hammer but it appears to work.
    */
   iris_emit_end_of_pipe_sync(batch,
                              "change STATE_BASE_ADDRESS (flushes)",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH |
                              PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                              PIPE_CONTROL_DATA_CACHE_FLUSH |
                              /* GEN:BUG:1606662791:
                               *
                               *   Software must program PIPE_CONTROL command
                               *   with "HDC Pipeline Flush" prior to
                               *   programming of the below two non-pipeline
                               *   state:
                               *      * STATE_BASE_ADDRESS
                               *      * 3DSTATE_BINDING_TABLE_POOL_ALLOC
                               */
                              ((GEN_GEN == 12 && devinfo->revision == 0 /* A0 */ ?
                                PIPE_CONTROL_FLUSH_HDC : 0)));
}
static void
flush_after_state_base_change(struct iris_batch *batch)
{
   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables.  From the Broadwell PRM,
    * Shared Function > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software.  It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    [...]
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * Broadwell PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever in
    * regards to surface state and binding tables.  Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX:  As far as we have been able to determine through
    * experimentation, flushing the texture cache appears to be
    * sufficient.  The theory here is that all of the sampling/rendering
    * units cache the binding table in the texture cache.  However, we have
    * yet to be able to actually confirm this.
    */
   iris_emit_end_of_pipe_sync(batch,
                              "change STATE_BASE_ADDRESS (invalidates)",
                              PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                              PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                              PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}
static void
_iris_emit_lri(struct iris_batch *batch, uint32_t reg, uint32_t val)
{
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset = reg;
      lri.DataDWord = val;
   }
}
#define iris_emit_lri(b, r, v) _iris_emit_lri(b, GENX(r##_num), v)
static void
_iris_emit_lrr(struct iris_batch *batch, uint32_t dst, uint32_t src)
{
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
      lrr.SourceRegisterAddress = src;
      lrr.DestinationRegisterAddress = dst;
   }
}
static void
iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
                         uint32_t src)
{
   _iris_emit_lrr(batch, dst, src);
}

static void
iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
                         uint32_t src)
{
   _iris_emit_lrr(batch, dst, src);
   _iris_emit_lrr(batch, dst + 4, src + 4);
}

static void
iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
                         uint32_t val)
{
   _iris_emit_lri(batch, reg, val);
}

static void
iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
                         uint64_t val)
{
   _iris_emit_lri(batch, reg + 0, val & 0xffffffff);
   _iris_emit_lri(batch, reg + 4, val >> 32);
}
/**
 * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
 */
static void
iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
                         struct iris_bo *bo, uint32_t offset)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = reg;
      lrm.MemoryAddress = ro_bo(bo, offset);
   }
   iris_batch_sync_region_end(batch);
}
/**
 * Load a 64-bit value from a buffer into a MMIO register via
 * two MI_LOAD_REGISTER_MEM commands.
 */
static void
iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
                         struct iris_bo *bo, uint32_t offset)
{
   iris_load_register_mem32(batch, reg + 0, bo, offset + 0);
   iris_load_register_mem32(batch, reg + 4, bo, offset + 4);
}
static void
iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
                          struct iris_bo *bo, uint32_t offset,
                          bool predicated)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
      srm.RegisterAddress = reg;
      srm.MemoryAddress = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      srm.PredicateEnable = predicated;
   }
   iris_batch_sync_region_end(batch);
}

static void
iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
                          struct iris_bo *bo, uint32_t offset,
                          bool predicated)
{
   iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated);
   iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated);
}
static void
iris_store_data_imm32(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint32_t imm)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) {
      sdi.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      sdi.ImmediateData = imm;
   }
   iris_batch_sync_region_end(batch);
}

static void
iris_store_data_imm64(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint64_t imm)
{
   /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of
    * 2 in genxml but it's actually variable length and we need 5 DWords.
    */
   void *map = iris_get_command_space(batch, 4 * 5);
   iris_batch_sync_region_start(batch);
   _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) {
      sdi.DWordLength = 5 - 2;
      sdi.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      sdi.ImmediateData = imm;
   }
   iris_batch_sync_region_end(batch);
}
static void
iris_copy_mem_mem(struct iris_batch *batch,
                  struct iris_bo *dst_bo, uint32_t dst_offset,
                  struct iris_bo *src_bo, uint32_t src_offset,
                  unsigned bytes)
{
   /* MI_COPY_MEM_MEM operates on DWords. */
   assert(bytes % 4 == 0);
   assert(dst_offset % 4 == 0);
   assert(src_offset % 4 == 0);
   iris_batch_sync_region_start(batch);

   for (unsigned i = 0; i < bytes; i += 4) {
      iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
         cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i,
                                             IRIS_DOMAIN_OTHER_WRITE);
         cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
      }
   }

   iris_batch_sync_region_end(batch);
}
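/* Note that MI_COPY_MEM_MEM moves a single DWord per packet, so copying
 * 16 bytes emits four commands; this path is best suited to small copies.
 */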
static void
emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
{
#if GEN_GEN >= 8 && GEN_GEN < 10
   /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
    *
    *   Software must clear the COLOR_CALC_STATE Valid field in
    *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
    *   with Pipeline Select set to GPGPU.
    *
    * The internal hardware docs recommend the same workaround for Gen9
    * hardware too.
    */
   if (pipeline == GPGPU)
      iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
#endif

   /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
    * PIPELINE_SELECT [DevBWR+]":
    *
    *    "Software must ensure all the write caches are flushed through a
    *     stalling PIPE_CONTROL command followed by another PIPE_CONTROL
    *     command to invalidate read only caches prior to programming
    *     MI_PIPELINE_SELECT command to change the Pipeline Select Mode."
    */
   iris_emit_pipe_control_flush(batch,
                                "workaround: PIPELINE_SELECT flushes (1/2)",
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_DATA_CACHE_FLUSH |
                                PIPE_CONTROL_CS_STALL);

   iris_emit_pipe_control_flush(batch,
                                "workaround: PIPELINE_SELECT flushes (2/2)",
                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                PIPE_CONTROL_INSTRUCTION_INVALIDATE);

   iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
      sel.PipelineSelection = pipeline;
   }
}
#if GEN_GEN == 9
static void
init_glk_barrier_mode(struct iris_batch *batch, uint32_t value)
{
   uint32_t reg_val;

   /* From the documentation for SLICE_COMMON_ECO_CHICKEN1:
    *
    *    "This chicken bit works around a hardware issue with barrier
    *     logic encountered when switching between GPGPU and 3D pipelines.
    *     To workaround the issue, this mode bit should be set after a
    *     pipeline is selected."
    */
   iris_pack_state(GENX(SLICE_COMMON_ECO_CHICKEN1), &reg_val, reg) {
      reg.GLKBarrierMode = value;
      reg.GLKBarrierModeMask = 1;
   }
   iris_emit_lri(batch, SLICE_COMMON_ECO_CHICKEN1, reg_val);
}
#endif
static void
init_state_base_address(struct iris_batch *batch)
{
   uint32_t mocs = batch->screen->isl_dev.mocs.internal;
   flush_before_state_base_change(batch);

   /* We program most base addresses once at context initialization time.
    * Each base address points at a 4GB memory zone, and never needs to
    * change.  See iris_bufmgr.h for a description of the memory zones.
    *
    * The one exception is Surface State Base Address, which needs to be
    * updated occasionally.  See iris_binder.c for the details there.
    */
   iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.GeneralStateMOCS            = mocs;
      sba.StatelessDataPortAccessMOCS = mocs;
      sba.DynamicStateMOCS            = mocs;
      sba.IndirectObjectMOCS          = mocs;
      sba.InstructionMOCS             = mocs;
      sba.SurfaceStateMOCS            = mocs;

      sba.GeneralStateBaseAddressModifyEnable   = true;
      sba.DynamicStateBaseAddressModifyEnable   = true;
      sba.IndirectObjectBaseAddressModifyEnable = true;
      sba.InstructionBaseAddressModifyEnable    = true;
      sba.GeneralStateBufferSizeModifyEnable    = true;
      sba.DynamicStateBufferSizeModifyEnable    = true;
#if (GEN_GEN >= 9)
      sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
      sba.BindlessSurfaceStateMOCS    = mocs;
#endif
      sba.IndirectObjectBufferSizeModifyEnable  = true;
      sba.InstructionBuffersizeModifyEnable     = true;

      sba.InstructionBaseAddress  = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
      sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);

      sba.GeneralStateBufferSize   = 0xfffff;
      sba.IndirectObjectBufferSize = 0xfffff;
      sba.InstructionBufferSize    = 0xfffff;
      sba.DynamicStateBufferSize   = 0xfffff;
   }

   flush_after_state_base_change(batch);
}
static void
iris_emit_l3_config(struct iris_batch *batch,
                    const struct gen_l3_config *cfg)
{
   uint32_t reg_val;

   assert(cfg || GEN_GEN >= 12);

#if GEN_GEN >= 12
#define L3_ALLOCATION_REG GENX(L3ALLOC)
#define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
#else
#define L3_ALLOCATION_REG GENX(L3CNTLREG)
#define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
#endif

   iris_pack_state(L3_ALLOCATION_REG, &reg_val, reg) {
#if GEN_GEN < 11
      reg.SLMEnable = cfg->n[GEN_L3P_SLM] > 0;
#endif
#if GEN_GEN == 11
      /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
       * in L3CNTLREG register. The default setting of the bit is not the
       * desirable behavior.
       */
      reg.ErrorDetectionBehaviorControl = true;
      reg.UseFullWays = true;
#endif
      if (GEN_GEN < 12 || cfg) {
         reg.URBAllocation = cfg->n[GEN_L3P_URB];
         reg.ROAllocation = cfg->n[GEN_L3P_RO];
         reg.DCAllocation = cfg->n[GEN_L3P_DC];
         reg.AllAllocation = cfg->n[GEN_L3P_ALL];
      } else {
         reg.L3FullWayAllocationEnable = true;
      }
   }

   _iris_emit_lri(batch, L3_ALLOCATION_REG_num, reg_val);
}
#if GEN_GEN == 9
static void
iris_enable_obj_preemption(struct iris_batch *batch, bool enable)
{
   uint32_t reg_val;

   /* A fixed function pipe flush is required before modifying this field */
   iris_emit_end_of_pipe_sync(batch, enable ? "enable preemption"
                                            : "disable preemption",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* enable object level preemption */
   iris_pack_state(GENX(CS_CHICKEN1), &reg_val, reg) {
      reg.ReplayMode = enable;
      reg.ReplayModeMask = true;
   }
   iris_emit_lri(batch, CS_CHICKEN1, reg_val);
}
#endif
#if GEN_GEN == 11
static void
iris_upload_slice_hashing_state(struct iris_batch *batch)
{
   const struct gen_device_info *devinfo = &batch->screen->devinfo;
   int subslices_delta =
      devinfo->ppipe_subslices[0] - devinfo->ppipe_subslices[1];
   if (subslices_delta == 0)
      return;

   struct iris_context *ice = NULL;
   ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]);
   assert(&ice->batches[IRIS_BATCH_RENDER] == batch);

   unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
   uint32_t hash_address;
   struct pipe_resource *tmp = NULL;
   uint32_t *map =
      stream_state(batch, ice->state.dynamic_uploader, &tmp,
                   size, 64, &hash_address);
   pipe_resource_reference(&tmp, NULL);

   struct GENX(SLICE_HASH_TABLE) table0 = {
      .Entry = {
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }
      }
   };

   struct GENX(SLICE_HASH_TABLE) table1 = {
      .Entry = {
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }
      }
   };

   const struct GENX(SLICE_HASH_TABLE) *table =
      subslices_delta < 0 ? &table0 : &table1;
   GENX(SLICE_HASH_TABLE_pack)(NULL, map, table);

   iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
      ptr.SliceHashStatePointerValid = true;
      ptr.SliceHashTableStatePointer = hash_address;
   }

   iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) {
      mode.SliceHashingTableEnable = true;
   }
}
#endif
static void
iris_alloc_push_constants(struct iris_batch *batch)
{
   /* For now, we set a static partitioning of the push constant area,
    * assuming that all stages could be in use.
    *
    * TODO: Try lazily allocating the HS/DS/GS sections as needed, and
    *       see if that improves performance by offering more space to
    *       the VS/FS when those aren't in use.  Also, try dynamically
    *       enabling/disabling it like i965 does.  This would mean more
    *       stalls and may not actually help; we don't know yet.
    */
   for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
      iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
         alloc._3DCommandSubOpcode = 18 + i;
         alloc.ConstantBufferOffset = 6 * i;
         alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
      }
   }
}
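/* The resulting layout (derived from the loop above, in the packet's
 * buffer-allocation units): VS, HS, DS, and GS sit at offsets 0, 6, 12,
 * and 18 with 6 units each, and FS sits at offset 24 with 8 units.
 */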
#if GEN_GEN == 12
static void
init_aux_map_state(struct iris_batch *batch);
#endif
/**
 * Upload the initial GPU state for a render context.
 *
 * This sets some invariant state that needs to be programmed a particular
 * way, but we never actually change.
 */
static void
iris_init_render_context(struct iris_batch *batch)
{
   UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
   uint32_t reg_val;

   iris_batch_sync_region_start(batch);

   emit_pipeline_select(batch, _3D);

   iris_emit_l3_config(batch, batch->screen->l3_config_3d);

   init_state_base_address(batch);

#if GEN_GEN >= 9
   iris_pack_state(GENX(CS_DEBUG_MODE2), &reg_val, reg) {
      reg.CONSTANT_BUFFERAddressOffsetDisable = true;
      reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
   }
   iris_emit_lri(batch, CS_DEBUG_MODE2, reg_val);
#else
   iris_pack_state(GENX(INSTPM), &reg_val, reg) {
      reg.CONSTANT_BUFFERAddressOffsetDisable = true;
      reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
   }
   iris_emit_lri(batch, INSTPM, reg_val);
#endif

#if GEN_GEN == 9
   iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
      reg.FloatBlendOptimizationEnable = true;
      reg.FloatBlendOptimizationEnableMask = true;
      reg.PartialResolveDisableInVC = true;
      reg.PartialResolveDisableInVCMask = true;
   }
   iris_emit_lri(batch, CACHE_MODE_1, reg_val);

   if (devinfo->is_geminilake)
      init_glk_barrier_mode(batch, GLK_BARRIER_MODE_3D_HULL);
#endif

#if GEN_GEN == 11
   iris_pack_state(GENX(TCCNTLREG), &reg_val, reg) {
      reg.L3DataPartialWriteMergingEnable = true;
      reg.ColorZPartialWriteMergingEnable = true;
      reg.URBPartialWriteMergingEnable = true;
      reg.TCDisable = true;
   }
   iris_emit_lri(batch, TCCNTLREG, reg_val);

   iris_pack_state(GENX(SAMPLER_MODE), &reg_val, reg) {
      reg.HeaderlessMessageforPreemptableContexts = 1;
      reg.HeaderlessMessageforPreemptableContextsMask = 1;
   }
   iris_emit_lri(batch, SAMPLER_MODE, reg_val);

   /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */
   iris_pack_state(GENX(HALF_SLICE_CHICKEN7), &reg_val, reg) {
      reg.EnabledTexelOffsetPrecisionFix = 1;
      reg.EnabledTexelOffsetPrecisionFixMask = 1;
   }
   iris_emit_lri(batch, HALF_SLICE_CHICKEN7, reg_val);

   /* Hardware specification recommends disabling repacking for
    * compatibility with the decompression mechanism in the display
    * controller.
    */
   if (devinfo->disable_ccs_repack) {
      iris_pack_state(GENX(CACHE_MODE_0), &reg_val, reg) {
         reg.DisableRepackingforCompression = true;
         reg.DisableRepackingforCompressionMask = true;
      }
      iris_emit_lri(batch, CACHE_MODE_0, reg_val);
   }

   iris_upload_slice_hashing_state(batch);
#endif

   /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
    * changing it dynamically.  We set it to the maximum size here, and
    * instead include the render target dimensions in the viewport, so
    * viewport extents clipping takes care of pruning stray geometry.
    */
   iris_emit_cmd(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = UINT16_MAX;
      rect.ClippedDrawingRectangleYMax = UINT16_MAX;
   }

   /* Set the initial MSAA sample positions. */
   iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_PATTERN), pat) {
      GEN_SAMPLE_POS_1X(pat._1xSample);
      GEN_SAMPLE_POS_2X(pat._2xSample);
      GEN_SAMPLE_POS_4X(pat._4xSample);
      GEN_SAMPLE_POS_8X(pat._8xSample);
#if GEN_GEN >= 9
      GEN_SAMPLE_POS_16X(pat._16xSample);
#endif
   }

   /* Use the legacy AA line coverage computation. */
   iris_emit_cmd(batch, GENX(3DSTATE_AA_LINE_PARAMETERS), foo);

   /* Disable chromakeying (it's for media) */
   iris_emit_cmd(batch, GENX(3DSTATE_WM_CHROMAKEY), foo);

   /* We want regular rendering, not special HiZ operations. */
   iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);

   /* No polygon stippling offsets are necessary. */
   /* TODO: may need to set an offset for origin-UL framebuffers */
   iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);

   iris_alloc_push_constants(batch);

#if GEN_GEN == 12
   init_aux_map_state(batch);
#endif

   iris_batch_sync_region_end(batch);
}
static void
iris_init_compute_context(struct iris_batch *batch)
{
   UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;

   iris_batch_sync_region_start(batch);

   /* GEN:BUG:1607854226:
    *
    *  Start with pipeline in 3D mode to set the STATE_BASE_ADDRESS.
    */
#if GEN_GEN == 12
   emit_pipeline_select(batch, _3D);
#else
   emit_pipeline_select(batch, GPGPU);
#endif

   iris_emit_l3_config(batch, batch->screen->l3_config_cs);

   init_state_base_address(batch);

#if GEN_GEN == 12
   emit_pipeline_select(batch, GPGPU);
#endif

#if GEN_GEN == 9
   if (devinfo->is_geminilake)
      init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
#endif

#if GEN_GEN == 12
   init_aux_map_state(batch);
#endif

   iris_batch_sync_region_end(batch);
}
struct iris_vertex_buffer_state {
   /** The VERTEX_BUFFER_STATE hardware structure. */
   uint32_t state[GENX(VERTEX_BUFFER_STATE_length)];

   /** The resource to source vertex data from. */
   struct pipe_resource *resource;

   int offset;
};

struct iris_depth_buffer_state {
   /* Depth/HiZ/Stencil related hardware packets. */
   uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
                    GENX(3DSTATE_STENCIL_BUFFER_length) +
                    GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
                    GENX(3DSTATE_CLEAR_PARAMS_length) +
                    GENX(MI_LOAD_REGISTER_IMM_length) * 2];
};

/**
 * Generation-specific context state (ice->state.genx->...).
 *
 * Most state can go in iris_context directly, but these encode hardware
 * packets which vary by generation.
 */
struct iris_genx_state {
   struct iris_vertex_buffer_state vertex_buffers[33];
   uint32_t last_index_buffer[GENX(3DSTATE_INDEX_BUFFER_length)];

   struct iris_depth_buffer_state depth_buffer;

   uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];

#if GEN_GEN == 8
   bool pma_fix_enabled;
#endif

#if GEN_GEN == 9
   /* Is object level preemption enabled? */
   bool object_preemption;
#endif

   struct {
#if GEN_GEN == 8
      struct brw_image_param image_param[PIPE_MAX_SHADER_IMAGES];
#endif
   } shaders[MESA_SHADER_STAGES];
};
/**
 * The pipe->set_blend_color() driver hook.
 *
 * This corresponds to our COLOR_CALC_STATE.
 */
static void
iris_set_blend_color(struct pipe_context *ctx,
                     const struct pipe_blend_color *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   /* Our COLOR_CALC_STATE is exactly pipe_blend_color, so just memcpy */
   memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
   ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
}
/**
 * Gallium CSO for blend state (see pipe_blend_state).
 */
struct iris_blend_state {
   /** Partial 3DSTATE_PS_BLEND */
   uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];

   /** Partial BLEND_STATE */
   uint32_t blend_state[GENX(BLEND_STATE_length) +
                        BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)];

   bool alpha_to_coverage; /* for shader key */

   /** Bitfield of whether blending is enabled for RT[i] - for aux resolves */
   uint8_t blend_enables;

   /** Bitfield of whether color writes are enabled for RT[i] */
   uint8_t color_write_enables;

   /** Does RT[0] use dual color blending? */
   bool dual_color_blending;
};
static enum pipe_blendfactor
fix_blendfactor(enum pipe_blendfactor f, bool alpha_to_one)
{
   if (alpha_to_one) {
      if (f == PIPE_BLENDFACTOR_SRC1_ALPHA)
         return PIPE_BLENDFACTOR_ONE;

      if (f == PIPE_BLENDFACTOR_INV_SRC1_ALPHA)
         return PIPE_BLENDFACTOR_ZERO;
   }

   return f;
}
/**
 * The pipe->create_blend_state() driver hook.
 *
 * Translates a pipe_blend_state into iris_blend_state.
 */
static void *
iris_create_blend_state(struct pipe_context *ctx,
                        const struct pipe_blend_state *state)
{
   struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
   uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);

   cso->blend_enables = 0;
   cso->color_write_enables = 0;
   STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS <= 8);

   cso->alpha_to_coverage = state->alpha_to_coverage;

   bool indep_alpha_blend = false;

   for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
      const struct pipe_rt_blend_state *rt =
         &state->rt[state->independent_blend_enable ? i : 0];

      enum pipe_blendfactor src_rgb =
         fix_blendfactor(rt->rgb_src_factor, state->alpha_to_one);
      enum pipe_blendfactor src_alpha =
         fix_blendfactor(rt->alpha_src_factor, state->alpha_to_one);
      enum pipe_blendfactor dst_rgb =
         fix_blendfactor(rt->rgb_dst_factor, state->alpha_to_one);
      enum pipe_blendfactor dst_alpha =
         fix_blendfactor(rt->alpha_dst_factor, state->alpha_to_one);

      if (rt->rgb_func != rt->alpha_func ||
          src_rgb != src_alpha || dst_rgb != dst_alpha)
         indep_alpha_blend = true;

      if (rt->blend_enable)
         cso->blend_enables |= 1u << i;

      if (rt->colormask)
         cso->color_write_enables |= 1u << i;

      iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
         be.LogicOpEnable = state->logicop_enable;
         be.LogicOpFunction = state->logicop_func;

         be.PreBlendSourceOnlyClampEnable = false;
         be.ColorClampRange = COLORCLAMP_RTFORMAT;
         be.PreBlendColorClampEnable = true;
         be.PostBlendColorClampEnable = true;

         be.ColorBufferBlendEnable = rt->blend_enable;

         be.ColorBlendFunction          = rt->rgb_func;
         be.AlphaBlendFunction          = rt->alpha_func;
         be.SourceBlendFactor           = src_rgb;
         be.SourceAlphaBlendFactor      = src_alpha;
         be.DestinationBlendFactor      = dst_rgb;
         be.DestinationAlphaBlendFactor = dst_alpha;

         be.WriteDisableRed   = !(rt->colormask & PIPE_MASK_R);
         be.WriteDisableGreen = !(rt->colormask & PIPE_MASK_G);
         be.WriteDisableBlue  = !(rt->colormask & PIPE_MASK_B);
         be.WriteDisableAlpha = !(rt->colormask & PIPE_MASK_A);
      }
      blend_entry += GENX(BLEND_STATE_ENTRY_length);
   }

   iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
      /* pb.HasWriteableRT is filled in at draw time.
       * pb.AlphaTestEnable is filled in at draw time.
       *
       * pb.ColorBufferBlendEnable is filled in at draw time so we can avoid
       * setting it when dual color blending without an appropriate shader.
       */

      pb.AlphaToCoverageEnable = state->alpha_to_coverage;
      pb.IndependentAlphaBlendEnable = indep_alpha_blend;

      pb.SourceBlendFactor =
         fix_blendfactor(state->rt[0].rgb_src_factor, state->alpha_to_one);
      pb.SourceAlphaBlendFactor =
         fix_blendfactor(state->rt[0].alpha_src_factor, state->alpha_to_one);
      pb.DestinationBlendFactor =
         fix_blendfactor(state->rt[0].rgb_dst_factor, state->alpha_to_one);
      pb.DestinationAlphaBlendFactor =
         fix_blendfactor(state->rt[0].alpha_dst_factor, state->alpha_to_one);
   }

   iris_pack_state(GENX(BLEND_STATE), cso->blend_state, bs) {
      bs.AlphaToCoverageEnable = state->alpha_to_coverage;
      bs.IndependentAlphaBlendEnable = indep_alpha_blend;
      bs.AlphaToOneEnable = state->alpha_to_one;
      bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
      bs.ColorDitherEnable = state->dither;
      /* bs.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
   }

   cso->dual_color_blending = util_blend_state_is_dual(state, 0);

   return cso;
}
/**
 * The pipe->bind_blend_state() driver hook.
 *
 * Bind a blending CSO and flag related dirty bits.
 */
static void
iris_bind_blend_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_blend_state *cso = state;

   ice->state.cso_blend = cso;

   ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
   ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
   ice->state.stage_dirty |= ice->state.stage_dirty_for_nos[IRIS_NOS_BLEND];

   if (GEN_GEN == 8)
      ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}
/**
 * Return true if the FS writes to any color outputs which are not disabled
 * via color masking.
 */
static bool
has_writeable_rt(const struct iris_blend_state *cso_blend,
                 const struct shader_info *fs_info)
{
   if (!fs_info)
      return false;

   unsigned rt_outputs = fs_info->outputs_written >> FRAG_RESULT_DATA0;

   if (fs_info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR))
      rt_outputs = (1 << BRW_MAX_DRAW_BUFFERS) - 1;

   return cso_blend->color_write_enables & rt_outputs;
}
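/* For example, an FS writing only FRAG_RESULT_DATA0 and FRAG_RESULT_DATA2
 * yields rt_outputs = 0b101, so the result is nonzero exactly when color
 * writes are enabled for RT 0 or RT 2.
 */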
/**
 * Gallium CSO for depth, stencil, and alpha testing state.
 */
struct iris_depth_stencil_alpha_state {
   /** Partial 3DSTATE_WM_DEPTH_STENCIL. */
   uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];

#if GEN_GEN >= 12
   uint32_t depth_bounds[GENX(3DSTATE_DEPTH_BOUNDS_length)];
#endif

   /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
   struct pipe_alpha_state alpha;

   /** Outbound to resolve and cache set tracking. */
   bool depth_writes_enabled;
   bool stencil_writes_enabled;

   /** Outbound to Gen8-9 PMA stall equations */
   bool depth_test_enabled;
};
/**
 * The pipe->create_depth_stencil_alpha_state() driver hook.
 *
 * We encode most of 3DSTATE_WM_DEPTH_STENCIL, and just save off the alpha
 * testing state since we need pieces of it in a variety of places.
 */
static void *
iris_create_zsa_state(struct pipe_context *ctx,
                      const struct pipe_depth_stencil_alpha_state *state)
{
   struct iris_depth_stencil_alpha_state *cso =
      malloc(sizeof(struct iris_depth_stencil_alpha_state));

   bool two_sided_stencil = state->stencil[1].enabled;

   cso->alpha = state->alpha;
   cso->depth_writes_enabled = state->depth.writemask;
   cso->depth_test_enabled = state->depth.enabled;
   cso->stencil_writes_enabled =
      state->stencil[0].writemask != 0 ||
      (two_sided_stencil && state->stencil[1].writemask != 0);

   /* gallium frontends need to optimize away EQUAL writes for us. */
   assert(!(state->depth.func == PIPE_FUNC_EQUAL && state->depth.writemask));

   iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), cso->wmds, wmds) {
      wmds.StencilFailOp = state->stencil[0].fail_op;
      wmds.StencilPassDepthFailOp = state->stencil[0].zfail_op;
      wmds.StencilPassDepthPassOp = state->stencil[0].zpass_op;
      wmds.StencilTestFunction =
         translate_compare_func(state->stencil[0].func);
      wmds.BackfaceStencilFailOp = state->stencil[1].fail_op;
      wmds.BackfaceStencilPassDepthFailOp = state->stencil[1].zfail_op;
      wmds.BackfaceStencilPassDepthPassOp = state->stencil[1].zpass_op;
      wmds.BackfaceStencilTestFunction =
         translate_compare_func(state->stencil[1].func);
      wmds.DepthTestFunction = translate_compare_func(state->depth.func);
      wmds.DoubleSidedStencilEnable = two_sided_stencil;
      wmds.StencilTestEnable = state->stencil[0].enabled;
      wmds.StencilBufferWriteEnable =
         state->stencil[0].writemask != 0 ||
         (two_sided_stencil && state->stencil[1].writemask != 0);
      wmds.DepthTestEnable = state->depth.enabled;
      wmds.DepthBufferWriteEnable = state->depth.writemask;
      wmds.StencilTestMask = state->stencil[0].valuemask;
      wmds.StencilWriteMask = state->stencil[0].writemask;
      wmds.BackfaceStencilTestMask = state->stencil[1].valuemask;
      wmds.BackfaceStencilWriteMask = state->stencil[1].writemask;
      /* wmds.[Backface]StencilReferenceValue are merged later */
#if GEN_GEN >= 12
      wmds.StencilReferenceValueModifyDisable = true;
#endif
   }

#if GEN_GEN >= 12
   iris_pack_command(GENX(3DSTATE_DEPTH_BOUNDS), cso->depth_bounds, depth_bounds) {
      depth_bounds.DepthBoundsTestValueModifyDisable = false;
      depth_bounds.DepthBoundsTestEnableModifyDisable = false;
      depth_bounds.DepthBoundsTestEnable = state->depth.bounds_test;
      depth_bounds.DepthBoundsTestMinValue = state->depth.bounds_min;
      depth_bounds.DepthBoundsTestMaxValue = state->depth.bounds_max;
   }
#endif

   return cso;
}
/**
 * The pipe->bind_depth_stencil_alpha_state() driver hook.
 *
 * Bind a depth/stencil/alpha CSO and flag related dirty bits.
 */
static void
iris_bind_zsa_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_depth_stencil_alpha_state *old_cso = ice->state.cso_zsa;
   struct iris_depth_stencil_alpha_state *new_cso = state;

   if (new_cso) {
      if (cso_changed(alpha.ref_value))
         ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;

      if (cso_changed(alpha.enabled))
         ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;

      if (cso_changed(alpha.func))
         ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;

      if (cso_changed(depth_writes_enabled))
         ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;

      ice->state.depth_writes_enabled = new_cso->depth_writes_enabled;
      ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled;

#if GEN_GEN >= 12
      if (cso_changed(depth_bounds))
         ice->state.dirty |= IRIS_DIRTY_DEPTH_BOUNDS;
#endif
   }

   ice->state.cso_zsa = new_cso;
   ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
   ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
   ice->state.stage_dirty |=
      ice->state.stage_dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];

   if (GEN_GEN == 8)
      ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}
#if GEN_GEN == 8
static bool
want_pma_fix(struct iris_context *ice)
{
   UNUSED struct iris_screen *screen = (void *) ice->ctx.screen;
   UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
   const struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
   const struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   const struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
   const struct iris_blend_state *cso_blend = ice->state.cso_blend;

   /* In very specific combinations of state, we can instruct Gen8-9 hardware
    * to avoid stalling at the pixel mask array.  The state equations are
    * documented in these places:
    *
    * - Gen8 Depth PMA Fix:   CACHE_MODE_1::NP_PMA_FIX_ENABLE
    * - Gen9 Stencil PMA Fix: CACHE_MODE_0::STC PMA Optimization Enable
    *
    * Both equations share some common elements:
    *
    *    no_hiz_op =
    *       !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    *         3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    *         3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    *         3DSTATE_WM_HZ_OP::StencilBufferClear) &&
    *
    *    killpixels =
    *       3DSTATE_WM::ForceKillPix != ForceOff &&
    *       (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
    *        3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
    *        3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
    *        3DSTATE_PS_BLEND::AlphaTestEnable ||
    *        3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
    *
    *    (Technically the stencil PMA treats ForceKillPix differently,
    *     but I think this is a documentation oversight, and we don't
    *     ever use it in this way, so it doesn't matter).
    *
    *    common_pma_fix =
    *       3DSTATE_WM::ForceThreadDispatch != 1 &&
    *       3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0 &&
    *       3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
    *       3DSTATE_DEPTH_BUFFER::HIZ Enable &&
    *       3DSTATE_WM::EDSC_Mode != EDSC_PREPS &&
    *       3DSTATE_PS_EXTRA::PixelShaderValid &&
    *       no_hiz_op
    *
    * These are always true:
    *
    *    3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0
    *    3DSTATE_PS_EXTRA::PixelShaderValid
    *
    * Also, we never use the normal drawing path for HiZ ops; these are true:
    *
    *    !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    *      3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    *      3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    *      3DSTATE_WM_HZ_OP::StencilBufferClear)
    *
    * This happens sometimes:
    *
    *    3DSTATE_WM::ForceThreadDispatch != 1
    *
    * However, we choose to ignore it as it either agrees with the signal
    * (dispatch was already enabled, so nothing out of the ordinary), or
    * there are no framebuffer attachments (so no depth or HiZ anyway,
    * meaning the PMA signal will already be disabled).
    */

   if (!cso_fb->zsbuf)
      return false;

   struct iris_resource *zres, *sres;
   iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, &zres, &sres);

   /* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
    * 3DSTATE_DEPTH_BUFFER::HIZ Enable
    */
   if (!zres || !iris_resource_level_has_hiz(zres, cso_fb->zsbuf->u.tex.level))
      return false;

   /* 3DSTATE_WM::EDSC_Mode != EDSC_PREPS */
   if (wm_prog_data->early_fragment_tests)
      return false;

   /* 3DSTATE_WM::ForceKillPix != ForceOff &&
    * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
    *  3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
    *  3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
    *  3DSTATE_PS_BLEND::AlphaTestEnable ||
    *  3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
    */
   bool killpixels = wm_prog_data->uses_kill || wm_prog_data->uses_omask ||
                     cso_blend->alpha_to_coverage || cso_zsa->alpha.enabled;

   /* The Gen8 depth PMA equation becomes:
    *
    *    depth_writes =
    *       3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
    *       3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE
    *
    *    stencil_writes =
    *       3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
    *       3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE &&
    *       3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE
    *
    *    pma_fix =
    *       common_pma_fix &&
    *       3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable &&
    *       ((killpixels && (depth_writes || stencil_writes)) ||
    *        3DSTATE_PS_EXTRA::PixelShaderComputedDepthMode != PSCDEPTH_OFF)
    */
   if (!cso_zsa->depth_test_enabled)
      return false;

   return wm_prog_data->computed_depth_mode != PSCDEPTH_OFF ||
          (killpixels && (cso_zsa->depth_writes_enabled ||
                          (sres && cso_zsa->stencil_writes_enabled)));
}
#endif
void
genX(update_pma_fix)(struct iris_context *ice,
                     struct iris_batch *batch,
                     bool enable)
{
#if GEN_GEN == 8
   struct iris_genx_state *genx = ice->state.genx;

   if (genx->pma_fix_enabled == enable)
      return;

   genx->pma_fix_enabled = enable;

   /* According to the Broadwell PIPE_CONTROL documentation, software should
    * emit a PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set
    * prior to the LRI.  If stencil buffer writes are enabled, then a Render
    * Cache Flush is also necessary.
    *
    * The Gen9 docs say to use a depth stall rather than a command streamer
    * stall.  However, the hardware seems to violently disagree.  A full
    * command streamer stall seems to be needed in both cases.
    */
   iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)",
                                PIPE_CONTROL_CS_STALL |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH);

   uint32_t reg_val;
   iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
      reg.NPPMAFixEnable = enable;
      reg.NPEarlyZFailsDisable = enable;
      reg.NPPMAFixEnableMask = true;
      reg.NPEarlyZFailsDisableMask = true;
   }
   iris_emit_lri(batch, CACHE_MODE_1, reg_val);

   /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
    * Flush bits is often necessary.  We do it regardless because it's easier.
    * The render cache flush is also necessary if stencil writes are enabled.
    *
    * Again, the Gen9 docs give a different set of flushes but the Broadwell
    * flushes seem to work just as well.
    */
   iris_emit_pipe_control_flush(batch, "PMA fix change (2/2)",
                                PIPE_CONTROL_DEPTH_STALL |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH);
#endif
}
/**
 * Gallium CSO for rasterizer state.
 */
struct iris_rasterizer_state {
   uint32_t sf[GENX(3DSTATE_SF_length)];
   uint32_t clip[GENX(3DSTATE_CLIP_length)];
   uint32_t raster[GENX(3DSTATE_RASTER_length)];
   uint32_t wm[GENX(3DSTATE_WM_length)];
   uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];

   uint8_t num_clip_plane_consts;
   bool clip_halfz;              /* for CC_VIEWPORT */
   bool depth_clip_near;         /* for CC_VIEWPORT */
   bool depth_clip_far;          /* for CC_VIEWPORT */
   bool flatshade;               /* for shader state */
   bool flatshade_first;         /* for stream output */
   bool clamp_fragment_color;    /* for shader state */
   bool light_twoside;           /* for shader state */
   bool rasterizer_discard;      /* for 3DSTATE_STREAMOUT and 3DSTATE_CLIP */
   bool half_pixel_center;       /* for 3DSTATE_MULTISAMPLE */
   bool line_stipple_enable;
   bool poly_stipple_enable;
   bool multisample;
   bool force_persample_interp;
   bool conservative_rasterization;
   bool fill_mode_point_or_line;
   enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
   uint16_t sprite_coord_enable;
};
static float
get_line_width(const struct pipe_rasterizer_state *state)
{
   float line_width = state->line_width;

   /* From the OpenGL 4.4 spec:
    *
    * "The actual width of non-antialiased lines is determined by rounding
    *  the supplied width to the nearest integer, then clamping it to the
    *  implementation-dependent maximum non-antialiased line width."
    */
   if (!state->multisample && !state->line_smooth)
      line_width = roundf(state->line_width);

   if (!state->multisample && state->line_smooth && line_width < 1.5f) {
      /* For 1 pixel line thickness or less, the general anti-aliasing
       * algorithm gives up, and a garbage line is generated.  Setting a
       * Line Width of 0.0 specifies the rasterization of the "thinnest"
       * (one-pixel-wide), non-antialiased lines.
       *
       * Lines rendered with zero Line Width are rasterized using the
       * "Grid Intersection Quantization" rules as specified by the
       * "Zero-Width (Cosmetic) Line Rasterization" section of the docs.
       */
      line_width = 0.0f;
   }

   return line_width;
}
/**
 * The pipe->create_rasterizer_state() driver hook.
 */
static void *
iris_create_rasterizer_state(struct pipe_context *ctx,
                             const struct pipe_rasterizer_state *state)
{
   struct iris_rasterizer_state *cso =
      malloc(sizeof(struct iris_rasterizer_state));

   cso->multisample = state->multisample;
   cso->force_persample_interp = state->force_persample_interp;
   cso->clip_halfz = state->clip_halfz;
   cso->depth_clip_near = state->depth_clip_near;
   cso->depth_clip_far = state->depth_clip_far;
   cso->flatshade = state->flatshade;
   cso->flatshade_first = state->flatshade_first;
   cso->clamp_fragment_color = state->clamp_fragment_color;
   cso->light_twoside = state->light_twoside;
   cso->rasterizer_discard = state->rasterizer_discard;
   cso->half_pixel_center = state->half_pixel_center;
   cso->sprite_coord_mode = state->sprite_coord_mode;
   cso->sprite_coord_enable = state->sprite_coord_enable;
   cso->line_stipple_enable = state->line_stipple_enable;
   cso->poly_stipple_enable = state->poly_stipple_enable;
   cso->conservative_rasterization =
      state->conservative_raster_mode == PIPE_CONSERVATIVE_RASTER_POST_SNAP;

   cso->fill_mode_point_or_line =
      state->fill_front == PIPE_POLYGON_MODE_LINE ||
      state->fill_front == PIPE_POLYGON_MODE_POINT ||
      state->fill_back == PIPE_POLYGON_MODE_LINE ||
      state->fill_back == PIPE_POLYGON_MODE_POINT;

   if (state->clip_plane_enable != 0)
      cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1;
   else
      cso->num_clip_plane_consts = 0;

   float line_width = get_line_width(state);

   iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
      sf.StatisticsEnable = true;
      sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
      sf.LineEndCapAntialiasingRegionWidth =
         state->line_smooth ? _10pixels : _05pixels;
      sf.LastPixelEnable = state->line_last_pixel;
      sf.LineWidth = line_width;
      sf.SmoothPointEnable = (state->point_smooth || state->multisample) &&
                             !state->point_quad_rasterization;
      sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
      sf.PointWidth = state->point_size;

      if (state->flatshade_first) {
         sf.TriangleFanProvokingVertexSelect = 1;
      } else {
         sf.TriangleStripListProvokingVertexSelect = 2;
         sf.TriangleFanProvokingVertexSelect = 2;
         sf.LineStripListProvokingVertexSelect = 1;
      }
   }

   iris_pack_command(GENX(3DSTATE_RASTER), cso->raster, rr) {
      rr.FrontWinding = state->front_ccw ? CounterClockwise : Clockwise;
      rr.CullMode = translate_cull_mode(state->cull_face);
      rr.FrontFaceFillMode = translate_fill_mode(state->fill_front);
      rr.BackFaceFillMode = translate_fill_mode(state->fill_back);
      rr.DXMultisampleRasterizationEnable = state->multisample;
      rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
      rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
      rr.GlobalDepthOffsetEnablePoint = state->offset_point;
      rr.GlobalDepthOffsetConstant = state->offset_units * 2;
      rr.GlobalDepthOffsetScale = state->offset_scale;
      rr.GlobalDepthOffsetClamp = state->offset_clamp;
      rr.SmoothPointEnable = state->point_smooth;
      rr.AntialiasingEnable = state->line_smooth;
      rr.ScissorRectangleEnable = state->scissor;
#if GEN_GEN >= 9
      rr.ViewportZNearClipTestEnable = state->depth_clip_near;
      rr.ViewportZFarClipTestEnable = state->depth_clip_far;
      rr.ConservativeRasterizationEnable =
         cso->conservative_rasterization;
#else
      rr.ViewportZClipTestEnable = (state->depth_clip_near || state->depth_clip_far);
#endif
   }

   iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
      /* cl.NonPerspectiveBarycentricEnable is filled in at draw time from
       * the FS program; cl.ForceZeroRTAIndexEnable is filled in from the FB.
       */
      cl.EarlyCullEnable = true;
      cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
      cl.ForceUserClipDistanceClipTestEnableBitmask = true;
      cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
      cl.GuardbandClipTestEnable = true;
      cl.ClipEnable = true;
      cl.MinimumPointWidth = 0.125;
      cl.MaximumPointWidth = 255.875;

      if (state->flatshade_first) {
         cl.TriangleFanProvokingVertexSelect = 1;
      } else {
         cl.TriangleStripListProvokingVertexSelect = 2;
         cl.TriangleFanProvokingVertexSelect = 2;
         cl.LineStripListProvokingVertexSelect = 1;
      }
   }

   iris_pack_command(GENX(3DSTATE_WM), cso->wm, wm) {
      /* wm.BarycentricInterpolationMode and wm.EarlyDepthStencilControl are
       * filled in at draw time from the FS program.
       */
      wm.LineAntialiasingRegionWidth = _10pixels;
      wm.LineEndCapAntialiasingRegionWidth = _05pixels;
      wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
      wm.LineStippleEnable = state->line_stipple_enable;
      wm.PolygonStippleEnable = state->poly_stipple_enable;
   }

   /* Remap from 0..255 back to 1..256 */
   const unsigned line_stipple_factor = state->line_stipple_factor + 1;
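   /* Illustrative: a GL repeat factor of 1 reaches Gallium 0-based as 0 and
    * becomes 1 here, so the LineStippleInverseRepeatCount packed below is
    * 1.0f / 1.
    */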
   iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
      if (state->line_stipple_enable) {
         line.LineStipplePattern = state->line_stipple_pattern;
         line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
         line.LineStippleRepeatCount = line_stipple_factor;
      }
   }

   return cso;
}
/**
 * The pipe->bind_rasterizer_state() driver hook.
 *
 * Bind a rasterizer CSO and flag related dirty bits.
 */
static void
iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_rasterizer_state *old_cso = ice->state.cso_rast;
   struct iris_rasterizer_state *new_cso = state;
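   /* cso_changed() and cso_changed_memcmp() are helper macros defined
    * earlier in this file; they compare a field of old_cso and new_cso,
    * treating a NULL old CSO as "changed".
    */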
   if (new_cso) {
      /* Try to avoid re-emitting 3DSTATE_LINE_STIPPLE, it's non-pipelined */
      if (cso_changed_memcmp(line_stipple))
         ice->state.dirty |= IRIS_DIRTY_LINE_STIPPLE;

      if (cso_changed(half_pixel_center))
         ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;

      if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
         ice->state.dirty |= IRIS_DIRTY_WM;

      if (cso_changed(rasterizer_discard))
         ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;

      if (cso_changed(flatshade_first))
         ice->state.dirty |= IRIS_DIRTY_STREAMOUT;

      if (cso_changed(depth_clip_near) || cso_changed(depth_clip_far) ||
          cso_changed(clip_halfz))
         ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;

      if (cso_changed(sprite_coord_enable) ||
          cso_changed(sprite_coord_mode) ||
          cso_changed(light_twoside))
         ice->state.dirty |= IRIS_DIRTY_SBE;

      if (cso_changed(conservative_rasterization))
         ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
   }

   ice->state.cso_rast = new_cso;
   ice->state.dirty |= IRIS_DIRTY_RASTER;
   ice->state.dirty |= IRIS_DIRTY_CLIP;
   ice->state.stage_dirty |=
      ice->state.stage_dirty_for_nos[IRIS_NOS_RASTERIZER];
}
/**
 * Return true if the given wrap mode requires the border color to exist.
 *
 * (We can skip uploading it if the sampler isn't going to use it.)
 */
static bool
wrap_mode_needs_border_color(unsigned wrap_mode)
{
   return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
}

/**
 * Gallium CSO for sampler state.
 */
struct iris_sampler_state {
   union pipe_color_union border_color;
   bool needs_border_color;

   uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
};
/**
 * The pipe->create_sampler_state() driver hook.
 *
 * We fill out SAMPLER_STATE (except for the border color pointer), and
 * store that on the CPU.  It doesn't make sense to upload it to a GPU
 * buffer object yet, because 3DSTATE_SAMPLER_STATE_POINTERS requires
 * all bound sampler states to be in contiguous memory.
 */
static void *
iris_create_sampler_state(struct pipe_context *ctx,
                          const struct pipe_sampler_state *state)
{
   struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);

   if (!cso)
      return NULL;

   STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
   STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);

   unsigned wrap_s = translate_wrap(state->wrap_s);
   unsigned wrap_t = translate_wrap(state->wrap_t);
   unsigned wrap_r = translate_wrap(state->wrap_r);

   memcpy(&cso->border_color, &state->border_color, sizeof(cso->border_color));

   cso->needs_border_color = wrap_mode_needs_border_color(wrap_s) ||
                             wrap_mode_needs_border_color(wrap_t) ||
                             wrap_mode_needs_border_color(wrap_r);

   float min_lod = state->min_lod;
   unsigned mag_img_filter = state->mag_img_filter;

   // XXX: explain this code ported from ilo...I don't get it at all...
   if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE &&
       state->min_lod > 0.0f) {
      min_lod = 0.0f;
      mag_img_filter = state->min_img_filter;
   }

   iris_pack_state(GENX(SAMPLER_STATE), cso->sampler_state, samp) {
      samp.TCXAddressControlMode = wrap_s;
      samp.TCYAddressControlMode = wrap_t;
      samp.TCZAddressControlMode = wrap_r;
      samp.CubeSurfaceControlMode = state->seamless_cube_map;
      samp.NonnormalizedCoordinateEnable = !state->normalized_coords;
      samp.MinModeFilter = state->min_img_filter;
      samp.MagModeFilter = mag_img_filter;
      samp.MipModeFilter = translate_mip_filter(state->min_mip_filter);
      samp.MaximumAnisotropy = RATIO21;

      if (state->max_anisotropy >= 2) {
         if (state->min_img_filter == PIPE_TEX_FILTER_LINEAR) {
            samp.MinModeFilter = MAPFILTER_ANISOTROPIC;
            samp.AnisotropicAlgorithm = EWAApproximation;
         }

         if (state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)
            samp.MagModeFilter = MAPFILTER_ANISOTROPIC;

         samp.MaximumAnisotropy =
            MIN2((state->max_anisotropy - 2) / 2, RATIO161);
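         /* Illustrative: the hardware ratio encoding advances one step per
          * two API samples, so max_anisotropy == 16 maps to (16 - 2) / 2 == 7,
          * i.e. RATIO161 (16:1); larger API values are clamped by the MIN2.
          */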
      }

      /* Set address rounding bits if not using nearest filtering. */
      if (state->min_img_filter != PIPE_TEX_FILTER_NEAREST) {
         samp.UAddressMinFilterRoundingEnable = true;
         samp.VAddressMinFilterRoundingEnable = true;
         samp.RAddressMinFilterRoundingEnable = true;
      }

      if (state->mag_img_filter != PIPE_TEX_FILTER_NEAREST) {
         samp.UAddressMagFilterRoundingEnable = true;
         samp.VAddressMagFilterRoundingEnable = true;
         samp.RAddressMagFilterRoundingEnable = true;
      }

      if (state->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
         samp.ShadowFunction = translate_shadow_func(state->compare_func);

      const float hw_max_lod = GEN_GEN >= 7 ? 14 : 13;

      samp.LODPreClampMode = CLAMP_MODE_OGL;
      samp.MinLOD = CLAMP(min_lod, 0, hw_max_lod);
      samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
      samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);

      /* .BorderColorPointer is filled in by iris_bind_sampler_states. */
   }

   return cso;
}
/**
 * The pipe->bind_sampler_states() driver hook.
 */
static void
iris_bind_sampler_states(struct pipe_context *ctx,
                         enum pipe_shader_type p_stage,
                         unsigned start, unsigned count,
                         void **states)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);

   bool dirty = false;

   for (int i = 0; i < count; i++) {
      if (shs->samplers[start + i] != states[i]) {
         shs->samplers[start + i] = states[i];
         dirty = true;
      }
   }

   if (dirty)
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
}
/**
 * Upload the sampler states into a contiguous area of GPU memory, for
 * 3DSTATE_SAMPLER_STATE_POINTERS_*.
 *
 * Also fill out the border color state pointers.
 */
static void
iris_upload_sampler_states(struct iris_context *ice, gl_shader_stage stage)
{
   struct iris_shader_state *shs = &ice->state.shaders[stage];
   const struct shader_info *info = iris_get_shader_info(ice, stage);

   /* We assume gallium frontends will call pipe->bind_sampler_states()
    * if the program's number of textures changes.
    */
   unsigned count = info ? util_last_bit(info->textures_used) : 0;
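   /* util_last_bit() returns one past the index of the highest set bit, so
    * count covers every slot up to the last used texture, even when some
    * intermediate slots are unbound (those get zeroed entries below).
    */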
   if (!count)
      return;

   /* Assemble the SAMPLER_STATEs into a contiguous table that lives
    * in the dynamic state memory zone, so we can point to it via the
    * 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
    */
   unsigned size = count * 4 * GENX(SAMPLER_STATE_length);
   uint32_t *map =
      upload_state(ice->state.dynamic_uploader, &shs->sampler_table, size, 32);
   if (unlikely(!map))
      return;

   struct pipe_resource *res = shs->sampler_table.res;
   struct iris_bo *bo = iris_resource_bo(res);

   iris_record_state_size(ice->state.sizes,
                          bo->gtt_offset + shs->sampler_table.offset, size);

   shs->sampler_table.offset += iris_bo_offset_from_base_address(bo);

   /* Make sure all the border colors land in the same BO. */
   iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);

   ice->state.need_border_colors &= ~(1 << stage);

   for (int i = 0; i < count; i++) {
      struct iris_sampler_state *state = shs->samplers[i];
      struct iris_sampler_view *tex = shs->textures[i];

      if (!state) {
         memset(map, 0, 4 * GENX(SAMPLER_STATE_length));
      } else if (!state->needs_border_color) {
         memcpy(map, state->sampler_state, 4 * GENX(SAMPLER_STATE_length));
      } else {
         ice->state.need_border_colors |= 1 << stage;

         /* We may need to swizzle the border color for format faking.
          * A/LA formats are faked as R/RG with 000R or R00G swizzles.
          * This means we need to move the border color's A channel into
          * the R or G channels so that those read swizzles will move it
          * back into A.
          */
         union pipe_color_union *color = &state->border_color;
         union pipe_color_union tmp;
         if (tex) {
            enum pipe_format internal_format = tex->res->internal_format;

            if (util_format_is_alpha(internal_format)) {
               unsigned char swz[4] = {
                  PIPE_SWIZZLE_W, PIPE_SWIZZLE_0,
                  PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
               };
               util_format_apply_color_swizzle(&tmp, color, swz, true);
               color = &tmp;
            } else if (util_format_is_luminance_alpha(internal_format) &&
                       internal_format != PIPE_FORMAT_L8A8_SRGB) {
               unsigned char swz[4] = {
                  PIPE_SWIZZLE_X, PIPE_SWIZZLE_W,
                  PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
               };
               util_format_apply_color_swizzle(&tmp, color, swz, true);
               color = &tmp;
            }
         }

         /* Stream out the border color and merge the pointer. */
         uint32_t offset = iris_upload_border_color(ice, color);

         uint32_t dynamic[GENX(SAMPLER_STATE_length)];
         iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) {
            dyns.BorderColorPointer = offset;
         }

         for (uint32_t j = 0; j < GENX(SAMPLER_STATE_length); j++)
            map[j] = state->sampler_state[j] | dynamic[j];
      }

      map += GENX(SAMPLER_STATE_length);
   }
}
static enum isl_channel_select
fmt_swizzle(const struct iris_format_info *fmt, enum pipe_swizzle swz)
{
   switch (swz) {
   case PIPE_SWIZZLE_X: return fmt->swizzle.r;
   case PIPE_SWIZZLE_Y: return fmt->swizzle.g;
   case PIPE_SWIZZLE_Z: return fmt->swizzle.b;
   case PIPE_SWIZZLE_W: return fmt->swizzle.a;
   case PIPE_SWIZZLE_1: return SCS_ONE;
   case PIPE_SWIZZLE_0: return SCS_ZERO;
   default: unreachable("invalid swizzle");
   }
}
static void
fill_buffer_surface_state(struct isl_device *isl_dev,
                          struct iris_resource *res,
                          void *map,
                          enum isl_format format,
                          struct isl_swizzle swizzle,
                          unsigned offset, unsigned size)
{
   const struct isl_format_layout *fmtl = isl_format_get_layout(format);
   const unsigned cpp = format == ISL_FORMAT_RAW ? 1 : fmtl->bpb / 8;

   /* The ARB_texture_buffer_specification says:
    *
    *    "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
    *
    *     where <buffer_size> is the size of the buffer object, in basic
    *     machine units and <components> and <base_type> are the element count
    *     and base data type for elements, as specified in Table X.1.  The
    *     number of texels in the texel array is then clamped to the
    *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   unsigned final_size =
      MIN3(size, res->bo->size - res->offset - offset,
           IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);
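   /* Illustrative arithmetic: for an RGBA32F view, cpp is 16, so the third
    * MIN3 term clamps at IRIS_MAX_TEXTURE_BUFFER_SIZE * 16 bytes.  ISL then
    * divides the byte size by the 16-byte stride, recovering a texel count
    * that respects the limit.
    */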
   isl_buffer_fill_state(isl_dev, map,
                         .address = res->bo->gtt_offset + res->offset + offset,
                         .size_B = final_size,
                         .format = format,
                         .swizzle = swizzle,
                         .stride_B = cpp,
                         .mocs = iris_mocs(res->bo, isl_dev));
}
#define SURFACE_STATE_ALIGNMENT 64

/**
 * Allocate several contiguous SURFACE_STATE structures, one for each
 * supported auxiliary surface mode.  This only allocates the CPU-side
 * copy; they will need to be uploaded later after they're filled in.
 */
static void
alloc_surface_states(struct iris_surface_state *surf_state,
                     unsigned aux_usages)
{
   const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);

   /* If this changes, update this to explicitly align pointers */
   STATIC_ASSERT(surf_size == SURFACE_STATE_ALIGNMENT);

   assert(aux_usages != 0);

   /* In case we're re-allocating them... */
   free(surf_state->cpu);

   surf_state->num_states = util_bitcount(aux_usages);
   surf_state->cpu = calloc(surf_state->num_states, surf_size);
   surf_state->ref.offset = 0;
   pipe_resource_reference(&surf_state->ref.res, NULL);

   assert(surf_state->cpu);
}
/**
 * Upload the CPU side SURFACE_STATEs into a GPU buffer.
 */
static void
upload_surface_states(struct u_upload_mgr *mgr,
                      struct iris_surface_state *surf_state)
{
   const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
   const unsigned bytes = surf_state->num_states * surf_size;

   void *map =
      upload_state(mgr, &surf_state->ref, bytes, SURFACE_STATE_ALIGNMENT);

   surf_state->ref.offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(surf_state->ref.res));

   if (map)
      memcpy(map, surf_state->cpu, bytes);
}
/**
 * Update resource addresses in a set of SURFACE_STATE descriptors,
 * and re-upload them if necessary.
 */
static void
update_surface_state_addrs(struct u_upload_mgr *mgr,
                           struct iris_surface_state *surf_state,
                           struct iris_bo *bo)
{
   if (surf_state->bo_address == bo->gtt_offset)
      return;

   STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) % 64 == 0);
   STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_bits) == 64);

   uint64_t *ss_addr = (uint64_t *) &surf_state->cpu[GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) / 32];
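   /* The genxml _start values are bit offsets within the packed state, so
    * dividing by 32 yields a DWord index into the CPU copy.  The two
    * STATIC_ASSERTs above guarantee Surface Base Address is a QWord-aligned
    * 64-bit field we can patch in one go.
    */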
   /* First, update the CPU copies.  We assume no other fields exist in
    * the QWord containing Surface Base Address.
    */
   for (unsigned i = 0; i < surf_state->num_states; i++) {
      *ss_addr = *ss_addr - surf_state->bo_address + bo->gtt_offset;
      ss_addr = ((void *) ss_addr) + SURFACE_STATE_ALIGNMENT;
   }

   /* Next, upload the updated copies to a GPU buffer. */
   upload_surface_states(mgr, surf_state);

   surf_state->bo_address = bo->gtt_offset;
}
/**
 * Return an ISL surface for use with non-coherent render target reads.
 *
 * In a few complex cases, we can't use the SURFACE_STATE for normal render
 * target writes.  We need to make a separate one for sampling which refers
 * to the single slice of the texture being read.
 */
static void
get_rt_read_isl_surf(const struct gen_device_info *devinfo,
                     struct iris_resource *res,
                     enum pipe_texture_target target,
                     struct isl_view *view,
                     uint32_t *offset_to_tile,
                     uint32_t *tile_x_sa,
                     uint32_t *tile_y_sa,
                     struct isl_surf *surf)
{
   *surf = res->surf;

   const enum isl_dim_layout dim_layout =
      iris_get_isl_dim_layout(devinfo, res->surf.tiling, target);

   surf->dim = target_to_isl_surf_dim(target);

   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- you're entering
    * dangerous territory.  This can only possibly work if you only intended
    * to access a single level and slice of the texture, and the hardware
    * supports the tile offset feature, which allows non-tile-aligned base
    * offsets, since we'll have to point the hardware to the first texel of
    * the level instead of relying on the usual base level/layer controls.
    */
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x_sa == 0 && *tile_y_sa == 0);

   *offset_to_tile = iris_resource_get_tile_offsets(res, view->base_level,
                                                    view->base_array_layer,
                                                    tile_x_sa, tile_y_sa);
   const unsigned l = view->base_level;

   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

   surf->logical_level0_px.array_len = 1;

   surf->dim_layout = dim_layout;

   view->base_level = 0;
   view->base_array_layer = 0;
}
static void
fill_surface_state(struct isl_device *isl_dev,
                   void *map,
                   struct iris_resource *res,
                   struct isl_surf *surf,
                   struct isl_view *view,
                   unsigned aux_usage,
                   uint32_t extra_main_offset,
                   uint32_t tile_x_sa,
                   uint32_t tile_y_sa)
{
   struct isl_surf_fill_state_info f = {
      .surf = surf,
      .view = view,
      .mocs = iris_mocs(res->bo, isl_dev),
      .address = res->bo->gtt_offset + res->offset + extra_main_offset,
      .x_offset_sa = tile_x_sa,
      .y_offset_sa = tile_y_sa,
   };

   assert(!iris_resource_unfinished_aux_import(res));

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      f.aux_surf = &res->aux.surf;
      f.aux_usage = aux_usage;
      f.aux_address = res->aux.bo->gtt_offset + res->aux.offset;

      struct iris_bo *clear_bo = NULL;
      uint64_t clear_offset = 0;
      f.clear_color =
         iris_resource_get_clear_color(res, &clear_bo, &clear_offset);
      if (clear_bo) {
         f.clear_address = clear_bo->gtt_offset + clear_offset;
         f.use_clear_address = isl_dev->info->gen > 9;
      }
   }

   isl_surf_fill_state_s(isl_dev, map, &f);
}
/**
 * The pipe->create_sampler_view() driver hook.
 */
static struct pipe_sampler_view *
iris_create_sampler_view(struct pipe_context *ctx,
                         struct pipe_resource *tex,
                         const struct pipe_sampler_view *tmpl)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_sampler_view *isv = calloc(1, sizeof(struct iris_sampler_view));

   if (!isv)
      return NULL;

   /* initialize base object */
   isv->base = *tmpl;
   isv->base.context = ctx;
   isv->base.texture = NULL;
   pipe_reference_init(&isv->base.reference, 1);
   pipe_resource_reference(&isv->base.texture, tex);

   if (util_format_is_depth_or_stencil(tmpl->format)) {
      struct iris_resource *zres, *sres;
      const struct util_format_description *desc =
         util_format_description(tmpl->format);

      iris_get_depth_stencil_resources(tex, &zres, &sres);

      tex = util_format_has_depth(desc) ? &zres->base : &sres->base;
   }

   isv->res = (struct iris_resource *) tex;

   alloc_surface_states(&isv->surface_state, isv->res->aux.sampler_usages);

   isv->surface_state.bo_address = isv->res->bo->gtt_offset;

   isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;

   if (isv->base.target == PIPE_TEXTURE_CUBE ||
       isv->base.target == PIPE_TEXTURE_CUBE_ARRAY)
      usage |= ISL_SURF_USAGE_CUBE_BIT;

   const struct iris_format_info fmt =
      iris_format_for_usage(devinfo, tmpl->format, usage);

   isv->clear_color = isv->res->aux.clear_color;

   isv->view = (struct isl_view) {
      .format = fmt.fmt,
      .swizzle = (struct isl_swizzle) {
         .r = fmt_swizzle(&fmt, tmpl->swizzle_r),
         .g = fmt_swizzle(&fmt, tmpl->swizzle_g),
         .b = fmt_swizzle(&fmt, tmpl->swizzle_b),
         .a = fmt_swizzle(&fmt, tmpl->swizzle_a),
      },
      .usage = usage,
   };

   void *map = isv->surface_state.cpu;

   /* Fill out SURFACE_STATE for this view. */
   if (tmpl->target != PIPE_BUFFER) {
      isv->view.base_level = tmpl->u.tex.first_level;
      isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;
      // XXX: do I need to port f9fd0cf4790cb2a530e75d1a2206dbb9d8af7cb2?
      isv->view.base_array_layer = tmpl->u.tex.first_layer;
      isv->view.array_len =
         tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;

      if (iris_resource_unfinished_aux_import(isv->res))
         iris_resource_finish_aux_import(&screen->base, isv->res);

      unsigned aux_modes = isv->res->aux.sampler_usages;
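      /* u_bit_scan() pops the lowest set bit on each iteration, so we fill
       * one SURFACE_STATE per supported aux usage, in increasing
       * isl_aux_usage order -- the same layout alloc_surface_states() sized
       * the CPU buffer for.
       */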
      while (aux_modes) {
         enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);

         /* If we have a multisampled depth buffer, do not create a sampler
          * surface state with HiZ.
          */
         fill_surface_state(&screen->isl_dev, map, isv->res, &isv->res->surf,
                            &isv->view, aux_usage, 0, 0, 0);

         map += SURFACE_STATE_ALIGNMENT;
      }
   } else {
      fill_buffer_surface_state(&screen->isl_dev, isv->res, map,
                                isv->view.format, isv->view.swizzle,
                                tmpl->u.buf.offset, tmpl->u.buf.size);
   }

   upload_surface_states(ice->state.surface_uploader, &isv->surface_state);

   return &isv->base;
}
static void
iris_sampler_view_destroy(struct pipe_context *ctx,
                          struct pipe_sampler_view *state)
{
   struct iris_sampler_view *isv = (void *) state;
   pipe_resource_reference(&state->texture, NULL);
   pipe_resource_reference(&isv->surface_state.ref.res, NULL);
   free(isv->surface_state.cpu);
   free(isv);
}
/**
 * The pipe->create_surface() driver hook.
 *
 * In Gallium nomenclature, "surfaces" are a view of a resource that
 * can be bound as a render target or depth/stencil buffer.
 */
static struct pipe_surface *
iris_create_surface(struct pipe_context *ctx,
                    struct pipe_resource *tex,
                    const struct pipe_surface *tmpl)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;

   isl_surf_usage_flags_t usage = 0;
   if (tmpl->writable)
      usage = ISL_SURF_USAGE_STORAGE_BIT;
   else if (util_format_is_depth_or_stencil(tmpl->format))
      usage = ISL_SURF_USAGE_DEPTH_BIT;
   else
      usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;

   const struct iris_format_info fmt =
      iris_format_for_usage(devinfo, tmpl->format, usage);

   if ((usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
       !isl_format_supports_rendering(devinfo, fmt.fmt)) {
      /* Framebuffer validation will reject this invalid case, but it
       * hasn't had the opportunity yet.  In the meantime, we need to
       * avoid hitting ISL asserts about unsupported formats below.
       */
      return NULL;
   }

   struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
   struct pipe_surface *psurf = &surf->base;
   struct iris_resource *res = (struct iris_resource *) tex;

   if (!surf)
      return NULL;

   pipe_reference_init(&psurf->reference, 1);
   pipe_resource_reference(&psurf->texture, tex);
   psurf->context = ctx;
   psurf->format = tmpl->format;
   psurf->width = tex->width0;
   psurf->height = tex->height0;
   psurf->texture = tex;
   psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
   psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
   psurf->u.tex.level = tmpl->u.tex.level;

   uint32_t array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;

   struct isl_view *view = &surf->view;
   *view = (struct isl_view) {
      .format = fmt.fmt,
      .base_level = tmpl->u.tex.level,
      .levels = 1,
      .base_array_layer = tmpl->u.tex.first_layer,
      .array_len = array_len,
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = usage,
   };

#if GEN_GEN == 8
   enum pipe_texture_target target = (tex->target == PIPE_TEXTURE_3D &&
                                      array_len == 1) ? PIPE_TEXTURE_2D :
                                     tex->target == PIPE_TEXTURE_1D_ARRAY ?
                                     PIPE_TEXTURE_2D_ARRAY : tex->target;
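   /* The remapped target above feeds the Gen8 non-coherent render target
    * read path: a 3D texture with a single layer is read back as 2D, and a
    * 1D array as a 2D array, so the read view's dim layout matches something
    * get_rt_read_isl_surf() can express.
    */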
   struct isl_view *read_view = &surf->read_view;
   *read_view = (struct isl_view) {
      .format = fmt.fmt,
      .base_level = tmpl->u.tex.level,
      .levels = 1,
      .base_array_layer = tmpl->u.tex.first_layer,
      .array_len = array_len,
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_TEXTURE_BIT,
   };
#endif

   surf->clear_color = res->aux.clear_color;

   /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */
   if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
                          ISL_SURF_USAGE_STENCIL_BIT))
      return psurf;

   alloc_surface_states(&surf->surface_state, res->aux.possible_usages);
   surf->surface_state.bo_address = res->bo->gtt_offset;

#if GEN_GEN == 8
   alloc_surface_states(&surf->surface_state_read, res->aux.possible_usages);
   surf->surface_state_read.bo_address = res->bo->gtt_offset;
#endif

   if (!isl_format_is_compressed(res->surf.format)) {
      if (iris_resource_unfinished_aux_import(res))
         iris_resource_finish_aux_import(&screen->base, res);

      void *map = surf->surface_state.cpu;
      UNUSED void *map_read = surf->surface_state_read.cpu;

      /* This is a normal surface.  Fill out a SURFACE_STATE for each possible
       * auxiliary surface mode and return the pipe_surface.
       */
      unsigned aux_modes = res->aux.possible_usages;
      while (aux_modes) {
         enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
         fill_surface_state(&screen->isl_dev, map, res, &res->surf,
                            view, aux_usage, 0, 0, 0);
         map += SURFACE_STATE_ALIGNMENT;

#if GEN_GEN == 8
         struct isl_surf surf;
         uint32_t offset_to_tile = 0, tile_x_sa = 0, tile_y_sa = 0;
         get_rt_read_isl_surf(devinfo, res, target, read_view,
                              &offset_to_tile, &tile_x_sa, &tile_y_sa, &surf);
         fill_surface_state(&screen->isl_dev, map_read, res, &surf, read_view,
                            aux_usage, offset_to_tile, tile_x_sa, tile_y_sa);
         map_read += SURFACE_STATE_ALIGNMENT;
#endif
      }

      upload_surface_states(ice->state.surface_uploader, &surf->surface_state);

#if GEN_GEN == 8
      upload_surface_states(ice->state.surface_uploader,
                            &surf->surface_state_read);
#endif

      return psurf;
   }

   /* The resource has a compressed format, which is not renderable, but we
    * have a renderable view format.  We must be attempting to upload blocks
    * of compressed data via an uncompressed view.
    *
    * In this case, we can assume there are no auxiliary buffers, a single
    * miplevel, and that the resource is single-sampled.  Gallium may try
    * and create an uncompressed view with multiple layers, however.
    */
   assert(!isl_format_is_compressed(fmt.fmt));
   assert(res->aux.possible_usages == 1 << ISL_AUX_USAGE_NONE);
   assert(res->surf.samples == 1);
   assert(view->levels == 1);

   struct isl_surf isl_surf;
   uint32_t offset_B = 0, tile_x_sa = 0, tile_y_sa = 0;

   if (view->base_level > 0) {
      /* We can't rely on the hardware's miplevel selection with such
       * a substantial lie about the format, so we select a single image
       * using the Tile X/Y Offset fields.  In this case, we can't handle
       * multiple array slices.
       *
       * On Broadwell, HALIGN and VALIGN are specified in pixels and are
       * hard-coded to align to exactly the block size of the compressed
       * texture.  This means that, when reinterpreted as a non-compressed
       * texture, the tile offsets may be anything and we can't rely on
       * X/Y Offset.
       *
       * Return NULL to force gallium frontends to take fallback paths.
       */
      if (view->array_len > 1 || GEN_GEN == 8)
         return NULL;

      const bool is_3d = res->surf.dim == ISL_SURF_DIM_3D;
      isl_surf_get_image_surf(&screen->isl_dev, &res->surf,
                              view->base_level,
                              is_3d ? 0 : view->base_array_layer,
                              is_3d ? view->base_array_layer : 0,
                              &isl_surf,
                              &offset_B, &tile_x_sa, &tile_y_sa);

      /* We use address and tile offsets to access a single level/layer
       * as a subimage, so reset level/layer so it doesn't offset again.
       */
      view->base_array_layer = 0;
      view->base_level = 0;
   } else {
      /* Level 0 doesn't require tile offsets, and the hardware can find
       * array slices using QPitch even with the format override, so we
       * can allow layers in this case.  Copy the original ISL surface.
       */
      memcpy(&isl_surf, &res->surf, sizeof(isl_surf));
   }

   /* Scale down the image dimensions by the block size. */
   const struct isl_format_layout *fmtl =
      isl_format_get_layout(res->surf.format);
   isl_surf.format = fmt.fmt;
   isl_surf.logical_level0_px = isl_surf_get_logical_level0_el(&isl_surf);
   isl_surf.phys_level0_sa = isl_surf_get_phys_level0_el(&isl_surf);
   tile_x_sa /= fmtl->bw;
   tile_y_sa /= fmtl->bh;

   psurf->width = isl_surf.logical_level0_px.width;
   psurf->height = isl_surf.logical_level0_px.height;

   struct isl_surf_fill_state_info f = {
      .surf = &isl_surf,
      .view = view,
      .mocs = iris_mocs(res->bo, &screen->isl_dev),
      .address = res->bo->gtt_offset + offset_B,
      .x_offset_sa = tile_x_sa,
      .y_offset_sa = tile_y_sa,
   };

   isl_surf_fill_state_s(&screen->isl_dev, surf->surface_state.cpu, &f);

   upload_surface_states(ice->state.surface_uploader, &surf->surface_state);

   return psurf;
}
#if GEN_GEN < 9
static void
fill_default_image_param(struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * see emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
fill_buffer_image_param(struct brw_image_param *param,
                        enum pipe_format pfmt,
                        unsigned size)
{
   const unsigned cpp = util_format_get_blocksize(pfmt);

   fill_default_image_param(param);
   param->size[0] = size / cpp;
   param->stride[0] = cpp;
}
#else
#define isl_surf_fill_image_param(x, ...)
#define fill_default_image_param(x, ...)
#define fill_buffer_image_param(x, ...)
#endif
/**
 * The pipe->set_shader_images() driver hook.
 */
static void
iris_set_shader_images(struct pipe_context *ctx,
                       enum pipe_shader_type p_stage,
                       unsigned start_slot, unsigned count,
                       const struct pipe_image_view *p_images)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];
#if GEN_GEN == 8
   struct iris_genx_state *genx = ice->state.genx;
   struct brw_image_param *image_params = genx->shaders[stage].image_param;
#endif

   shs->bound_image_views &= ~u_bit_consecutive(start_slot, count);

   for (unsigned i = 0; i < count; i++) {
      struct iris_image_view *iv = &shs->image[start_slot + i];

      if (p_images && p_images[i].resource) {
         const struct pipe_image_view *img = &p_images[i];
         struct iris_resource *res = (void *) img->resource;

         util_copy_image_view(&iv->base, img);

         shs->bound_image_views |= 1 << (start_slot + i);

         res->bind_history |= PIPE_BIND_SHADER_IMAGE;
         res->bind_stages |= 1 << stage;

         enum isl_format isl_fmt = iris_image_view_get_format(ice, img);

         /* Render compression with images is supported on gen12+ only. */
         unsigned aux_usages = GEN_GEN >= 12 ? res->aux.possible_usages :
            1 << ISL_AUX_USAGE_NONE;
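         /* possible_usages always includes ISL_AUX_USAGE_NONE (bit 0), so
          * the pre-gen12 path still allocates exactly one, uncompressed,
          * surface state.
          */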
         alloc_surface_states(&iv->surface_state, aux_usages);
         iv->surface_state.bo_address = res->bo->gtt_offset;

         void *map = iv->surface_state.cpu;

         if (res->base.target != PIPE_BUFFER) {
            struct isl_view view = {
               .format = isl_fmt,
               .base_level = img->u.tex.level,
               .levels = 1,
               .base_array_layer = img->u.tex.first_layer,
               .array_len = img->u.tex.last_layer - img->u.tex.first_layer + 1,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_STORAGE_BIT,
            };

            /* If using untyped fallback. */
            if (isl_fmt == ISL_FORMAT_RAW) {
               fill_buffer_surface_state(&screen->isl_dev, res, map,
                                         isl_fmt, ISL_SWIZZLE_IDENTITY,
                                         0, res->base.width0);
            } else {
               unsigned aux_modes = aux_usages;
               while (aux_modes) {
                  enum isl_aux_usage usage = u_bit_scan(&aux_modes);

                  fill_surface_state(&screen->isl_dev, map, res, &res->surf,
                                     &view, usage, 0, 0, 0);

                  map += SURFACE_STATE_ALIGNMENT;
               }
            }

            isl_surf_fill_image_param(&screen->isl_dev,
                                      &image_params[start_slot + i],
                                      &res->surf, &view);
         } else {
            util_range_add(&res->base, &res->valid_buffer_range, img->u.buf.offset,
                           img->u.buf.offset + img->u.buf.size);

            fill_buffer_surface_state(&screen->isl_dev, res, map,
                                      isl_fmt, ISL_SWIZZLE_IDENTITY,
                                      img->u.buf.offset, img->u.buf.size);
            fill_buffer_image_param(&image_params[start_slot + i],
                                    img->format, img->u.buf.size);
         }

         upload_surface_states(ice->state.surface_uploader, &iv->surface_state);
      } else {
         pipe_resource_reference(&iv->base.resource, NULL);
         pipe_resource_reference(&iv->surface_state.ref.res, NULL);
         fill_default_image_param(&image_params[start_slot + i]);
      }
   }

   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
   ice->state.dirty |=
      stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
                                   : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;

   /* Broadwell also needs brw_image_params re-uploaded */
   if (GEN_GEN < 9) {
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
      shs->sysvals_need_upload = true;
   }
}
/**
 * The pipe->set_sampler_views() driver hook.
 */
static void
iris_set_sampler_views(struct pipe_context *ctx,
                       enum pipe_shader_type p_stage,
                       unsigned start, unsigned count,
                       struct pipe_sampler_view **views)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   shs->bound_sampler_views &= ~u_bit_consecutive(start, count);

   for (unsigned i = 0; i < count; i++) {
      struct pipe_sampler_view *pview = views ? views[i] : NULL;
      pipe_sampler_view_reference((struct pipe_sampler_view **)
                                  &shs->textures[start + i], pview);
      struct iris_sampler_view *view = (void *) pview;
      if (view) {
         view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
         view->res->bind_stages |= 1 << stage;

         shs->bound_sampler_views |= 1 << (start + i);

         update_surface_state_addrs(ice->state.surface_uploader,
                                    &view->surface_state, view->res->bo);
      }
   }

   ice->state.stage_dirty |= (IRIS_STAGE_DIRTY_BINDINGS_VS << stage);
   ice->state.dirty |=
      stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
                                   : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
}
static void
iris_set_compute_resources(struct pipe_context *ctx,
                           unsigned start, unsigned count,
                           struct pipe_surface **resources)
{
   assert(count == 0);
}

static void
iris_set_global_binding(struct pipe_context *ctx,
                        unsigned start_slot, unsigned count,
                        struct pipe_resource **resources,
                        uint32_t **handles)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   assert(start_slot + count <= IRIS_MAX_GLOBAL_BINDINGS);
   for (unsigned i = 0; i < count; i++) {
      if (resources && resources[i]) {
         pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
                                 resources[i]);
         struct iris_resource *res = (void *) resources[i];
         uint64_t addr = res->bo->gtt_offset;
         memcpy(handles[i], &addr, sizeof(addr));
      } else {
         pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
                                 NULL);
      }
   }

   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_CS;
}
/**
 * The pipe->set_tess_state() driver hook.
 */
static void
iris_set_tess_state(struct pipe_context *ctx,
                    const float default_outer_level[4],
                    const float default_inner_level[2])
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];

   memcpy(&ice->state.default_outer_level[0], &default_outer_level[0], 4 * sizeof(float));
   memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float));

   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TCS;
   shs->sysvals_need_upload = true;
}
static void
iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
{
   struct iris_surface *surf = (void *) p_surf;
   pipe_resource_reference(&p_surf->texture, NULL);
   pipe_resource_reference(&surf->surface_state.ref.res, NULL);
   pipe_resource_reference(&surf->surface_state_read.ref.res, NULL);
   free(surf->surface_state.cpu);
   free(surf);
}
static void
iris_set_clip_state(struct pipe_context *ctx,
                    const struct pipe_clip_state *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
   struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
   struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];

   memcpy(&ice->state.clip_planes, state, sizeof(*state));

   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS |
                             IRIS_STAGE_DIRTY_CONSTANTS_GS |
                             IRIS_STAGE_DIRTY_CONSTANTS_TES;
   shs->sysvals_need_upload = true;
   gshs->sysvals_need_upload = true;
   tshs->sysvals_need_upload = true;
}
/**
 * The pipe->set_polygon_stipple() driver hook.
 */
static void
iris_set_polygon_stipple(struct pipe_context *ctx,
                         const struct pipe_poly_stipple *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   memcpy(&ice->state.poly_stipple, state, sizeof(*state));
   ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
}
/**
 * The pipe->set_sample_mask() driver hook.
 */
static void
iris_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   /* We only support 16x MSAA, so we have 16 bits of sample mask.
    * st/mesa may pass us 0xffffffff though, meaning "enable all samples".
    */
   ice->state.sample_mask = sample_mask & 0xffff;
   ice->state.dirty |= IRIS_DIRTY_SAMPLE_MASK;
}
/**
 * The pipe->set_scissor_states() driver hook.
 *
 * This corresponds to our SCISSOR_RECT state structures.  It's an
 * exact match, so we just store them, and memcpy them out later.
 */
static void
iris_set_scissor_states(struct pipe_context *ctx,
                        unsigned start_slot,
                        unsigned num_scissors,
                        const struct pipe_scissor_state *rects)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   for (unsigned i = 0; i < num_scissors; i++) {
      if (rects[i].minx == rects[i].maxx || rects[i].miny == rects[i].maxy) {
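         /* Illustrative: an empty rect arriving as minx == maxx == 0 would
          * otherwise store maxx = 0 - 1 below, which wraps to a huge unsigned
          * value and stops clipping entirely.
          */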
         /* If the scissor was out of bounds and got clamped to 0 width/height
          * at the bounds, the subtraction of 1 from maximums could produce a
          * negative number and thus not clip anything.  Instead, just provide
          * a min > max scissor inside the bounds, which produces the expected
          * no rendering.
          */
         ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
            .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
         };
      } else {
         ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
            .minx = rects[i].minx,     .miny = rects[i].miny,
            .maxx = rects[i].maxx - 1, .maxy = rects[i].maxy - 1,
         };
      }
   }

   ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
}
/**
 * The pipe->set_stencil_ref() driver hook.
 *
 * This is added to 3DSTATE_WM_DEPTH_STENCIL dynamically at draw time.
 */
static void
iris_set_stencil_ref(struct pipe_context *ctx,
                     const struct pipe_stencil_ref *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   memcpy(&ice->state.stencil_ref, state, sizeof(*state));
   if (GEN_GEN >= 12)
      ice->state.dirty |= IRIS_DIRTY_STENCIL_REF;
   else if (GEN_GEN >= 9)
      ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
   else
      ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
}
static float
viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
{
   return copysignf(state->scale[axis], sign) + state->translate[axis];
}

/**
 * The pipe->set_viewport_states() driver hook.
 *
 * This corresponds to our SF_CLIP_VIEWPORT states.  We can't calculate
 * the guardband yet, as we need the framebuffer dimensions, but we can
 * at least fill out the rest.
 */
static void
iris_set_viewport_states(struct pipe_context *ctx,
                         unsigned start_slot,
                         unsigned count,
                         const struct pipe_viewport_state *states)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   memcpy(&ice->state.viewports[start_slot], states, sizeof(*states) * count);

   ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;

   if (ice->state.cso_rast && (!ice->state.cso_rast->depth_clip_near ||
                               !ice->state.cso_rast->depth_clip_far))
      ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
}
/**
 * The pipe->set_framebuffer_state() driver hook.
 *
 * Sets the current draw FBO, including color render targets, depth,
 * and stencil buffers.
 */
static void
iris_set_framebuffer_state(struct pipe_context *ctx,
                           const struct pipe_framebuffer_state *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct isl_device *isl_dev = &screen->isl_dev;
   struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
   struct iris_resource *zres;
   struct iris_resource *stencil_res;

   unsigned samples = util_framebuffer_get_num_samples(state);
   unsigned layers = util_framebuffer_get_num_layers(state);

   if (cso->samples != samples) {
      ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;

      /* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */
      if (GEN_GEN >= 9 && (cso->samples == 16 || samples == 16))
         ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
   }

   if (cso->nr_cbufs != state->nr_cbufs) {
      ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
   }

   if ((cso->layers == 0) != (layers == 0)) {
      ice->state.dirty |= IRIS_DIRTY_CLIP;
   }

   if (cso->width != state->width || cso->height != state->height) {
      ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
   }

   if (cso->zsbuf || state->zsbuf) {
      ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
   }

   util_copy_framebuffer_state(cso, state);
   cso->samples = samples;
   cso->layers = layers;

   struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;

   struct isl_view view = {
      .base_level = 0,
      .levels = 1,
      .base_array_layer = 0,
      .array_len = 1,
      .swizzle = ISL_SWIZZLE_IDENTITY,
   };

   struct isl_depth_stencil_hiz_emit_info info = { .view = &view };

   if (cso->zsbuf) {
      iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres,
                                       &stencil_res);

      view.base_level = cso->zsbuf->u.tex.level;
      view.base_array_layer = cso->zsbuf->u.tex.first_layer;
      view.array_len =
         cso->zsbuf->u.tex.last_layer - cso->zsbuf->u.tex.first_layer + 1;

      if (zres) {
         view.usage |= ISL_SURF_USAGE_DEPTH_BIT;

         info.depth_surf = &zres->surf;
         info.depth_address = zres->bo->gtt_offset + zres->offset;
         info.mocs = iris_mocs(zres->bo, isl_dev);

         view.format = zres->surf.format;

         if (iris_resource_level_has_hiz(zres, view.base_level)) {
            info.hiz_usage = zres->aux.usage;
            info.hiz_surf = &zres->aux.surf;
            info.hiz_address = zres->aux.bo->gtt_offset + zres->aux.offset;
         }
      }

      if (stencil_res) {
         view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
         info.stencil_aux_usage = stencil_res->aux.usage;
         info.stencil_surf = &stencil_res->surf;
         info.stencil_address = stencil_res->bo->gtt_offset + stencil_res->offset;
         if (!zres) {
            view.format = stencil_res->surf.format;
            info.mocs = iris_mocs(stencil_res->bo, isl_dev);
         }
      }
   }

   isl_emit_depth_stencil_hiz_s(isl_dev, cso_z->packets, &info);

   /* Make a null surface for unbound buffers */
   void *null_surf_map =
      upload_state(ice->state.surface_uploader, &ice->state.null_fb,
                   4 * GENX(RENDER_SURFACE_STATE_length), 64);
   isl_null_fill_state(&screen->isl_dev, null_surf_map,
                       isl_extent3d(MAX2(cso->width, 1),
                                    MAX2(cso->height, 1),
                                    cso->layers ? cso->layers : 1));
   ice->state.null_fb.offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));

   /* Render target change */
   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_FS;

   ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;

   ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;

   ice->state.stage_dirty |=
      ice->state.stage_dirty_for_nos[IRIS_NOS_FRAMEBUFFER];

#if GEN_GEN == 8
   ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
#endif
}
/**
 * The pipe->set_constant_buffer() driver hook.
 *
 * This uploads any constant data in user buffers, and references
 * any UBO resources containing constant data.
 */
static void
iris_set_constant_buffer(struct pipe_context *ctx,
                         enum pipe_shader_type p_stage, unsigned index,
                         const struct pipe_constant_buffer *input)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];
   struct pipe_shader_buffer *cbuf = &shs->constbuf[index];

   /* TODO: Only do this if the buffer changes? */
   pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);

   if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
      shs->bound_cbufs |= 1u << index;

      if (input->user_buffer) {
         void *map = NULL;
         pipe_resource_reference(&cbuf->buffer, NULL);
         u_upload_alloc(ice->ctx.const_uploader, 0, input->buffer_size, 64,
                        &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);

         if (!cbuf->buffer) {
            /* Allocation was unsuccessful - just unbind */
            iris_set_constant_buffer(ctx, p_stage, index, NULL);
            return;
         }

         assert(map);
         memcpy(map, input->user_buffer, input->buffer_size);
      } else if (input->buffer) {
         pipe_resource_reference(&cbuf->buffer, input->buffer);

         cbuf->buffer_offset = input->buffer_offset;
      }

      cbuf->buffer_size =
         MIN2(input->buffer_size,
              iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset);

      struct iris_resource *res = (void *) cbuf->buffer;
      res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
      res->bind_stages |= 1 << stage;
   } else {
      shs->bound_cbufs &= ~(1u << index);
      pipe_resource_reference(&cbuf->buffer, NULL);
   }

   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
}
static void
upload_sysvals(struct iris_context *ice,
               gl_shader_stage stage,
               const struct pipe_grid_info *grid)
{
   UNUSED struct iris_genx_state *genx = ice->state.genx;
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   struct iris_compiled_shader *shader = ice->shaders.prog[stage];
   if (!shader || (shader->num_system_values == 0 &&
                   shader->kernel_input_size == 0))
      return;

   assert(shader->num_cbufs > 0);

   unsigned sysval_cbuf_index = shader->num_cbufs - 1;
   struct pipe_shader_buffer *cbuf = &shs->constbuf[sysval_cbuf_index];
   unsigned system_values_start =
      ALIGN(shader->kernel_input_size, sizeof(uint32_t));
   unsigned upload_size = system_values_start +
                          shader->num_system_values * sizeof(uint32_t);
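   /* Buffer layout: kernel inputs (compute only) come first, padded to a
    * 32-bit boundary, followed by one 32-bit slot per system value.  The
    * compiler reserves the last constant buffer binding for this table,
    * hence num_cbufs - 1 above.
    */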
   void *map = NULL;

   assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS);
   u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64,
                  &cbuf->buffer_offset, &cbuf->buffer, &map);

   if (shader->kernel_input_size > 0)
      memcpy(map, grid->input, shader->kernel_input_size);

   uint32_t *sysval_map = map + system_values_start;
   for (int i = 0; i < shader->num_system_values; i++) {
      uint32_t sysval = shader->system_values[i];
      uint32_t value = 0;

      if (BRW_PARAM_DOMAIN(sysval) == BRW_PARAM_DOMAIN_IMAGE) {
#if GEN_GEN == 8
         unsigned img = BRW_PARAM_IMAGE_IDX(sysval);
         unsigned offset = BRW_PARAM_IMAGE_OFFSET(sysval);
         struct brw_image_param *param =
            &genx->shaders[stage].image_param[img];

         assert(offset < sizeof(struct brw_image_param));
         value = ((uint32_t *) param)[offset];
#endif
      } else if (sysval == BRW_PARAM_BUILTIN_ZERO) {
         value = 0;
      } else if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
         int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval);
         int comp  = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval);
         value = fui(ice->state.clip_planes.ucp[plane][comp]);
      } else if (sysval == BRW_PARAM_BUILTIN_PATCH_VERTICES_IN) {
         if (stage == MESA_SHADER_TESS_CTRL) {
            value = ice->state.vertices_per_patch;
         } else {
            assert(stage == MESA_SHADER_TESS_EVAL);
            const struct shader_info *tcs_info =
               iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
            if (tcs_info)
               value = tcs_info->tess.tcs_vertices_out;
            else
               value = ice->state.vertices_per_patch;
         }
      } else if (sysval >= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X &&
                 sysval <= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_W) {
         unsigned i = sysval - BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
         value = fui(ice->state.default_outer_level[i]);
      } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X) {
         value = fui(ice->state.default_inner_level[0]);
      } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y) {
         value = fui(ice->state.default_inner_level[1]);
      } else if (sysval >= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X &&
                 sysval <= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Z) {
         unsigned i = sysval - BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X;
         value = ice->state.last_block[i];
      } else {
         assert(!"unhandled system value");
      }

      *sysval_map++ = value;
   }

   cbuf->buffer_size = upload_size;
   iris_upload_ubo_ssbo_surf_state(ice, cbuf,
                                   &shs->constbuf_surf_state[sysval_cbuf_index], false);

   shs->sysvals_need_upload = false;
}
/**
 * The pipe->set_shader_buffers() driver hook.
 *
 * This binds SSBOs and ABOs.  Unfortunately, we need to stream out
 * SURFACE_STATE here, as the buffer offset may change each time.
 */
static void
iris_set_shader_buffers(struct pipe_context *ctx,
                        enum pipe_shader_type p_stage,
                        unsigned start_slot, unsigned count,
                        const struct pipe_shader_buffer *buffers,
                        unsigned writable_bitmask)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   unsigned modified_bits = u_bit_consecutive(start_slot, count);

   shs->bound_ssbos &= ~modified_bits;
   shs->writable_ssbos &= ~modified_bits;
   shs->writable_ssbos |= writable_bitmask << start_slot;

   for (unsigned i = 0; i < count; i++) {
      if (buffers && buffers[i].buffer) {
         struct iris_resource *res = (void *) buffers[i].buffer;
         struct pipe_shader_buffer *ssbo = &shs->ssbo[start_slot + i];
         struct iris_state_ref *surf_state =
            &shs->ssbo_surf_state[start_slot + i];
         pipe_resource_reference(&ssbo->buffer, &res->base);
         ssbo->buffer_offset = buffers[i].buffer_offset;
         ssbo->buffer_size =
            MIN2(buffers[i].buffer_size, res->bo->size - ssbo->buffer_offset);

         shs->bound_ssbos |= 1 << (start_slot + i);

         iris_upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, true);

         res->bind_history |= PIPE_BIND_SHADER_BUFFER;
         res->bind_stages |= 1 << stage;

         util_range_add(&res->base, &res->valid_buffer_range, ssbo->buffer_offset,
                        ssbo->buffer_offset + ssbo->buffer_size);
      } else {
         pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL);
         pipe_resource_reference(&shs->ssbo_surf_state[start_slot + i].res,
                                 NULL);
      }
   }

   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
}
static void
iris_delete_state(struct pipe_context *ctx, void *state)
{
   free(state);
}
/**
 * The pipe->set_vertex_buffers() driver hook.
 *
 * This translates pipe_vertex_buffer to our 3DSTATE_VERTEX_BUFFERS packet.
 */
static void
iris_set_vertex_buffers(struct pipe_context *ctx,
                        unsigned start_slot, unsigned count,
                        const struct pipe_vertex_buffer *buffers)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct iris_genx_state *genx = ice->state.genx;

   ice->state.bound_vertex_buffers &= ~u_bit_consecutive64(start_slot, count);

   for (unsigned i = 0; i < count; i++) {
      const struct pipe_vertex_buffer *buffer = buffers ? &buffers[i] : NULL;
      struct iris_vertex_buffer_state *state =
         &genx->vertex_buffers[start_slot + i];

      if (!buffer) {
         pipe_resource_reference(&state->resource, NULL);
         continue;
      }

      /* We may see user buffers that are NULL bindings. */
      assert(!(buffer->is_user_buffer && buffer->buffer.user != NULL));

      pipe_resource_reference(&state->resource, buffer->buffer.resource);
      struct iris_resource *res = (void *) state->resource;

      state->offset = (int) buffer->buffer_offset;

      if (res) {
         ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
         res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
      }

      iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
         vb.VertexBufferIndex = start_slot + i;
         vb.AddressModifyEnable = true;
         vb.BufferPitch = buffer->stride;
         if (res) {
            vb.BufferSize = res->base.width0 - (int) buffer->buffer_offset;
            vb.BufferStartingAddress =
               ro_bo(NULL, res->bo->gtt_offset + (int) buffer->buffer_offset);
            vb.MOCS = iris_mocs(res->bo, &screen->isl_dev);
         } else {
            vb.NullVertexBuffer = true;
         }
      }
   }

   ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
}
/**
 * Gallium CSO for vertex elements.
 */
struct iris_vertex_element_state {
   uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
   uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
   uint32_t edgeflag_ve[GENX(VERTEX_ELEMENT_STATE_length)];
   uint32_t edgeflag_vfi[GENX(3DSTATE_VF_INSTANCING_length)];
   unsigned count;
};

/**
 * The pipe->create_vertex_elements() driver hook.
 *
 * This translates pipe_vertex_element to our 3DSTATE_VERTEX_ELEMENTS
 * and 3DSTATE_VF_INSTANCING commands.  If no EdgeFlag or SGVs are needed,
 * the vertex_elements and vf_instancing arrays are ready to be emitted
 * at draw time as-is.  Otherwise, we need information that is only
 * available at draw time, so we also set up edgeflag_ve and edgeflag_vfi
 * as alternative versions of the last 3DSTATE_VERTEX_ELEMENT and
 * 3DSTATE_VF_INSTANCING, to be used if we detect that the Vertex Shader
 * needs EdgeFlag.
 */
static void *
iris_create_vertex_elements(struct pipe_context *ctx,
                            unsigned count,
                            const struct pipe_vertex_element *state)
{
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_vertex_element_state *cso =
      malloc(sizeof(struct iris_vertex_element_state));

   cso->count = count;

   iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) {
      ve.DWordLength =
         1 + GENX(VERTEX_ELEMENT_STATE_length) * MAX2(count, 1) - 2;
   }
3466 uint32_t *ve_pack_dest
= &cso
->vertex_elements
[1];
3467 uint32_t *vfi_pack_dest
= cso
->vf_instancing
;
3470 iris_pack_state(GENX(VERTEX_ELEMENT_STATE
), ve_pack_dest
, ve
) {
3472 ve
.SourceElementFormat
= ISL_FORMAT_R32G32B32A32_FLOAT
;
3473 ve
.Component0Control
= VFCOMP_STORE_0
;
3474 ve
.Component1Control
= VFCOMP_STORE_0
;
3475 ve
.Component2Control
= VFCOMP_STORE_0
;
3476 ve
.Component3Control
= VFCOMP_STORE_1_FP
;
3479 iris_pack_command(GENX(3DSTATE_VF_INSTANCING
), vfi_pack_dest
, vi
) {
3483 for (int i
= 0; i
< count
; i
++) {
3484 const struct iris_format_info fmt
=
3485 iris_format_for_usage(devinfo
, state
[i
].src_format
, 0);
3486 unsigned comp
[4] = { VFCOMP_STORE_SRC
, VFCOMP_STORE_SRC
,
3487 VFCOMP_STORE_SRC
, VFCOMP_STORE_SRC
};
3489 switch (isl_format_get_num_channels(fmt
.fmt
)) {
3490 case 0: comp
[0] = VFCOMP_STORE_0
; /* fallthrough */
3491 case 1: comp
[1] = VFCOMP_STORE_0
; /* fallthrough */
3492 case 2: comp
[2] = VFCOMP_STORE_0
; /* fallthrough */
3494 comp
[3] = isl_format_has_int_channel(fmt
.fmt
) ? VFCOMP_STORE_1_INT
3495 : VFCOMP_STORE_1_FP
;
3498 iris_pack_state(GENX(VERTEX_ELEMENT_STATE
), ve_pack_dest
, ve
) {
3499 ve
.EdgeFlagEnable
= false;
3500 ve
.VertexBufferIndex
= state
[i
].vertex_buffer_index
;
3502 ve
.SourceElementOffset
= state
[i
].src_offset
;
3503 ve
.SourceElementFormat
= fmt
.fmt
;
3504 ve
.Component0Control
= comp
[0];
3505 ve
.Component1Control
= comp
[1];
3506 ve
.Component2Control
= comp
[2];
3507 ve
.Component3Control
= comp
[3];
3510 iris_pack_command(GENX(3DSTATE_VF_INSTANCING
), vfi_pack_dest
, vi
) {
3511 vi
.VertexElementIndex
= i
;
3512 vi
.InstancingEnable
= state
[i
].instance_divisor
> 0;
3513 vi
.InstanceDataStepRate
= state
[i
].instance_divisor
;
3516 ve_pack_dest
+= GENX(VERTEX_ELEMENT_STATE_length
);
3517 vfi_pack_dest
+= GENX(3DSTATE_VF_INSTANCING_length
);
3520 /* An alternative version of the last VE and VFI is stored so it
3521 * can be used at draw time in case Vertex Shader uses EdgeFlag
3524 const unsigned edgeflag_index
= count
- 1;
3525 const struct iris_format_info fmt
=
3526 iris_format_for_usage(devinfo
, state
[edgeflag_index
].src_format
, 0);
3527 iris_pack_state(GENX(VERTEX_ELEMENT_STATE
), cso
->edgeflag_ve
, ve
) {
3528 ve
.EdgeFlagEnable
= true ;
3529 ve
.VertexBufferIndex
= state
[edgeflag_index
].vertex_buffer_index
;
3531 ve
.SourceElementOffset
= state
[edgeflag_index
].src_offset
;
3532 ve
.SourceElementFormat
= fmt
.fmt
;
3533 ve
.Component0Control
= VFCOMP_STORE_SRC
;
3534 ve
.Component1Control
= VFCOMP_STORE_0
;
3535 ve
.Component2Control
= VFCOMP_STORE_0
;
3536 ve
.Component3Control
= VFCOMP_STORE_0
;
3538 iris_pack_command(GENX(3DSTATE_VF_INSTANCING
), cso
->edgeflag_vfi
, vi
) {
3539 /* The vi.VertexElementIndex of the EdgeFlag Vertex Element is filled
3540 * at draw time, as it should change if SGVs are emitted.
3542 vi
.InstancingEnable
= state
[edgeflag_index
].instance_divisor
> 0;
3543 vi
.InstanceDataStepRate
= state
[edgeflag_index
].instance_divisor
;
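
/* A worked illustration (editor's note, not from the original source): a
 * pipe_vertex_element with a two-channel format such as
 * PIPE_FORMAT_R32G32_FLOAT reports isl_format_get_num_channels() == 2, so
 * the switch above enters at case 2 and leaves comp[] as { STORE_SRC,
 * STORE_SRC, STORE_0, STORE_1_FP }: the VF unit supplies 0.0 for Z and 1.0
 * for W, matching the GL defaults for missing attribute components.
 */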
/**
 * The pipe->bind_vertex_elements_state() driver hook.
 */
static void
iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_vertex_element_state *old_cso = ice->state.cso_vertex_elements;
   struct iris_vertex_element_state *new_cso = state;

   /* 3DSTATE_VF_SGVs overrides the last VE, so if the count is changing,
    * we need to re-emit it to ensure we're overriding the right one.
    */
   if (new_cso && cso_changed(count))
      ice->state.dirty |= IRIS_DIRTY_VF_SGVS;

   ice->state.cso_vertex_elements = state;
   ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
}
/**
 * The pipe->create_stream_output_target() driver hook.
 *
 * "Target" here refers to a destination buffer.  We translate this into
 * a 3DSTATE_SO_BUFFER packet.  We can handle most fields, but don't yet
 * know which buffer this represents, or whether we ought to zero the
 * write-offsets, or append.  Those are handled in the set() hook.
 */
static struct pipe_stream_output_target *
iris_create_stream_output_target(struct pipe_context *ctx,
                                 struct pipe_resource *p_res,
                                 unsigned buffer_offset,
                                 unsigned buffer_size)
{
   struct iris_resource *res = (void *) p_res;
   struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
   if (!cso)
      return NULL;

   res->bind_history |= PIPE_BIND_STREAM_OUTPUT;

   pipe_reference_init(&cso->base.reference, 1);
   pipe_resource_reference(&cso->base.buffer, p_res);
   cso->base.buffer_offset = buffer_offset;
   cso->base.buffer_size = buffer_size;
   cso->base.context = ctx;

   util_range_add(&res->base, &res->valid_buffer_range, buffer_offset,
                  buffer_offset + buffer_size);

   upload_state(ctx->stream_uploader, &cso->offset, sizeof(uint32_t), 4);

   return &cso->base;
}
static void
iris_stream_output_target_destroy(struct pipe_context *ctx,
                                  struct pipe_stream_output_target *state)
{
   struct iris_stream_output_target *cso = (void *) state;

   pipe_resource_reference(&cso->base.buffer, NULL);
   pipe_resource_reference(&cso->offset.res, NULL);

   free(cso);
}
/**
 * The pipe->set_stream_output_targets() driver hook.
 *
 * At this point, we know which targets are bound to a particular index,
 * and also whether we want to append or start over.  We can finish the
 * 3DSTATE_SO_BUFFER packets we started earlier.
 */
static void
iris_set_stream_output_targets(struct pipe_context *ctx,
                               unsigned num_targets,
                               struct pipe_stream_output_target **targets,
                               const unsigned *offsets)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_genx_state *genx = ice->state.genx;
   uint32_t *so_buffers = genx->so_buffers;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;

   const bool active = num_targets > 0;
   if (ice->state.streamout_active != active) {
      ice->state.streamout_active = active;
      ice->state.dirty |= IRIS_DIRTY_STREAMOUT;

      /* We only emit 3DSTATE_SO_DECL_LIST when streamout is active, because
       * it's a non-pipelined command.  If we're switching streamout on, we
       * may have missed emitting it earlier, so do so now.  (We're already
       * taking a stall to update 3DSTATE_SO_BUFFERS anyway...)
       */
      if (active) {
         ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST;
      } else {
         uint32_t flush = 0;
         for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
            struct iris_stream_output_target *tgt =
               (void *) ice->state.so_target[i];
            if (tgt) {
               struct iris_resource *res = (void *) tgt->base.buffer;

               flush |= iris_flush_bits_for_history(res);
               iris_dirty_for_history(ice, res);
            }
         }
         iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
                                      "make streamout results visible", flush);
      }
   }

   for (int i = 0; i < 4; i++) {
      pipe_so_target_reference(&ice->state.so_target[i],
                               i < num_targets ? targets[i] : NULL);
   }

   /* No need to update 3DSTATE_SO_BUFFER unless SOL is active. */
   if (!active)
      return;

   for (unsigned i = 0; i < 4; i++,
        so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
      struct iris_stream_output_target *tgt = (void *) ice->state.so_target[i];
      unsigned offset = offsets[i];

      if (!tgt) {
         iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
#if GEN_GEN < 12
            sob.SOBufferIndex = i;
#else
            sob._3DCommandOpcode = 0;
            sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
#endif
         }
         continue;
      }

      struct iris_resource *res = (void *) tgt->base.buffer;

      /* Note that offsets[i] will either be 0, causing us to zero
       * the value in the buffer, or 0xFFFFFFFF, which happens to mean
       * "continue appending at the existing offset."
       */
      assert(offset == 0 || offset == 0xFFFFFFFF);

      /* We might be called by Begin (offset = 0), Pause, then Resume
       * (offset = 0xFFFFFFFF) before ever drawing (where these commands
       * will actually be sent to the GPU).  In this case, we don't want
       * to append - we still want to do our initial zeroing.
       */
      if (!tgt->zeroed)
         offset = 0;

      iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
#if GEN_GEN < 12
         sob.SOBufferIndex = i;
#else
         sob._3DCommandOpcode = 0;
         sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
#endif
         sob.SurfaceBaseAddress =
            rw_bo(NULL, res->bo->gtt_offset + tgt->base.buffer_offset,
                  IRIS_DOMAIN_OTHER_WRITE);
         sob.SOBufferEnable = true;
         sob.StreamOffsetWriteEnable = true;
         sob.StreamOutputBufferOffsetAddressEnable = true;
         sob.MOCS = iris_mocs(res->bo, &screen->isl_dev);

         sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
         sob.StreamOffset = offset;
         sob.StreamOutputBufferOffsetAddress =
            rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset +
                        tgt->offset.offset, IRIS_DOMAIN_OTHER_WRITE);
      }
   }

   ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
}
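
/* An illustrative timeline (editor's note, not from the original source):
 * glBeginTransformFeedback reaches this hook with offsets[i] == 0, so
 * StreamOffset = 0 zeroes the write offset stored in the buffer.  A later
 * Pause/Resume reaches it with offsets[i] == 0xFFFFFFFF, telling the
 * hardware to reload the offset it last wrote and keep appending.
 */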
/**
 * An iris-vtable helper for encoding the 3DSTATE_SO_DECL_LIST and
 * 3DSTATE_STREAMOUT packets.
 *
 * 3DSTATE_SO_DECL_LIST is a list of shader outputs we want the streamout
 * hardware to record.  We can create it entirely based on the shader, with
 * no dynamic state dependencies.
 *
 * 3DSTATE_STREAMOUT is an annoying mix of shader-based information and
 * state-based settings.  We capture the shader-related ones here, and merge
 * the rest in at draw time.
 */
static uint32_t *
iris_create_so_decl_list(const struct pipe_stream_output_info *info,
                         const struct brw_vue_map *vue_map)
{
   struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
   int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int max_decls = 0;
   STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);

   memset(so_decl, 0, sizeof(so_decl));

   /* Construct the list of SO_DECLs to be emitted.  The formatting of the
    * command feels strange -- each dword pair contains a SO_DECL per stream.
    */
   for (unsigned i = 0; i < info->num_outputs; i++) {
      const struct pipe_stream_output *output = &info->output[i];
      const int buffer = output->output_buffer;
      const int varying = output->register_index;
      const unsigned stream_id = output->stream;
      assert(stream_id < MAX_VERTEX_STREAMS);

      buffer_mask[stream_id] |= 1 << buffer;

      assert(vue_map->varying_to_slot[varying] >= 0);

      /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
       * array.  Instead, it simply increments DstOffset for the following
       * input by the number of components that should be skipped.
       *
       * Our hardware is unusual in that it requires us to program SO_DECLs
       * for fake "hole" components, rather than simply taking the offset
       * for each real varying.  Each hole can have size 1, 2, 3, or 4; we
       * program as many size = 4 holes as we can, then a final hole to
       * accommodate the final 1, 2, or 3 remaining.
       */
      int skip_components = output->dst_offset - next_offset[buffer];

      while (skip_components > 0) {
         so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
            .HoleFlag = 1,
            .OutputBufferSlot = output->output_buffer,
            .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
         };
         skip_components -= 4;
      }

      next_offset[buffer] = output->dst_offset + output->num_components;

      so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
         .OutputBufferSlot = output->output_buffer,
         .RegisterIndex = vue_map->varying_to_slot[varying],
         .ComponentMask =
            ((1 << output->num_components) - 1) << output->start_component,
      };

      if (decls[stream_id] > max_decls)
         max_decls = decls[stream_id];
   }

   unsigned dwords = GENX(3DSTATE_STREAMOUT_length) + (3 + 2 * max_decls);
   uint32_t *map = ralloc_size(NULL, sizeof(uint32_t) * dwords);
   uint32_t *so_decl_map = map + GENX(3DSTATE_STREAMOUT_length);

   iris_pack_command(GENX(3DSTATE_STREAMOUT), map, sol) {
      int urb_entry_read_offset = 0;
      int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
         urb_entry_read_offset;

      /* We always read the whole vertex.  This could be reduced at some
       * point by reading less and offsetting the register index in the
       * SO_DECLs.
       */
      sol.Stream0VertexReadOffset = urb_entry_read_offset;
      sol.Stream0VertexReadLength = urb_entry_read_length - 1;
      sol.Stream1VertexReadOffset = urb_entry_read_offset;
      sol.Stream1VertexReadLength = urb_entry_read_length - 1;
      sol.Stream2VertexReadOffset = urb_entry_read_offset;
      sol.Stream2VertexReadLength = urb_entry_read_length - 1;
      sol.Stream3VertexReadOffset = urb_entry_read_offset;
      sol.Stream3VertexReadLength = urb_entry_read_length - 1;

      /* Set buffer pitches; 0 means unbound. */
      sol.Buffer0SurfacePitch = 4 * info->stride[0];
      sol.Buffer1SurfacePitch = 4 * info->stride[1];
      sol.Buffer2SurfacePitch = 4 * info->stride[2];
      sol.Buffer3SurfacePitch = 4 * info->stride[3];
   }

   iris_pack_command(GENX(3DSTATE_SO_DECL_LIST), so_decl_map, list) {
      list.DWordLength = 3 + 2 * max_decls - 2;
      list.StreamtoBufferSelects0 = buffer_mask[0];
      list.StreamtoBufferSelects1 = buffer_mask[1];
      list.StreamtoBufferSelects2 = buffer_mask[2];
      list.StreamtoBufferSelects3 = buffer_mask[3];
      list.NumEntries0 = decls[0];
      list.NumEntries1 = decls[1];
      list.NumEntries2 = decls[2];
      list.NumEntries3 = decls[3];
   }

   for (int i = 0; i < max_decls; i++) {
      iris_pack_state(GENX(SO_DECL_ENTRY), so_decl_map + 3 + i * 2, entry) {
         entry.Stream0Decl = so_decl[0][i];
         entry.Stream1Decl = so_decl[1][i];
         entry.Stream2Decl = so_decl[2][i];
         entry.Stream3Decl = so_decl[3][i];
      }
   }

   return map;
}
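
/* A worked hole example (editor's note, not from the original source): if
 * an output lands at dst_offset 7 while next_offset[buffer] is 0,
 * skip_components starts at 7, so the loop above emits one hole SO_DECL
 * with ComponentMask 0xf (a four-component hole) and another with
 * ComponentMask 0x7 (the remaining three), before the real varying's
 * SO_DECL is written.
 */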
static void
iris_compute_sbe_urb_read_interval(uint64_t fs_input_slots,
                                   const struct brw_vue_map *last_vue_map,
                                   bool two_sided_color,
                                   unsigned *out_offset,
                                   unsigned *out_length)
{
   /* The compiler computes the first URB slot without considering COL/BFC
    * swizzling (because it doesn't know whether it's enabled), so we need
    * to do that here too.  This may result in a smaller offset, which
    * should be safe.
    */
   const unsigned first_slot =
      brw_compute_first_urb_slot_required(fs_input_slots, last_vue_map);

   /* This becomes the URB read offset (counted in pairs of slots). */
   assert(first_slot % 2 == 0);
   *out_offset = first_slot / 2;

   /* We need to adjust the inputs read to account for front/back color
    * swizzling, as it can make the URB length longer.
    */
   for (int c = 0; c <= 1; c++) {
      if (fs_input_slots & (VARYING_BIT_COL0 << c)) {
         /* If two sided color is enabled, the fragment shader's gl_Color
          * (COL0) input comes from either the gl_FrontColor (COL0) or
          * gl_BackColor (BFC0) input varyings.  Mark BFC as used, too.
          */
         if (two_sided_color)
            fs_input_slots |= (VARYING_BIT_BFC0 << c);

         /* If front color isn't written, we opt to give them back color
          * instead of an undefined value.  Switch from COL to BFC.
          */
         if (last_vue_map->varying_to_slot[VARYING_SLOT_COL0 + c] == -1) {
            fs_input_slots &= ~(VARYING_BIT_COL0 << c);
            fs_input_slots |= (VARYING_BIT_BFC0 << c);
         }
      }
   }

   /* Compute the minimum URB Read Length necessary for the FS inputs.
    *
    * From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
    * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
    *
    *    "This field should be set to the minimum length required to read the
    *     maximum source attribute.  The maximum source attribute is indicated
    *     by the maximum value of the enabled Attribute # Source Attribute if
    *     Attribute Swizzle Enable is set, Number of Output Attributes-1 if
    *     enable is not set.
    *     read_length = ceiling((max_source_attr + 1) / 2)
    *
    *     [errata] Corruption/Hang possible if length programmed larger than
    *     recommended"
    *
    * Similar text exists for Ivy Bridge.
    *
    * We find the last URB slot that's actually read by the FS.
    */
   unsigned last_read_slot = last_vue_map->num_slots - 1;
   while (last_read_slot > first_slot && !(fs_input_slots &
          (1ull << last_vue_map->slot_to_varying[last_read_slot])))
      --last_read_slot;

   /* The URB read length is the difference of the two, counted in pairs. */
   *out_length = DIV_ROUND_UP(last_read_slot - first_slot + 1, 2);
}
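
/* Worked numbers (editor's note, not from the original source): if the
 * first FS-read slot is 4 and the last is 9, this computes *out_offset =
 * 4 / 2 = 2 and *out_length = DIV_ROUND_UP(9 - 4 + 1, 2) = 3; i.e. read
 * three 256-bit slot pairs starting at the second pair.
 */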
static void
iris_emit_sbe_swiz(struct iris_batch *batch,
                   const struct iris_context *ice,
                   unsigned urb_read_offset,
                   unsigned sprite_coord_enables)
{
   struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = {};
   const struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
   const struct brw_vue_map *vue_map = ice->shaders.last_vue_map;
   const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;

   /* XXX: this should be generated when putting programs in place */

   for (uint8_t idx = 0; idx < wm_prog_data->urb_setup_attribs_count; idx++) {
      const uint8_t fs_attr = wm_prog_data->urb_setup_attribs[idx];
      const int input_index = wm_prog_data->urb_setup[fs_attr];
      if (input_index < 0 || input_index >= 16)
         continue;

      struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr =
         &attr_overrides[input_index];
      int slot = vue_map->varying_to_slot[fs_attr];

      /* Viewport and Layer are stored in the VUE header.  We need to override
       * them to zero if earlier stages didn't write them, as GL requires that
       * they read back as zero when not explicitly set.
       */
      switch (fs_attr) {
      case VARYING_SLOT_VIEWPORT:
      case VARYING_SLOT_LAYER:
         attr->ComponentOverrideX = true;
         attr->ComponentOverrideW = true;
         attr->ConstantSource = CONST_0000;

         if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
            attr->ComponentOverrideY = true;
         if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
            attr->ComponentOverrideZ = true;
         continue;

      case VARYING_SLOT_PRIMITIVE_ID:
         /* Override if the previous shader stage didn't write gl_PrimitiveID. */
         if (slot == -1) {
            attr->ComponentOverrideX = true;
            attr->ComponentOverrideY = true;
            attr->ComponentOverrideZ = true;
            attr->ComponentOverrideW = true;
            attr->ConstantSource = PRIM_ID;
            continue;
         }
         break;

      default:
         break;
      }

      if (sprite_coord_enables & (1 << input_index))
         continue;

      /* If there was only a back color written but not front, use back
       * as the color instead of undefined.
       */
      if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
         slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
      if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
         slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];

      /* Not written by the previous stage - undefined. */
      if (slot == -1) {
         attr->ComponentOverrideX = true;
         attr->ComponentOverrideY = true;
         attr->ComponentOverrideZ = true;
         attr->ComponentOverrideW = true;
         attr->ConstantSource = CONST_0001_FLOAT;
         continue;
      }

      /* Compute the location of the attribute relative to the read offset,
       * which is counted in 256-bit increments (two 128-bit VUE slots).
       */
      const int source_attr = slot - 2 * urb_read_offset;
      assert(source_attr >= 0 && source_attr <= 32);
      attr->SourceAttribute = source_attr;

      /* If we are doing two-sided color, and the VUE slot following this one
       * represents a back-facing color, then we need to instruct the SF unit
       * to do back-facing swizzling.
       */
      if (cso_rast->light_twoside &&
          ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
            vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
           (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
            vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1)))
         attr->SwizzleSelect = INPUTATTR_FACING;
   }

   iris_emit_cmd(batch, GENX(3DSTATE_SBE_SWIZ), sbes) {
      for (int i = 0; i < 16; i++)
         sbes.Attribute[i] = attr_overrides[i];
   }
}
static unsigned
iris_calculate_point_sprite_overrides(const struct brw_wm_prog_data *prog_data,
                                      const struct iris_rasterizer_state *cso)
{
   unsigned overrides = 0;

   if (prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
      overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_PNTC];

   for (int i = 0; i < 8; i++) {
      if ((cso->sprite_coord_enable & (1 << i)) &&
          prog_data->urb_setup[VARYING_SLOT_TEX0 + i] != -1)
         overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_TEX0 + i];
   }

   return overrides;
}
static void
iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
{
   const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
   const struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
   const struct shader_info *fs_info =
      iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);

   unsigned urb_read_offset, urb_read_length;
   iris_compute_sbe_urb_read_interval(fs_info->inputs_read,
                                      ice->shaders.last_vue_map,
                                      cso_rast->light_twoside,
                                      &urb_read_offset, &urb_read_length);

   unsigned sprite_coord_overrides =
      iris_calculate_point_sprite_overrides(wm_prog_data, cso_rast);

   iris_emit_cmd(batch, GENX(3DSTATE_SBE), sbe) {
      sbe.AttributeSwizzleEnable = true;
      sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
      sbe.PointSpriteTextureCoordinateOrigin = cso_rast->sprite_coord_mode;
      sbe.VertexURBEntryReadOffset = urb_read_offset;
      sbe.VertexURBEntryReadLength = urb_read_length;
      sbe.ForceVertexURBEntryReadOffset = true;
      sbe.ForceVertexURBEntryReadLength = true;
      sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
      sbe.PointSpriteTextureCoordinateEnable = sprite_coord_overrides;
#if GEN_GEN >= 9
      for (int i = 0; i < 32; i++) {
         sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
      }
#endif
   }

   iris_emit_sbe_swiz(batch, ice, urb_read_offset, sprite_coord_overrides);
}
/* ------------------------------------------------------------------- */

/**
 * Populate VS program key fields based on the current state.
 */
static void
iris_populate_vs_key(const struct iris_context *ice,
                     const struct shader_info *info,
                     gl_shader_stage last_stage,
                     struct iris_vs_prog_key *key)
{
   const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;

   if (info->clip_distance_array_size == 0 &&
       (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
       last_stage == MESA_SHADER_VERTEX)
      key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
 * Populate TCS program key fields based on the current state.
 */
static void
iris_populate_tcs_key(const struct iris_context *ice,
                      struct iris_tcs_prog_key *key)
{
}
/**
 * Populate TES program key fields based on the current state.
 */
static void
iris_populate_tes_key(const struct iris_context *ice,
                      const struct shader_info *info,
                      gl_shader_stage last_stage,
                      struct iris_tes_prog_key *key)
{
   const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;

   if (info->clip_distance_array_size == 0 &&
       (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
       last_stage == MESA_SHADER_TESS_EVAL)
      key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
 * Populate GS program key fields based on the current state.
 */
static void
iris_populate_gs_key(const struct iris_context *ice,
                     const struct shader_info *info,
                     gl_shader_stage last_stage,
                     struct iris_gs_prog_key *key)
{
   const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;

   if (info->clip_distance_array_size == 0 &&
       (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
       last_stage == MESA_SHADER_GEOMETRY)
      key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
 * Populate FS program key fields based on the current state.
 */
static void
iris_populate_fs_key(const struct iris_context *ice,
                     const struct shader_info *info,
                     struct iris_fs_prog_key *key)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;
   const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
   const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa;
   const struct iris_rasterizer_state *rast = ice->state.cso_rast;
   const struct iris_blend_state *blend = ice->state.cso_blend;

   key->nr_color_regions = fb->nr_cbufs;

   key->clamp_fragment_color = rast->clamp_fragment_color;

   key->alpha_to_coverage = blend->alpha_to_coverage;

   key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha.enabled;

   key->flat_shade = rast->flatshade &&
      (info->inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1));

   key->persample_interp = rast->force_persample_interp;
   key->multisample_fbo = rast->multisample && fb->samples > 1;

   key->coherent_fb_fetch = GEN_GEN >= 9;

   key->force_dual_color_blend =
      screen->driconf.dual_color_blend_by_location &&
      (blend->blend_enables & 1) && blend->dual_color_blending;

   /* TODO: Respect glHint for key->high_quality_derivatives */
}

static void
iris_populate_cs_key(const struct iris_context *ice,
                     struct iris_cs_prog_key *key)
{
}
static uint64_t
KSP(const struct iris_compiled_shader *shader)
{
   struct iris_resource *res = (void *) shader->assembly.res;
   return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
}
#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage) \
   pkt.KernelStartPointer = KSP(shader); \
   pkt.BindingTableEntryCount = shader->bt.size_bytes / 4; \
   pkt.FloatingPointMode = prog_data->use_alt_mode; \
   \
   pkt.DispatchGRFStartRegisterForURBData = \
      prog_data->dispatch_grf_start_reg; \
   pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length; \
   pkt.prefix##URBEntryReadOffset = 0; \
   \
   pkt.StatisticsEnable = true; \
   pkt.Enable = true; \
   \
   if (prog_data->total_scratch) { \
      struct iris_bo *bo = \
         iris_get_scratch_space(ice, prog_data->total_scratch, stage); \
      uint32_t scratch_addr = bo->gtt_offset; \
      pkt.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11; \
      pkt.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr, \
                                          IRIS_DOMAIN_NONE); \
   }
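
/* Scratch size encoding, worked out (editor's note, not from the original
 * source): total_scratch is a power of two of at least 1KB, and
 * PerThreadScratchSpace stores log2(size / 1KB), so ffs(1024) - 11 = 0,
 * ffs(2048) - 11 = 1, ffs(4096) - 11 = 2, and so on.
 */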
/**
 * Encode most of 3DSTATE_VS based on the compiled shader.
 */
static void
iris_store_vs_state(struct iris_context *ice,
                    const struct gen_device_info *devinfo,
                    struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;

   iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
      INIT_THREAD_DISPATCH_FIELDS(vs, Vertex, MESA_SHADER_VERTEX);
      vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
      vs.SIMD8DispatchEnable = true;
      vs.UserClipDistanceCullTestEnableBitmask =
         vue_prog_data->cull_distance_mask;
   }
}
/**
 * Encode most of 3DSTATE_HS based on the compiled shader.
 */
static void
iris_store_tcs_state(struct iris_context *ice,
                     const struct gen_device_info *devinfo,
                     struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
   struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;

   iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
      INIT_THREAD_DISPATCH_FIELDS(hs, Vertex, MESA_SHADER_TESS_CTRL);

#if GEN_GEN >= 12
      /* GEN:BUG:1604578095:
       *
       * Hang occurs when the number of max threads is less than 2 times
       * the number of instance count.  The number of max threads must be
       * more than 2 times the number of instance count.
       */
      assert((devinfo->max_tcs_threads / 2) > tcs_prog_data->instances);
      hs.DispatchGRFStartRegisterForURBData = prog_data->dispatch_grf_start_reg & 0x1f;
      hs.DispatchGRFStartRegisterForURBData5 = prog_data->dispatch_grf_start_reg >> 5;
#endif

      hs.InstanceCount = tcs_prog_data->instances - 1;
      hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
      hs.IncludeVertexHandles = true;

#if GEN_GEN >= 12
      /* Patch Count threshold specifies the maximum number of patches that
       * will be accumulated before a thread dispatch is forced.
       */
      hs.PatchCountThreshold = tcs_prog_data->patch_count_threshold;
#endif

#if GEN_GEN >= 9
      hs.DispatchMode = vue_prog_data->dispatch_mode;
      hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
#endif
   }
}
/**
 * Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
 */
static void
iris_store_tes_state(struct iris_context *ice,
                     const struct gen_device_info *devinfo,
                     struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
   struct brw_tes_prog_data *tes_prog_data = (void *) prog_data;

   uint32_t *te_state = (void *) shader->derived_data;
   uint32_t *ds_state = te_state + GENX(3DSTATE_TE_length);

   iris_pack_command(GENX(3DSTATE_TE), te_state, te) {
      te.Partitioning = tes_prog_data->partitioning;
      te.OutputTopology = tes_prog_data->output_topology;
      te.TEDomain = tes_prog_data->domain;
      te.TEEnable = true;
      te.MaximumTessellationFactorOdd = 63.0;
      te.MaximumTessellationFactorNotOdd = 64.0;
   }

   iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
      INIT_THREAD_DISPATCH_FIELDS(ds, Patch, MESA_SHADER_TESS_EVAL);

      ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
      ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
      ds.ComputeWCoordinateEnable =
         tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;

      ds.UserClipDistanceCullTestEnableBitmask =
         vue_prog_data->cull_distance_mask;
   }
}
/**
 * Encode most of 3DSTATE_GS based on the compiled shader.
 */
static void
iris_store_gs_state(struct iris_context *ice,
                    const struct gen_device_info *devinfo,
                    struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
   struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;

   iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
      INIT_THREAD_DISPATCH_FIELDS(gs, Vertex, MESA_SHADER_GEOMETRY);

      gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
      gs.OutputTopology = gs_prog_data->output_topology;
      gs.ControlDataHeaderSize =
         gs_prog_data->control_data_header_size_hwords;
      gs.InstanceControl = gs_prog_data->invocations - 1;
      gs.DispatchMode = DISPATCH_MODE_SIMD8;
      gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
      gs.ControlDataFormat = gs_prog_data->control_data_format;
      gs.ReorderMode = TRAILING;
      gs.ExpectedVertexCount = gs_prog_data->vertices_in;
      gs.MaximumNumberofThreads =
         GEN_GEN == 8 ? (devinfo->max_gs_threads / 2 - 1)
                      : (devinfo->max_gs_threads - 1);

      if (gs_prog_data->static_vertex_count != -1) {
         gs.StaticOutput = true;
         gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
      }
      gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;

      gs.UserClipDistanceCullTestEnableBitmask =
         vue_prog_data->cull_distance_mask;

      const int urb_entry_write_offset = 1;
      const uint32_t urb_entry_output_length =
         DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
         urb_entry_write_offset;

      gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
      gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
   }
}
/**
 * Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
 */
static void
iris_store_fs_state(struct iris_context *ice,
                    const struct gen_device_info *devinfo,
                    struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_wm_prog_data *wm_prog_data = (void *) shader->prog_data;

   uint32_t *ps_state = (void *) shader->derived_data;
   uint32_t *psx_state = ps_state + GENX(3DSTATE_PS_length);

   iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
      ps.VectorMaskEnable = true;
      ps.BindingTableEntryCount = shader->bt.size_bytes / 4;
      ps.FloatingPointMode = prog_data->use_alt_mode;
      ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);

      ps.PushConstantEnable = prog_data->ubo_ranges[0].length > 0;

      /* From the documentation for this packet:
       *
       *    "If the PS kernel does not need the Position XY Offsets to
       *     compute a Position Value, then this field should be programmed
       *     to POSOFFSET_NONE."
       *
       *    "SW Recommendation: If the PS kernel needs the Position Offsets
       *     to compute a Position XY value, this field should match Position
       *     ZW Interpolation Mode to ensure a consistent position.xyzw
       *     computation."
       *
       * We only require XY sample offsets, so this recommendation doesn't
       * look useful at the moment.  We might need it in the future.
       */
      ps.PositionXYOffsetSelect =
         wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;

      if (prog_data->total_scratch) {
         struct iris_bo *bo =
            iris_get_scratch_space(ice, prog_data->total_scratch,
                                   MESA_SHADER_FRAGMENT);
         uint32_t scratch_addr = bo->gtt_offset;
         ps.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
         ps.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr,
                                            IRIS_DOMAIN_NONE);
      }
   }

   iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
      psx.PixelShaderValid = true;
      psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
      psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
      psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
      psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
      psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
      psx.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;
      psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;

#if GEN_GEN >= 9
      psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
      psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
#endif
   }
}
/**
 * Compute the size of the derived data (shader command packets).
 *
 * This must match the data written by the iris_store_xs_state() functions.
 */
static void
iris_store_cs_state(struct iris_context *ice,
                    const struct gen_device_info *devinfo,
                    struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_cs_prog_data *cs_prog_data = (void *) shader->prog_data;
   void *map = shader->derived_data;

   iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
      desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
      desc.SharedLocalMemorySize =
         encode_slm_size(GEN_GEN, prog_data->total_shared);
      desc.BarrierEnable = cs_prog_data->uses_barrier;
      desc.CrossThreadConstantDataReadLength =
         cs_prog_data->push.cross_thread.regs;
#if GEN_GEN >= 12
      /* TODO: Check if we are missing workarounds and enable mid-thread
       * preemption.
       *
       * We still have issues with mid-thread preemption (it was already
       * disabled by the kernel on gen11, due to missing workarounds).  It's
       * possible that we are just missing some workarounds, and could enable
       * it later, but for now let's disable it to fix a GPU hang in compute
       * in Car Chase (and possibly more).
       */
      desc.ThreadPreemptionDisable = true;
#endif
   }
}
static unsigned
iris_derived_program_state_size(enum iris_program_cache_id cache_id)
{
   assert(cache_id <= IRIS_CACHE_BLORP);

   static const unsigned dwords[] = {
      [IRIS_CACHE_VS] = GENX(3DSTATE_VS_length),
      [IRIS_CACHE_TCS] = GENX(3DSTATE_HS_length),
      [IRIS_CACHE_TES] = GENX(3DSTATE_TE_length) + GENX(3DSTATE_DS_length),
      [IRIS_CACHE_GS] = GENX(3DSTATE_GS_length),
      [IRIS_CACHE_FS] =
         GENX(3DSTATE_PS_length) + GENX(3DSTATE_PS_EXTRA_length),
      [IRIS_CACHE_CS] = GENX(INTERFACE_DESCRIPTOR_DATA_length),
      [IRIS_CACHE_BLORP] = 0,
   };

   return sizeof(uint32_t) * dwords[cache_id];
}
/**
 * Create any state packets corresponding to the given shader stage
 * (i.e. 3DSTATE_VS) and save them as "derived data" in the shader variant.
 * This means that we can look up a program in the in-memory cache and
 * get most of the state packet without having to reconstruct it.
 */
static void
iris_store_derived_program_state(struct iris_context *ice,
                                 enum iris_program_cache_id cache_id,
                                 struct iris_compiled_shader *shader)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;
   const struct gen_device_info *devinfo = &screen->devinfo;

   switch (cache_id) {
   case IRIS_CACHE_VS:
      iris_store_vs_state(ice, devinfo, shader);
      break;
   case IRIS_CACHE_TCS:
      iris_store_tcs_state(ice, devinfo, shader);
      break;
   case IRIS_CACHE_TES:
      iris_store_tes_state(ice, devinfo, shader);
      break;
   case IRIS_CACHE_GS:
      iris_store_gs_state(ice, devinfo, shader);
      break;
   case IRIS_CACHE_FS:
      iris_store_fs_state(ice, devinfo, shader);
      break;
   case IRIS_CACHE_CS:
      iris_store_cs_state(ice, devinfo, shader);
      break;
   case IRIS_CACHE_BLORP:
      break;
   }
}
/* ------------------------------------------------------------------- */

static const uint32_t push_constant_opcodes[] = {
   [MESA_SHADER_VERTEX]    = 21,
   [MESA_SHADER_TESS_CTRL] = 25, /* HS */
   [MESA_SHADER_TESS_EVAL] = 26, /* DS */
   [MESA_SHADER_GEOMETRY]  = 22,
   [MESA_SHADER_FRAGMENT]  = 23,
   [MESA_SHADER_COMPUTE]   = 0,
};
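
/* Editor's note: these are the _3DCommandSubOpcode values of the
 * corresponding 3DSTATE_CONSTANT_* packets; e.g. 21 selects
 * 3DSTATE_CONSTANT_VS and 23 selects 3DSTATE_CONSTANT_PS.  Compute push
 * constants take a different path, hence the 0 placeholder.
 */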
static uint32_t
use_null_surface(struct iris_batch *batch, struct iris_context *ice)
{
   struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);

   iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);

   return ice->state.unbound_tex.offset;
}

static uint32_t
use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
{
   /* If set_framebuffer_state() was never called, fall back to 1x1x1 */
   if (!ice->state.null_fb.res)
      return use_null_surface(batch, ice);

   struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);

   iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);

   return ice->state.null_fb.offset;
}
static uint32_t
surf_state_offset_for_aux(struct iris_resource *res,
                          unsigned aux_modes,
                          enum isl_aux_usage aux_usage)
{
   assert(aux_modes & (1 << aux_usage));
   return SURFACE_STATE_ALIGNMENT *
          util_bitcount(aux_modes & ((1 << aux_usage) - 1));
}
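
/* A worked example (editor's note, not from the original source): with
 * aux_modes = 0b1011 and aux_usage = the mode at bit 3, the mask of modes
 * below it is 0b1011 & 0b0111 = 0b0011, util_bitcount() returns 2, and the
 * matching surface state lives two SURFACE_STATE_ALIGNMENT slots into the
 * group.
 */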
static void
surf_state_update_clear_value(struct iris_batch *batch,
                              struct iris_resource *res,
                              struct iris_state_ref *state,
                              unsigned aux_modes,
                              enum isl_aux_usage aux_usage)
{
   struct isl_device *isl_dev = &batch->screen->isl_dev;
   struct iris_bo *state_bo = iris_resource_bo(state->res);
   uint64_t real_offset = state->offset + IRIS_MEMZONE_BINDER_START;
   uint32_t offset_into_bo = real_offset - state_bo->gtt_offset;
   uint32_t clear_offset = offset_into_bo +
      isl_dev->ss.clear_value_offset +
      surf_state_offset_for_aux(res, aux_modes, aux_usage);
   uint32_t *color = res->aux.clear_color.u32;

   assert(isl_dev->ss.clear_value_size == 16);

   if (aux_usage == ISL_AUX_USAGE_HIZ) {
      iris_emit_pipe_control_write(batch, "update fast clear value (Z)",
                                   PIPE_CONTROL_WRITE_IMMEDIATE,
                                   state_bo, clear_offset, color[0]);
   } else {
      iris_emit_pipe_control_write(batch, "update fast clear color (RG__)",
                                   PIPE_CONTROL_WRITE_IMMEDIATE,
                                   state_bo, clear_offset,
                                   (uint64_t) color[0] |
                                   (uint64_t) color[1] << 32);
      iris_emit_pipe_control_write(batch, "update fast clear color (__BA)",
                                   PIPE_CONTROL_WRITE_IMMEDIATE,
                                   state_bo, clear_offset + 8,
                                   (uint64_t) color[2] |
                                   (uint64_t) color[3] << 32);
   }

   iris_emit_pipe_control_flush(batch,
                                "update fast clear: state cache invalidate",
                                PIPE_CONTROL_FLUSH_ENABLE |
                                PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}
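
/* Editor's note: the 16-byte ISL clear value is covered by the two 64-bit
 * immediate writes above -- color[0]/color[1] at clear_offset (the "RG"
 * half) and color[2]/color[3] at clear_offset + 8 (the "BA" half) -- while
 * HiZ only needs the single depth dword.
 */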
static void
update_clear_value(struct iris_context *ice,
                   struct iris_batch *batch,
                   struct iris_resource *res,
                   struct iris_surface_state *surf_state,
                   unsigned all_aux_modes,
                   struct isl_view *view)
{
   UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
   UNUSED unsigned aux_modes = all_aux_modes;

   /* We only need to update the clear color in the surface state for gen8 and
    * gen9.  Newer gens can read it directly from the clear color state buffer.
    */
#if GEN_GEN == 9
   /* Skip updating the ISL_AUX_USAGE_NONE surface state */
   aux_modes &= ~(1 << ISL_AUX_USAGE_NONE);

   while (aux_modes) {
      enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);

      surf_state_update_clear_value(batch, res, &surf_state->ref,
                                    all_aux_modes, aux_usage);
   }
#elif GEN_GEN == 8
   /* TODO: Could update rather than re-filling */
   alloc_surface_states(surf_state, all_aux_modes);

   void *map = surf_state->cpu;

   while (aux_modes) {
      enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
      fill_surface_state(isl_dev, map, res, &res->surf, view, aux_usage,
                         0, 0, 0);
      map += SURFACE_STATE_ALIGNMENT;
   }

   upload_surface_states(ice->state.surface_uploader, surf_state);
#endif
}
/**
 * Add a surface to the validation list, as well as the buffer containing
 * the corresponding SURFACE_STATE.
 *
 * Returns the binding table entry (offset to SURFACE_STATE).
 */
static uint32_t
use_surface(struct iris_context *ice,
            struct iris_batch *batch,
            struct pipe_surface *p_surf,
            bool writeable,
            enum isl_aux_usage aux_usage,
            bool is_read_surface,
            enum iris_domain access)
{
   struct iris_surface *surf = (void *) p_surf;
   struct iris_resource *res = (void *) p_surf->texture;
   uint32_t offset = 0;

   iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture),
                      writeable, access);
   if (GEN_GEN == 8 && is_read_surface) {
      iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state_read.ref.res), false,
                         IRIS_DOMAIN_NONE);
   } else {
      iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.ref.res), false,
                         IRIS_DOMAIN_NONE);
   }

   if (res->aux.bo) {
      iris_use_pinned_bo(batch, res->aux.bo, writeable, access);
      if (res->aux.clear_color_bo)
         iris_use_pinned_bo(batch, res->aux.clear_color_bo, false, access);

      if (memcmp(&res->aux.clear_color, &surf->clear_color,
                 sizeof(surf->clear_color)) != 0) {
         update_clear_value(ice, batch, res, &surf->surface_state,
                            res->aux.possible_usages, &surf->view);
         if (GEN_GEN == 8) {
            update_clear_value(ice, batch, res, &surf->surface_state_read,
                               res->aux.possible_usages, &surf->read_view);
         }
         surf->clear_color = res->aux.clear_color;
      }
   }

   offset = (GEN_GEN == 8 && is_read_surface)
          ? surf->surface_state_read.ref.offset
          : surf->surface_state.ref.offset;

   return offset +
          surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
}
static uint32_t
use_sampler_view(struct iris_context *ice,
                 struct iris_batch *batch,
                 struct iris_sampler_view *isv)
{
   enum isl_aux_usage aux_usage =
      iris_resource_texture_aux_usage(ice, isv->res, isv->view.format);

   iris_use_pinned_bo(batch, isv->res->bo, false, IRIS_DOMAIN_OTHER_READ);
   iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.ref.res), false,
                      IRIS_DOMAIN_NONE);

   if (isv->res->aux.bo) {
      iris_use_pinned_bo(batch, isv->res->aux.bo,
                         false, IRIS_DOMAIN_OTHER_READ);
      if (isv->res->aux.clear_color_bo)
         iris_use_pinned_bo(batch, isv->res->aux.clear_color_bo,
                            false, IRIS_DOMAIN_OTHER_READ);
      if (memcmp(&isv->res->aux.clear_color, &isv->clear_color,
                 sizeof(isv->clear_color)) != 0) {
         update_clear_value(ice, batch, isv->res, &isv->surface_state,
                            isv->res->aux.sampler_usages, &isv->view);
         isv->clear_color = isv->res->aux.clear_color;
      }
   }

   return isv->surface_state.ref.offset +
          surf_state_offset_for_aux(isv->res, isv->res->aux.sampler_usages,
                                    aux_usage);
}
static uint32_t
use_ubo_ssbo(struct iris_batch *batch,
             struct iris_context *ice,
             struct pipe_shader_buffer *buf,
             struct iris_state_ref *surf_state,
             bool writable, enum iris_domain access)
{
   if (!buf->buffer || !surf_state->res)
      return use_null_surface(batch, ice);

   iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable, access);
   iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false,
                      IRIS_DOMAIN_NONE);

   return surf_state->offset;
}
static uint32_t
use_image(struct iris_batch *batch, struct iris_context *ice,
          struct iris_shader_state *shs, const struct shader_info *info,
          int i)
{
   struct iris_image_view *iv = &shs->image[i];
   struct iris_resource *res = (void *) iv->base.resource;

   if (!res)
      return use_null_surface(batch, ice);

   bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE;

   iris_use_pinned_bo(batch, res->bo, write, IRIS_DOMAIN_NONE);
   iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.ref.res),
                      false, IRIS_DOMAIN_NONE);

   if (res->aux.bo)
      iris_use_pinned_bo(batch, res->aux.bo, write, IRIS_DOMAIN_NONE);

   enum isl_aux_usage aux_usage =
      iris_image_view_aux_usage(ice, &iv->base, info);

   return iv->surface_state.ref.offset +
      surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
}
#define push_bt_entry(addr) \
   assert(addr >= binder_addr); \
   assert(s < shader->bt.size_bytes / sizeof(uint32_t)); \
   if (!pin_only) bt_map[s++] = (addr) - binder_addr;

#define bt_assert(section) \
   if (!pin_only && shader->bt.used_mask[section] != 0) \
      assert(shader->bt.offsets[section] == s);
/**
 * Populate the binding table for a given shader stage.
 *
 * This fills out the table of pointers to surfaces required by the shader,
 * and also adds those buffers to the validation list so the kernel can make
 * them resident before running our batch.
 */
static void
iris_populate_binding_table(struct iris_context *ice,
                            struct iris_batch *batch,
                            gl_shader_stage stage,
                            bool pin_only)
{
   const struct iris_binder *binder = &ice->state.binder;
   struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
   struct iris_compiled_shader *shader = ice->shaders.prog[stage];
   if (!shader)
      return;

   struct iris_binding_table *bt = &shader->bt;
   UNUSED struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct iris_shader_state *shs = &ice->state.shaders[stage];
   uint32_t binder_addr = binder->bo->gtt_offset;

   uint32_t *bt_map = binder->map + binder->bt_offset[stage];
   int s = 0;

   const struct shader_info *info = iris_get_shader_info(ice, stage);
   if (!info) {
      /* TCS passthrough doesn't need a binding table. */
      assert(stage == MESA_SHADER_TESS_CTRL);
      return;
   }

   if (stage == MESA_SHADER_COMPUTE &&
       shader->bt.used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS]) {
      /* surface for gl_NumWorkGroups */
      struct iris_state_ref *grid_data = &ice->state.grid_size;
      struct iris_state_ref *grid_state = &ice->state.grid_surf_state;
      iris_use_pinned_bo(batch, iris_resource_bo(grid_data->res), false,
                         IRIS_DOMAIN_OTHER_READ);
      iris_use_pinned_bo(batch, iris_resource_bo(grid_state->res), false,
                         IRIS_DOMAIN_NONE);
      push_bt_entry(grid_state->offset);
   }

   if (stage == MESA_SHADER_FRAGMENT) {
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      /* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
      if (cso_fb->nr_cbufs) {
         for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
            uint32_t addr;
            if (cso_fb->cbufs[i]) {
               addr = use_surface(ice, batch, cso_fb->cbufs[i], true,
                                  ice->state.draw_aux_usage[i], false,
                                  IRIS_DOMAIN_RENDER_WRITE);
            } else {
               addr = use_null_fb_surface(batch, ice);
            }
            push_bt_entry(addr);
         }
      } else if (GEN_GEN < 11) {
         uint32_t addr = use_null_fb_surface(batch, ice);
         push_bt_entry(addr);
      }
   }

#define foreach_surface_used(index, group) \
   bt_assert(group); \
   for (int index = 0; index < bt->sizes[group]; index++) \
      if (iris_group_index_to_bti(bt, group, index) != \
          IRIS_SURFACE_NOT_USED)

   foreach_surface_used(i, IRIS_SURFACE_GROUP_RENDER_TARGET_READ) {
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      uint32_t addr;
      if (cso_fb->cbufs[i]) {
         addr = use_surface(ice, batch, cso_fb->cbufs[i],
                            false, ice->state.draw_aux_usage[i], true,
                            IRIS_DOMAIN_OTHER_READ);
         push_bt_entry(addr);
      }
   }

   foreach_surface_used(i, IRIS_SURFACE_GROUP_TEXTURE) {
      struct iris_sampler_view *view = shs->textures[i];
      uint32_t addr = view ? use_sampler_view(ice, batch, view)
                           : use_null_surface(batch, ice);
      push_bt_entry(addr);
   }

   foreach_surface_used(i, IRIS_SURFACE_GROUP_IMAGE) {
      uint32_t addr = use_image(batch, ice, shs, info, i);
      push_bt_entry(addr);
   }

   foreach_surface_used(i, IRIS_SURFACE_GROUP_UBO) {
      uint32_t addr;
      if (i == bt->sizes[IRIS_SURFACE_GROUP_UBO] - 1) {
         if (ish->const_data) {
            iris_use_pinned_bo(batch, iris_resource_bo(ish->const_data), false,
                               IRIS_DOMAIN_OTHER_READ);
            iris_use_pinned_bo(batch, iris_resource_bo(ish->const_data_state.res),
                               false, IRIS_DOMAIN_NONE);
            addr = ish->const_data_state.offset;
         } else {
            /* This can only happen with INTEL_DISABLE_COMPACT_BINDING_TABLE=1. */
            addr = use_null_surface(batch, ice);
         }
      } else {
         addr = use_ubo_ssbo(batch, ice, &shs->constbuf[i],
                             &shs->constbuf_surf_state[i], false,
                             IRIS_DOMAIN_OTHER_READ);
      }

      push_bt_entry(addr);
   }

   foreach_surface_used(i, IRIS_SURFACE_GROUP_SSBO) {
      uint32_t addr =
         use_ubo_ssbo(batch, ice, &shs->ssbo[i], &shs->ssbo_surf_state[i],
                      shs->writable_ssbos & (1u << i), IRIS_DOMAIN_NONE);
      push_bt_entry(addr);
   }

#if 0
   /* XXX: YUV surfaces not implemented yet */
   bt_assert(plane_start[1], ...);
   bt_assert(plane_start[2], ...);
#endif
}
static void
iris_use_optional_res(struct iris_batch *batch,
                      struct pipe_resource *res,
                      bool writeable,
                      enum iris_domain access)
{
   if (res) {
      struct iris_bo *bo = iris_resource_bo(res);
      iris_use_pinned_bo(batch, bo, writeable, access);
   }
}
static void
pin_depth_and_stencil_buffers(struct iris_batch *batch,
                              struct pipe_surface *zsbuf,
                              struct iris_depth_stencil_alpha_state *cso_zsa)
{
   if (!zsbuf)
      return;

   struct iris_resource *zres, *sres;
   iris_get_depth_stencil_resources(zsbuf->texture, &zres, &sres);

   if (zres) {
      const enum iris_domain access = cso_zsa->depth_writes_enabled ?
         IRIS_DOMAIN_DEPTH_WRITE : IRIS_DOMAIN_OTHER_READ;
      iris_use_pinned_bo(batch, zres->bo, cso_zsa->depth_writes_enabled,
                         access);
      if (zres->aux.bo) {
         iris_use_pinned_bo(batch, zres->aux.bo,
                            cso_zsa->depth_writes_enabled, access);
      }
   }

   if (sres) {
      const enum iris_domain access = cso_zsa->stencil_writes_enabled ?
         IRIS_DOMAIN_DEPTH_WRITE : IRIS_DOMAIN_OTHER_READ;
      iris_use_pinned_bo(batch, sres->bo, cso_zsa->stencil_writes_enabled,
                         access);
   }
}
/* ------------------------------------------------------------------- */

/**
 * Pin any BOs which were installed by a previous batch, and restored
 * via the hardware logical context mechanism.
 *
 * We don't need to re-emit all state every batch - the hardware context
 * mechanism will save and restore it for us.  This includes pointers to
 * various BOs...which won't exist unless we ask the kernel to pin them
 * by adding them to the validation list.
 *
 * We can skip buffers if we've re-emitted those packets, as we're
 * overwriting those stale pointers with new ones, and don't actually
 * refer to the old BOs.
 */
static void
iris_restore_render_saved_bos(struct iris_context *ice,
                              struct iris_batch *batch,
                              const struct pipe_draw_info *draw)
{
   struct iris_genx_state *genx = ice->state.genx;

   const uint64_t clean = ~ice->state.dirty;
   const uint64_t stage_clean = ~ice->state.stage_dirty;

   if (clean & IRIS_DIRTY_CC_VIEWPORT) {
      iris_use_optional_res(batch, ice->state.last_res.cc_vp, false,
                            IRIS_DOMAIN_NONE);
   }

   if (clean & IRIS_DIRTY_SF_CL_VIEWPORT) {
      iris_use_optional_res(batch, ice->state.last_res.sf_cl_vp, false,
                            IRIS_DOMAIN_NONE);
   }

   if (clean & IRIS_DIRTY_BLEND_STATE) {
      iris_use_optional_res(batch, ice->state.last_res.blend, false,
                            IRIS_DOMAIN_NONE);
   }

   if (clean & IRIS_DIRTY_COLOR_CALC_STATE) {
      iris_use_optional_res(batch, ice->state.last_res.color_calc, false,
                            IRIS_DOMAIN_NONE);
   }

   if (clean & IRIS_DIRTY_SCISSOR_RECT) {
      iris_use_optional_res(batch, ice->state.last_res.scissor, false,
                            IRIS_DOMAIN_NONE);
   }

   if (ice->state.streamout_active && (clean & IRIS_DIRTY_SO_BUFFERS)) {
      for (int i = 0; i < 4; i++) {
         struct iris_stream_output_target *tgt =
            (void *) ice->state.so_target[i];
         if (tgt) {
            iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
                               true, IRIS_DOMAIN_OTHER_WRITE);
            iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
                               true, IRIS_DOMAIN_OTHER_WRITE);
         }
      }
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(stage_clean & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)))
         continue;

      struct iris_shader_state *shs = &ice->state.shaders[stage];
      struct iris_compiled_shader *shader = ice->shaders.prog[stage];

      if (!shader)
         continue;

      struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;

      for (int i = 0; i < 4; i++) {
         const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];

         if (range->length == 0)
            continue;

         /* Range block is a binding table index, map back to UBO index. */
         unsigned block_index = iris_bti_to_group_index(
            &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
         assert(block_index != IRIS_SURFACE_NOT_USED);

         struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
         struct iris_resource *res = (void *) cbuf->buffer;

         if (res)
            iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_OTHER_READ);
         else
            iris_use_pinned_bo(batch, batch->screen->workaround_bo, false,
                               IRIS_DOMAIN_OTHER_READ);
      }
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (stage_clean & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
         /* Re-pin any buffers referred to by the binding table. */
         iris_populate_binding_table(ice, batch, stage, true);
      }
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      struct iris_shader_state *shs = &ice->state.shaders[stage];
      struct pipe_resource *res = shs->sampler_table.res;
      if (res)
         iris_use_pinned_bo(batch, iris_resource_bo(res), false,
                            IRIS_DOMAIN_NONE);
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (stage_clean & (IRIS_STAGE_DIRTY_VS << stage)) {
         struct iris_compiled_shader *shader = ice->shaders.prog[stage];

         if (shader) {
            struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
            iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

            struct brw_stage_prog_data *prog_data = shader->prog_data;

            if (prog_data->total_scratch > 0) {
               struct iris_bo *bo =
                  iris_get_scratch_space(ice, prog_data->total_scratch, stage);
               iris_use_pinned_bo(batch, bo, true, IRIS_DOMAIN_NONE);
            }
         }
      }
   }

   if ((clean & IRIS_DIRTY_DEPTH_BUFFER) &&
       (clean & IRIS_DIRTY_WM_DEPTH_STENCIL)) {
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
   }

   iris_use_optional_res(batch, ice->state.last_res.index_buffer, false,
                         IRIS_DOMAIN_OTHER_READ);

   if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
      uint64_t bound = ice->state.bound_vertex_buffers;
      while (bound) {
         const int i = u_bit_scan64(&bound);
         struct pipe_resource *res = genx->vertex_buffers[i].resource;
         iris_use_pinned_bo(batch, iris_resource_bo(res), false,
                            IRIS_DOMAIN_OTHER_READ);
      }
   }
}
static void
iris_restore_compute_saved_bos(struct iris_context *ice,
                               struct iris_batch *batch,
                               const struct pipe_grid_info *grid)
{
   const uint64_t stage_clean = ~ice->state.stage_dirty;

   const int stage = MESA_SHADER_COMPUTE;
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   if (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) {
      /* Re-pin any buffers referred to by the binding table. */
      iris_populate_binding_table(ice, batch, stage, true);
   }

   struct pipe_resource *sampler_res = shs->sampler_table.res;
   if (sampler_res)
      iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false,
                         IRIS_DOMAIN_NONE);

   if ((stage_clean & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS) &&
       (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) &&
       (stage_clean & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
       (stage_clean & IRIS_STAGE_DIRTY_CS)) {
      iris_use_optional_res(batch, ice->state.last_res.cs_desc, false,
                            IRIS_DOMAIN_NONE);
   }

   if (stage_clean & IRIS_STAGE_DIRTY_CS) {
      struct iris_compiled_shader *shader = ice->shaders.prog[stage];

      if (shader) {
         struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
         iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

         struct iris_bo *curbe_bo =
            iris_resource_bo(ice->state.last_res.cs_thread_ids);
         iris_use_pinned_bo(batch, curbe_bo, false, IRIS_DOMAIN_NONE);

         struct brw_stage_prog_data *prog_data = shader->prog_data;

         if (prog_data->total_scratch > 0) {
            struct iris_bo *bo =
               iris_get_scratch_space(ice, prog_data->total_scratch, stage);
            iris_use_pinned_bo(batch, bo, true, IRIS_DOMAIN_NONE);
         }
      }
   }
}
/**
 * Possibly emit STATE_BASE_ADDRESS to update Surface State Base Address.
 */
static void
iris_update_surface_base_address(struct iris_batch *batch,
                                 struct iris_binder *binder)
{
   if (batch->last_surface_base_address == binder->bo->gtt_offset)
      return;

   uint32_t mocs = batch->screen->isl_dev.mocs.internal;

   iris_batch_sync_region_start(batch);

   flush_before_state_base_change(batch);

#if GEN_GEN == 12
   /* GEN:BUG:1607854226:
    *
    *  Workaround the non pipelined state not applying in MEDIA/GPGPU pipeline
    *  mode by putting the pipeline temporarily in 3D mode..
    */
   if (batch->name == IRIS_BATCH_COMPUTE)
      emit_pipeline_select(batch, _3D);
#endif

   iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.SurfaceStateBaseAddressModifyEnable = true;
      sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);

      /* The hardware appears to pay attention to the MOCS fields even
       * if you don't set the "Address Modify Enable" bit for the base.
       */
      sba.GeneralStateMOCS            = mocs;
      sba.StatelessDataPortAccessMOCS = mocs;
      sba.DynamicStateMOCS            = mocs;
      sba.IndirectObjectMOCS          = mocs;
      sba.InstructionMOCS             = mocs;
      sba.SurfaceStateMOCS            = mocs;
#if GEN_GEN >= 9
      sba.BindlessSurfaceStateMOCS    = mocs;
#endif
   }

#if GEN_GEN == 12
   /* GEN:BUG:1607854226:
    *
    *  Put the pipeline back into compute mode.
    */
   if (batch->name == IRIS_BATCH_COMPUTE)
      emit_pipeline_select(batch, GPGPU);
#endif

   flush_after_state_base_change(batch);
   iris_batch_sync_region_end(batch);

   batch->last_surface_base_address = binder->bo->gtt_offset;
}
static void
iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
                        bool window_space_position, float *zmin, float *zmax)
{
   if (window_space_position) {
      *zmin = 0.f;
      *zmax = 1.f;
      return;
   }
   util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
}
void
genX(invalidate_aux_map_state)(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
   if (!aux_map_ctx)
      return;

   uint32_t aux_map_state_num = gen_aux_map_get_state_num(aux_map_ctx);
   if (batch->last_aux_map_state != aux_map_state_num) {
      /* HSD 1209978178: docs say that before programming the aux table:
       *
       *    "Driver must ensure that the engine is IDLE but ensure it doesn't
       *    add extra flushes in the case it knows that the engine is already
       *    IDLE."
       *
       * An end of pipe sync is needed here, otherwise we see GPU hangs in
       * dEQP-GLES31.functional.copy_image.* tests.
       */
      iris_emit_end_of_pipe_sync(batch, "Invalidate aux map table",
                                 PIPE_CONTROL_CS_STALL);

      /* If the aux-map state number increased, then we need to rewrite the
       * register. Rewriting the register is used to both set the aux-map
       * translation table address, and also to invalidate any previously
       * cached translations.
       */
      iris_load_register_imm32(batch, GENX(GFX_CCS_AUX_INV_num), 1);
      batch->last_aux_map_state = aux_map_state_num;
   }
}
static void
init_aux_map_state(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
   if (!aux_map_ctx)
      return;

   uint64_t base_addr = gen_aux_map_get_base(aux_map_ctx);
   assert(base_addr != 0 && align64(base_addr, 32 * 1024) == base_addr);
   iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num),
                            base_addr);
}
struct push_bos {
   struct {
      struct iris_address addr;
      uint32_t length;
   } buffers[4];
   int buffer_count;
   uint32_t max_length;
};
static void
setup_constant_buffers(struct iris_context *ice,
                       struct iris_batch *batch,
                       int stage,
                       struct push_bos *push_bos)
{
   struct iris_shader_state *shs = &ice->state.shaders[stage];
   struct iris_compiled_shader *shader = ice->shaders.prog[stage];
   struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;

   uint32_t push_range_sum = 0;

   int n = 0;
   for (int i = 0; i < 4; i++) {
      const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];

      if (range->length == 0)
         continue;

      push_range_sum += range->length;

      if (range->length > push_bos->max_length)
         push_bos->max_length = range->length;

      /* Range block is a binding table index, map back to UBO index. */
      unsigned block_index = iris_bti_to_group_index(
         &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
      assert(block_index != IRIS_SURFACE_NOT_USED);

      struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
      struct iris_resource *res = (void *) cbuf->buffer;

      assert(cbuf->buffer_offset % 32 == 0);

      push_bos->buffers[n].length = range->length;
      push_bos->buffers[n].addr =
         res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
             : batch->screen->workaround_address;
      n++;
   }

   /* From the 3DSTATE_CONSTANT_XS and 3DSTATE_CONSTANT_ALL programming notes:
    *
    *    "The sum of all four read length fields must be less than or
    *     equal to the size of 64."
    */
   assert(push_range_sum <= 64);

   push_bos->buffer_count = n;
}
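
/* Worked example (illustrative comment only): if prog_data->ubo_ranges
 * were { block 0, start 0, length 4 } and { block 2, start 8, length 2 }
 * with the other two ranges empty, the loop above would produce
 * buffer_count = 2, max_length = 4, and push_range_sum = 6 (well under
 * the limit of 64, all in units of 32-byte registers); buffers[1].addr
 * would point 8 * 32 = 256 bytes past that UBO's buffer_offset.
 */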
static void
emit_push_constant_packets(struct iris_context *ice,
                           struct iris_batch *batch,
                           int stage,
                           const struct push_bos *push_bos)
{
   UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
   struct iris_compiled_shader *shader = ice->shaders.prog[stage];
   struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;

   iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
      pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
#if GEN_GEN >= 12
      pkt.MOCS = isl_dev->mocs.internal;
#endif
      if (prog_data) {
         /* The Skylake PRM contains the following restriction:
          *
          *    "The driver must ensure The following case does not occur
          *     without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
          *     buffer 3 read length equal to zero committed followed by a
          *     3DSTATE_CONSTANT_* with buffer 0 read length not equal to
          *     zero committed."
          *
          * To avoid this, we program the buffers in the highest slots.
          * This way, slot 0 is only used if slot 3 is also used.
          */
         int n = push_bos->buffer_count;
         assert(n <= 4);
         const unsigned shift = 4 - n;
         for (int i = 0; i < n; i++) {
            pkt.ConstantBody.ReadLength[i + shift] =
               push_bos->buffers[i].length;
            pkt.ConstantBody.Buffer[i + shift] = push_bos->buffers[i].addr;
         }
      }
   }
}
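
/* Worked example (illustrative comment only): with buffer_count n = 2,
 * shift = 4 - n = 2, so the two ranges land in ConstantBody slots 2 and 3:
 *
 *    ReadLength[] = { 0, 0, len0, len1 }
 *
 * Buffer 3's read length is then non-zero whenever any buffer's is, which
 * is exactly what the Skylake restriction quoted above demands.
 */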
#if GEN_GEN >= 12
static void
emit_push_constant_packet_all(struct iris_context *ice,
                              struct iris_batch *batch,
                              uint32_t shader_mask,
                              const struct push_bos *push_bos)
{
   struct isl_device *isl_dev = &batch->screen->isl_dev;

   if (!push_bos) {
      iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), pc) {
         pc.ShaderUpdateEnable = shader_mask;
      }
      return;
   }

   const uint32_t n = push_bos->buffer_count;
   const uint32_t max_pointers = 4;
   const uint32_t num_dwords = 2 + 2 * n;
   uint32_t const_all[2 + 2 * max_pointers];
   uint32_t *dw = &const_all[0];

   assert(n <= max_pointers);
   iris_pack_command(GENX(3DSTATE_CONSTANT_ALL), dw, all) {
      all.DWordLength = num_dwords - 2;
      all.MOCS = isl_dev->mocs.internal;
      all.ShaderUpdateEnable = shader_mask;
      all.PointerBufferMask = (1 << n) - 1;
   }
   dw += 2;

   for (int i = 0; i < n; i++) {
      _iris_pack_state(batch, GENX(3DSTATE_CONSTANT_ALL_DATA),
                       dw + i * 2, data) {
         data.PointerToConstantBuffer = push_bos->buffers[i].addr;
         data.ConstantBufferReadLength = push_bos->buffers[i].length;
      }
   }
   iris_batch_emit(batch, const_all, sizeof(uint32_t) * num_dwords);
}
#endif
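
/* Sizing note (illustrative comment only): 3DSTATE_CONSTANT_ALL is two
 * header DWords plus one two-DWord CONSTANT_ALL_DATA entry per buffer,
 * hence num_dwords = 2 + 2n above, with DWordLength encoded as
 * num_dwords - 2 per the usual command-length convention; n = 4 buffers
 * gives a 10-DWord packet.
 */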
static void
iris_upload_dirty_render_state(struct iris_context *ice,
                               struct iris_batch *batch,
                               const struct pipe_draw_info *draw)
{
   const uint64_t dirty = ice->state.dirty;
   const uint64_t stage_dirty = ice->state.stage_dirty;

   if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER) &&
       !(stage_dirty & IRIS_ALL_STAGE_DIRTY_FOR_RENDER))
      return;

   struct iris_genx_state *genx = ice->state.genx;
   struct iris_binder *binder = &ice->state.binder;
   struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;

   if (dirty & IRIS_DIRTY_CC_VIEWPORT) {
      const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
      uint32_t cc_vp_address;

      /* XXX: could avoid streaming for depth_clip [0,1] case. */
      uint32_t *cc_vp_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.cc_vp,
                      4 * ice->state.num_viewports *
                      GENX(CC_VIEWPORT_length), 32, &cc_vp_address);
      for (int i = 0; i < ice->state.num_viewports; i++) {
         float zmin, zmax;
         iris_viewport_zmin_zmax(&ice->state.viewports[i],
                                 cso_rast->clip_halfz,
                                 ice->state.window_space_position,
                                 &zmin, &zmax);
         if (cso_rast->depth_clip_near)
            zmin = 0.0;
         if (cso_rast->depth_clip_far)
            zmax = 1.0;

         iris_pack_state(GENX(CC_VIEWPORT), cc_vp_map, ccv) {
            ccv.MinimumDepth = zmin;
            ccv.MaximumDepth = zmax;
         }

         cc_vp_map += GENX(CC_VIEWPORT_length);
      }

      iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
         ptr.CCViewportPointer = cc_vp_address;
      }
   }
   if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      uint32_t sf_cl_vp_address;
      uint32_t *vp_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.sf_cl_vp,
                      4 * ice->state.num_viewports *
                      GENX(SF_CLIP_VIEWPORT_length), 64, &sf_cl_vp_address);

      for (unsigned i = 0; i < ice->state.num_viewports; i++) {
         const struct pipe_viewport_state *state = &ice->state.viewports[i];
         float gb_xmin, gb_xmax, gb_ymin, gb_ymax;

         float vp_xmin = viewport_extent(state, 0, -1.0f);
         float vp_xmax = viewport_extent(state, 0,  1.0f);
         float vp_ymin = viewport_extent(state, 1, -1.0f);
         float vp_ymax = viewport_extent(state, 1,  1.0f);

         gen_calculate_guardband_size(cso_fb->width, cso_fb->height,
                                      state->scale[0], state->scale[1],
                                      state->translate[0], state->translate[1],
                                      &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);

         iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
            vp.ViewportMatrixElementm00 = state->scale[0];
            vp.ViewportMatrixElementm11 = state->scale[1];
            vp.ViewportMatrixElementm22 = state->scale[2];
            vp.ViewportMatrixElementm30 = state->translate[0];
            vp.ViewportMatrixElementm31 = state->translate[1];
            vp.ViewportMatrixElementm32 = state->translate[2];
            vp.XMinClipGuardband = gb_xmin;
            vp.XMaxClipGuardband = gb_xmax;
            vp.YMinClipGuardband = gb_ymin;
            vp.YMaxClipGuardband = gb_ymax;
            vp.XMinViewPort = MAX2(vp_xmin, 0);
            vp.XMaxViewPort = MIN2(vp_xmax, cso_fb->width) - 1;
            vp.YMinViewPort = MAX2(vp_ymin, 0);
            vp.YMaxViewPort = MIN2(vp_ymax, cso_fb->height) - 1;
         }

         vp_map += GENX(SF_CLIP_VIEWPORT_length);
      }

      iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
         ptr.SFClipViewportPointer = sf_cl_vp_address;
      }
   }
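
   /* Worked example (illustrative comment only): viewport_extent()
    * evaluates translate[axis] + sign * scale[axis], so an 800x600
    * viewport anchored at the origin (scale = {400, 300, ...},
    * translate = {400, 300, ...}) yields vp_xmin = 0, vp_xmax = 800,
    * vp_ymin = 0, vp_ymax = 600; after the MAX2/MIN2 clamps the packet
    * stores the inclusive extent 0..799 x 0..599.
    */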
   if (dirty & IRIS_DIRTY_URB) {
      unsigned size[4];

      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
         if (!ice->shaders.prog[i]) {
            size[i] = 1;
         } else {
            struct brw_vue_prog_data *vue_prog_data =
               (void *) ice->shaders.prog[i]->prog_data;
            size[i] = vue_prog_data->urb_entry_size;
         }
         assert(size[i] != 0);
      }

      unsigned entries[4], start[4];
      gen_get_urb_config(&batch->screen->devinfo,
                         batch->screen->l3_config_3d,
                         ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
                         ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,
                         size, entries, start,
                         &ice->state.urb_deref_block_size);

      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
         iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
            urb._3DCommandSubOpcode += i;
            urb.VSURBStartingAddress     = start[i];
            urb.VSURBEntryAllocationSize = size[i] - 1;
            urb.VSNumberofURBEntries     = entries[i];
         }
      }
   }
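
   /* Note (assumption, illustrative comment only): the loop above relies
    * on 3DSTATE_URB_VS/HS/DS/GS having consecutive subopcodes and on
    * MESA_SHADER_VERTEX..MESA_SHADER_GEOMETRY being 0..3, so
    * "_3DCommandSubOpcode += i" retargets one packed template at each
    * stage's URB packet instead of packing four separate commands.
    */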
   if (dirty & IRIS_DIRTY_BLEND_STATE) {
      struct iris_blend_state *cso_blend = ice->state.cso_blend;
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
      const int header_dwords = GENX(BLEND_STATE_length);

      /* Always write at least one BLEND_STATE - the final RT message will
       * reference BLEND_STATE[0] even if there aren't color writes.  There
       * may still be alpha testing, computed depth, and so on.
       */
      const int rt_dwords =
         MAX2(cso_fb->nr_cbufs, 1) * GENX(BLEND_STATE_ENTRY_length);

      uint32_t blend_offset;
      uint32_t *blend_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.blend,
                      4 * (header_dwords + rt_dwords), 64, &blend_offset);

      uint32_t blend_state_header;
      iris_pack_state(GENX(BLEND_STATE), &blend_state_header, bs) {
         bs.AlphaTestEnable = cso_zsa->alpha.enabled;
         bs.AlphaTestFunction = translate_compare_func(cso_zsa->alpha.func);
      }

      blend_map[0] = blend_state_header | cso_blend->blend_state[0];
      memcpy(&blend_map[1], &cso_blend->blend_state[1], 4 * rt_dwords);

      iris_emit_cmd(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
         ptr.BlendStatePointer = blend_offset;
         ptr.BlendStatePointerValid = true;
      }
   }
   if (dirty & IRIS_DIRTY_COLOR_CALC_STATE) {
      struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
#if GEN_GEN == 8
      struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
#endif
      uint32_t cc_offset;
      void *cc_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.color_calc,
                      sizeof(uint32_t) * GENX(COLOR_CALC_STATE_length),
                      64, &cc_offset);
      iris_pack_state(GENX(COLOR_CALC_STATE), cc_map, cc) {
         cc.AlphaTestFormat = ALPHATEST_FLOAT32;
         cc.AlphaReferenceValueAsFLOAT32 = cso->alpha.ref_value;
         cc.BlendConstantColorRed   = ice->state.blend_color.color[0];
         cc.BlendConstantColorGreen = ice->state.blend_color.color[1];
         cc.BlendConstantColorBlue  = ice->state.blend_color.color[2];
         cc.BlendConstantColorAlpha = ice->state.blend_color.color[3];
#if GEN_GEN == 8
         cc.StencilReferenceValue = p_stencil_refs->ref_value[0];
         cc.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
#endif
      }

      iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
         ptr.ColorCalcStatePointer = cc_offset;
         ptr.ColorCalcStatePointerValid = true;
      }
   }
   /* GEN:BUG:1604061319
    *
    *    3DSTATE_CONSTANT_* needs to be programmed before BTP_*
    *
    * Testing shows that all the 3DSTATE_CONSTANT_XS need to be emitted if
    * any stage has a dirty binding table.
    */
   const bool emit_const_wa = GEN_GEN >= 11 &&
      ((dirty & IRIS_DIRTY_RENDER_BUFFER) ||
       (stage_dirty & IRIS_ALL_STAGE_DIRTY_BINDINGS));

#if GEN_GEN >= 12
   uint32_t nobuffer_stages = 0;
#endif

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)) &&
          !emit_const_wa)
         continue;

      struct iris_shader_state *shs = &ice->state.shaders[stage];
      struct iris_compiled_shader *shader = ice->shaders.prog[stage];

      if (!shader)
         continue;

      if (shs->sysvals_need_upload)
         upload_sysvals(ice, stage, NULL);

      struct push_bos push_bos = {};
      setup_constant_buffers(ice, batch, stage, &push_bos);

#if GEN_GEN >= 12
      /* If this stage doesn't have any push constants, emit it later in a
       * single CONSTANT_ALL packet with all the other stages.
       */
      if (push_bos.buffer_count == 0) {
         nobuffer_stages |= 1 << stage;
         continue;
      }

      /* The Constant Buffer Read Length field from 3DSTATE_CONSTANT_ALL
       * contains only 5 bits, so we can only use it for buffers smaller than
       * 32.
       */
      if (push_bos.max_length < 32) {
         emit_push_constant_packet_all(ice, batch, 1 << stage, &push_bos);
         continue;
      }
#endif
      emit_push_constant_packets(ice, batch, stage, &push_bos);
   }

#if GEN_GEN >= 12
   if (nobuffer_stages)
      emit_push_constant_packet_all(ice, batch, nobuffer_stages, NULL);
#endif
   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      /* Gen9 requires 3DSTATE_BINDING_TABLE_POINTERS_XS to be re-emitted
       * in order to commit constants.  TODO: Investigate "Disable Gather
       * at Set Shader" to go back to legacy mode...
       */
      if (stage_dirty & ((IRIS_STAGE_DIRTY_BINDINGS_VS |
                          (GEN_GEN == 9 ? IRIS_STAGE_DIRTY_CONSTANTS_VS : 0))
                         << stage)) {
         iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
            ptr._3DCommandSubOpcode = 38 + stage;
            ptr.PointertoVSBindingTable = binder->bt_offset[stage];
         }
      }
   }

   if (GEN_GEN >= 11 && (dirty & IRIS_DIRTY_RENDER_BUFFER)) {
      // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
      // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6

      /* The PIPE_CONTROL command description says:
       *
       *   "Whenever a Binding Table Index (BTI) used by a Render Target
       *    Message points to a different RENDER_SURFACE_STATE, SW must issue a
       *    Render Target Cache Flush by enabling this bit. When render target
       *    flush is set due to new association of BTI, PS Scoreboard Stall bit
       *    must be set in this packet."
       */
      // XXX: does this need to happen at 3DSTATE_BTP_PS time?
      iris_emit_pipe_control_flush(batch, "workaround: RT BTI change [draw]",
                                   PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                   PIPE_CONTROL_STALL_AT_SCOREBOARD);
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (stage_dirty & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
         iris_populate_binding_table(ice, batch, stage, false);
      }
   }
   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage)) ||
          !ice->shaders.prog[stage])
         continue;

      iris_upload_sampler_states(ice, stage);

      struct iris_shader_state *shs = &ice->state.shaders[stage];
      struct pipe_resource *res = shs->sampler_table.res;
      if (res)
         iris_use_pinned_bo(batch, iris_resource_bo(res), false,
                            IRIS_DOMAIN_NONE);

      iris_emit_cmd(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
         ptr._3DCommandSubOpcode = 43 + stage;
         ptr.PointertoVSSamplerState = shs->sampler_table.offset;
      }
   }

   if (ice->state.need_border_colors)
      iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false,
                         IRIS_DOMAIN_NONE);

   if (dirty & IRIS_DIRTY_MULTISAMPLE) {
      iris_emit_cmd(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
         ms.PixelLocation =
            ice->state.cso_rast->half_pixel_center ? CENTER : UL_CORNER;
         if (ice->state.framebuffer.samples > 0)
            ms.NumberofMultisamples = ffs(ice->state.framebuffer.samples) - 1;
      }
   }

   if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
      iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
         ms.SampleMask = ice->state.sample_mask;
      }
   }
   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(stage_dirty & (IRIS_STAGE_DIRTY_VS << stage)))
         continue;

      struct iris_compiled_shader *shader = ice->shaders.prog[stage];

      if (shader) {
         struct brw_stage_prog_data *prog_data = shader->prog_data;
         struct iris_resource *cache = (void *) shader->assembly.res;
         iris_use_pinned_bo(batch, cache->bo, false, IRIS_DOMAIN_NONE);

         if (prog_data->total_scratch > 0) {
            struct iris_bo *bo =
               iris_get_scratch_space(ice, prog_data->total_scratch, stage);
            iris_use_pinned_bo(batch, bo, true, IRIS_DOMAIN_NONE);
         }

         if (stage == MESA_SHADER_FRAGMENT) {
            UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast;
            struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;

            uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0};
            iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
               ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
               ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
               ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;

               /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say:
                *
                *    "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16,
                *     SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch
                *     mode."
                *
                * 16x MSAA only exists on Gen9+, so we can skip this on Gen8.
                */
               if (GEN_GEN >= 9 && cso_fb->samples == 16 &&
                   !wm_prog_data->persample_dispatch) {
                  assert(ps._8PixelDispatchEnable ||
                         ps._16PixelDispatchEnable);
                  ps._32PixelDispatchEnable = false;
               }

               ps.DispatchGRFStartRegisterForConstantSetupData0 =
                  brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
               ps.DispatchGRFStartRegisterForConstantSetupData1 =
                  brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
               ps.DispatchGRFStartRegisterForConstantSetupData2 =
                  brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);

               ps.KernelStartPointer0 = KSP(shader) +
                  brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
               ps.KernelStartPointer1 = KSP(shader) +
                  brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
               ps.KernelStartPointer2 = KSP(shader) +
                  brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
            }

            uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
            iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
#if GEN_GEN >= 9
               if (!wm_prog_data->uses_sample_mask)
                  psx.InputCoverageMaskState = ICMS_NONE;
               else if (wm_prog_data->post_depth_coverage)
                  psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
               else if (wm_prog_data->inner_coverage &&
                        cso->conservative_rasterization)
                  psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
               else
                  psx.InputCoverageMaskState = ICMS_NORMAL;
#else
               psx.PixelShaderUsesInputCoverageMask =
                  wm_prog_data->uses_sample_mask;
#endif
            }

            uint32_t *shader_ps = (uint32_t *) shader->derived_data;
            uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length);
            iris_emit_merge(batch, shader_ps, ps_state,
                            GENX(3DSTATE_PS_length));
            iris_emit_merge(batch, shader_psx, psx_state,
                            GENX(3DSTATE_PS_EXTRA_length));
         } else {
            iris_batch_emit(batch, shader->derived_data,
                            iris_derived_program_state_size(stage));
         }
      } else {
         if (stage == MESA_SHADER_TESS_EVAL) {
            iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
            iris_emit_cmd(batch, GENX(3DSTATE_TE), te);
            iris_emit_cmd(batch, GENX(3DSTATE_DS), ds);
         } else if (stage == MESA_SHADER_GEOMETRY) {
            iris_emit_cmd(batch, GENX(3DSTATE_GS), gs);
         }
      }
   }
   if (ice->state.streamout_active) {
      if (dirty & IRIS_DIRTY_SO_BUFFERS) {
         iris_batch_emit(batch, genx->so_buffers,
                         4 * 4 * GENX(3DSTATE_SO_BUFFER_length));
         for (int i = 0; i < 4; i++) {
            struct iris_stream_output_target *tgt =
               (void *) ice->state.so_target[i];
            if (tgt) {
               iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
                                  true, IRIS_DOMAIN_OTHER_WRITE);
               iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
                                  true, IRIS_DOMAIN_OTHER_WRITE);
            }
         }
      }

      if ((dirty & IRIS_DIRTY_SO_DECL_LIST) && ice->state.streamout) {
         uint32_t *decl_list =
            ice->state.streamout + GENX(3DSTATE_STREAMOUT_length);
         iris_batch_emit(batch, decl_list, 4 * ((decl_list[0] & 0xff) + 2));
      }

      if (dirty & IRIS_DIRTY_STREAMOUT) {
         const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;

         uint32_t dynamic_sol[GENX(3DSTATE_STREAMOUT_length)];
         iris_pack_command(GENX(3DSTATE_STREAMOUT), dynamic_sol, sol) {
            sol.SOFunctionEnable = true;
            sol.SOStatisticsEnable = true;

            sol.RenderingDisable = cso_rast->rasterizer_discard &&
                                   !ice->state.prims_generated_query_active;
            sol.ReorderMode = cso_rast->flatshade_first ? LEADING : TRAILING;
         }

         assert(ice->state.streamout);

         iris_emit_merge(batch, ice->state.streamout, dynamic_sol,
                         GENX(3DSTATE_STREAMOUT_length));
      }
   } else {
      if (dirty & IRIS_DIRTY_STREAMOUT) {
         iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
      }
   }
   if (dirty & IRIS_DIRTY_CLIP) {
      struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;

      bool gs_or_tes = ice->shaders.prog[MESA_SHADER_GEOMETRY] ||
                       ice->shaders.prog[MESA_SHADER_TESS_EVAL];
      bool points_or_lines = cso_rast->fill_mode_point_or_line ||
         (gs_or_tes ? ice->shaders.output_topology_is_points_or_lines
                    : ice->state.prim_is_points_or_lines);

      uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
      iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
         cl.StatisticsEnable = ice->state.statistics_counters_enabled;
         if (cso_rast->rasterizer_discard)
            cl.ClipMode = CLIPMODE_REJECT_ALL;
         else if (ice->state.window_space_position)
            cl.ClipMode = CLIPMODE_ACCEPT_ALL;
         else
            cl.ClipMode = CLIPMODE_NORMAL;

         cl.PerspectiveDivideDisable = ice->state.window_space_position;
         cl.ViewportXYClipTestEnable = !points_or_lines;

         if (wm_prog_data->barycentric_interp_modes &
             BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
            cl.NonPerspectiveBarycentricEnable = true;

         cl.ForceZeroRTAIndexEnable = cso_fb->layers <= 1;
         cl.MaximumVPIndex = ice->state.num_viewports - 1;
      }
      iris_emit_merge(batch, cso_rast->clip, dynamic_clip,
                      ARRAY_SIZE(cso_rast->clip));
   }
   if (dirty & (IRIS_DIRTY_RASTER | IRIS_DIRTY_URB)) {
      struct iris_rasterizer_state *cso = ice->state.cso_rast;
      iris_batch_emit(batch, cso->raster, sizeof(cso->raster));

      uint32_t dynamic_sf[GENX(3DSTATE_SF_length)];
      iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) {
         sf.ViewportTransformEnable = !ice->state.window_space_position;

#if GEN_GEN >= 12
         sf.DerefBlockSize = ice->state.urb_deref_block_size;
#endif
      }
      iris_emit_merge(batch, cso->sf, dynamic_sf,
                      ARRAY_SIZE(dynamic_sf));
   }
   if (dirty & IRIS_DIRTY_WM) {
      struct iris_rasterizer_state *cso = ice->state.cso_rast;
      uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];

      iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
         wm.StatisticsEnable = ice->state.statistics_counters_enabled;

         wm.BarycentricInterpolationMode =
            wm_prog_data->barycentric_interp_modes;

         if (wm_prog_data->early_fragment_tests)
            wm.EarlyDepthStencilControl = EDSC_PREPS;
         else if (wm_prog_data->has_side_effects)
            wm.EarlyDepthStencilControl = EDSC_PSEXEC;

         /* We could skip this bit if color writes are enabled. */
         if (wm_prog_data->has_side_effects || wm_prog_data->uses_kill)
            wm.ForceThreadDispatchEnable = ForceON;
      }
      iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
   }

   if (dirty & IRIS_DIRTY_SBE) {
      iris_emit_sbe(batch, ice);
   }
   if (dirty & IRIS_DIRTY_PS_BLEND) {
      struct iris_blend_state *cso_blend = ice->state.cso_blend;
      struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
      const struct shader_info *fs_info =
         iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);

      uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
      iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
         pb.HasWriteableRT = has_writeable_rt(cso_blend, fs_info);
         pb.AlphaTestEnable = cso_zsa->alpha.enabled;

         /* The dual source blending docs caution against using SRC1 factors
          * when the shader doesn't use a dual source render target write.
          * Empirically, this can lead to GPU hangs, and the results are
          * undefined anyway, so simply disable blending to avoid the hang.
          */
         pb.ColorBufferBlendEnable = (cso_blend->blend_enables & 1) &&
            (!cso_blend->dual_color_blending || wm_prog_data->dual_src_blend);
      }

      iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
                      ARRAY_SIZE(cso_blend->ps_blend));
   }
   if (dirty & IRIS_DIRTY_WM_DEPTH_STENCIL) {
      struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
#if GEN_GEN >= 9 && GEN_GEN < 12
      struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
      uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
      iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
         wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
         wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
      }
      iris_emit_merge(batch, cso->wmds, stencil_refs, ARRAY_SIZE(cso->wmds));
#else
      /* Use modify disable fields which allow us to emit packets
       * directly instead of merging them later.
       */
      iris_batch_emit(batch, cso->wmds, sizeof(cso->wmds));
#endif

#if GEN_GEN >= 12
      iris_batch_emit(batch, cso->depth_bounds, sizeof(cso->depth_bounds));
#endif
   }
   if (dirty & IRIS_DIRTY_STENCIL_REF) {
#if GEN_GEN >= 12
      /* Use modify disable fields which allow us to emit packets
       * directly instead of merging them later.
       */
      struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
      uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
      iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
         wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
         wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
         wmds.StencilTestMaskModifyDisable = true;
         wmds.StencilWriteMaskModifyDisable = true;
         wmds.StencilStateModifyDisable = true;
         wmds.DepthStateModifyDisable = true;
      }
      iris_batch_emit(batch, stencil_refs, sizeof(stencil_refs));
#endif
   }
   if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
      uint32_t scissor_offset =
         emit_state(batch, ice->state.dynamic_uploader,
                    &ice->state.last_res.scissor,
                    ice->state.scissors,
                    sizeof(struct pipe_scissor_state) *
                    ice->state.num_viewports, 32);

      iris_emit_cmd(batch, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
         ptr.ScissorRectPointer = scissor_offset;
      }
   }
   if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
      struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;

      /* Do not emit the clear params yet.  We need to update the clear value
       * first.
       */
      uint32_t clear_length = GENX(3DSTATE_CLEAR_PARAMS_length) * 4;
      uint32_t cso_z_size = batch->screen->isl_dev.ds.size - clear_length;

#if GEN_GEN == 12
      /* GEN:BUG:14010455700
       *
       * ISL will change some CHICKEN registers depending on the depth surface
       * format, along with emitting the depth and stencil packets. In that
       * case, we want to do a depth flush and stall, so the pipeline is not
       * using these settings while we change the registers.
       */
      iris_emit_end_of_pipe_sync(batch,
                                 "Workaround: Stop pipeline for 14010455700",
                                 PIPE_CONTROL_DEPTH_STALL |
                                 PIPE_CONTROL_DEPTH_CACHE_FLUSH);
#endif

      iris_batch_emit(batch, cso_z->packets, cso_z_size);

      if (GEN_GEN >= 12) {
         /* GEN:BUG:1408224581
          *
          * Workaround: Gen12LP Astep only An additional pipe control with
          * post-sync = store dword operation would be required.( w/a is to
          * have an additional pipe control after the stencil state whenever
          * the surface state bits of this state is changing).
          */
         iris_emit_pipe_control_write(batch, "WA for stencil state",
                                      PIPE_CONTROL_WRITE_IMMEDIATE,
                                      batch->screen->workaround_address.bo,
                                      batch->screen->workaround_address.offset,
                                      0);
      }

      union isl_color_value clear_value = { .f32 = { 0, } };

      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      if (cso_fb->zsbuf) {
         struct iris_resource *zres, *sres;
         iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
                                          &zres, &sres);
         if (zres && zres->aux.bo)
            clear_value = iris_resource_get_clear_color(zres, NULL, NULL);
      }

      uint32_t clear_params[GENX(3DSTATE_CLEAR_PARAMS_length)];
      iris_pack_command(GENX(3DSTATE_CLEAR_PARAMS), clear_params, clear) {
         clear.DepthClearValueValid = true;
         clear.DepthClearValue = clear_value.f32[0];
      }
      iris_batch_emit(batch, clear_params, clear_length);
   }
   if (dirty & (IRIS_DIRTY_DEPTH_BUFFER | IRIS_DIRTY_WM_DEPTH_STENCIL)) {
      /* Listen for buffer changes, and also write enable changes. */
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
   }

   if (dirty & IRIS_DIRTY_POLYGON_STIPPLE) {
      iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
         for (int i = 0; i < 32; i++) {
            poly.PatternRow[i] = ice->state.poly_stipple.stipple[i];
         }
      }
   }

   if (dirty & IRIS_DIRTY_LINE_STIPPLE) {
      struct iris_rasterizer_state *cso = ice->state.cso_rast;
      iris_batch_emit(batch, cso->line_stipple, sizeof(cso->line_stipple));
   }

   if (dirty & IRIS_DIRTY_VF_TOPOLOGY) {
      iris_emit_cmd(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
         topo.PrimitiveTopologyType =
            translate_prim_type(draw->mode, draw->vertices_per_patch);
      }
   }
   if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
      int count = util_bitcount64(ice->state.bound_vertex_buffers);
      uint64_t dynamic_bound = ice->state.bound_vertex_buffers;

      if (ice->state.vs_uses_draw_params) {
         assert(ice->draw.draw_params.res);

         struct iris_vertex_buffer_state *state =
            &(ice->state.genx->vertex_buffers[count]);
         pipe_resource_reference(&state->resource, ice->draw.draw_params.res);
         struct iris_resource *res = (void *) state->resource;

         iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
            vb.VertexBufferIndex = count;
            vb.AddressModifyEnable = true;
            vb.BufferPitch = 0;
            vb.BufferSize = res->bo->size - ice->draw.draw_params.offset;
            vb.BufferStartingAddress =
               ro_bo(NULL, res->bo->gtt_offset +
                           (int) ice->draw.draw_params.offset);
            vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev);
         }
         dynamic_bound |= 1ull << count;
         count++;
      }

      if (ice->state.vs_uses_derived_draw_params) {
         struct iris_vertex_buffer_state *state =
            &(ice->state.genx->vertex_buffers[count]);
         pipe_resource_reference(&state->resource,
                                 ice->draw.derived_draw_params.res);
         struct iris_resource *res =
            (void *) ice->draw.derived_draw_params.res;

         iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
            vb.VertexBufferIndex = count;
            vb.AddressModifyEnable = true;
            vb.BufferPitch = 0;
            vb.BufferSize =
               res->bo->size - ice->draw.derived_draw_params.offset;
            vb.BufferStartingAddress =
               ro_bo(NULL, res->bo->gtt_offset +
                           (int) ice->draw.derived_draw_params.offset);
            vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev);
         }
         dynamic_bound |= 1ull << count;
         count++;
      }

      if (count) {
#if GEN_GEN >= 11
         /* Gen11+ doesn't need the cache workaround below */
         uint64_t bound = dynamic_bound;
         while (bound) {
            const int i = u_bit_scan64(&bound);
            iris_use_optional_res(batch, genx->vertex_buffers[i].resource,
                                  false, IRIS_DOMAIN_OTHER_READ);
         }
#else
         /* The VF cache designers cut corners, and made the cache key's
          * <VertexBufferIndex, Memory Address> tuple only consider the bottom
          * 32 bits of the address.  If you have two vertex buffers which get
          * placed exactly 4 GiB apart and use them in back-to-back draw calls,
          * you can get collisions (even within a single batch).
          *
          * So, we need to do a VF cache invalidate if the buffer for a VB
          * slot changes [48:32] address bits from the previous time.
          */
         unsigned flush_flags = 0;

         uint64_t bound = dynamic_bound;
         while (bound) {
            const int i = u_bit_scan64(&bound);
            uint16_t high_bits = 0;

            struct iris_resource *res =
               (void *) genx->vertex_buffers[i].resource;
            if (res) {
               iris_use_pinned_bo(batch, res->bo, false,
                                  IRIS_DOMAIN_OTHER_READ);

               high_bits = res->bo->gtt_offset >> 32ull;
               if (high_bits != ice->state.last_vbo_high_bits[i]) {
                  flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                 PIPE_CONTROL_CS_STALL;
                  ice->state.last_vbo_high_bits[i] = high_bits;
               }
            }
         }

         if (flush_flags) {
            iris_emit_pipe_control_flush(batch,
                                         "workaround: VF cache 32-bit key [VB]",
                                         flush_flags);
         }
#endif

         const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);

         uint32_t *map =
            iris_get_command_space(batch, 4 * (1 + vb_dwords * count));
         _iris_pack_command(batch, GENX(3DSTATE_VERTEX_BUFFERS), map, vb) {
            vb.DWordLength = (vb_dwords * count + 1) - 2;
         }
         map += 1;

         bound = dynamic_bound;
         while (bound) {
            const int i = u_bit_scan64(&bound);
            memcpy(map, genx->vertex_buffers[i].state,
                   sizeof(uint32_t) * vb_dwords);
            map += vb_dwords;
         }
      }
   }
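
   /* Worked example for the VF cache workaround above (illustrative comment
    * only): a vertex buffer at GTT address 0x100001000 replaced in the same
    * VB slot by one at 0x200001000 shares the bottom 32 address bits
    * (0x00001000), so the cache would serve stale data; comparing the stored
    * high bits (0x1 vs 0x2) catches the change and triggers the
    * VF_CACHE_INVALIDATE + CS_STALL flush.
    */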
   if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) {
      struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
      const unsigned entries = MAX2(cso->count, 1);
      if (!(ice->state.vs_needs_sgvs_element ||
            ice->state.vs_uses_derived_draw_params ||
            ice->state.vs_needs_edge_flag)) {
         iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) *
                         (1 + entries * GENX(VERTEX_ELEMENT_STATE_length)));
      } else {
         uint32_t dynamic_ves[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
         const unsigned dyn_count = cso->count +
            ice->state.vs_needs_sgvs_element +
            ice->state.vs_uses_derived_draw_params;

         iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS),
                           &dynamic_ves, ve) {
            ve.DWordLength =
               1 + GENX(VERTEX_ELEMENT_STATE_length) * dyn_count - 2;
         }
         memcpy(&dynamic_ves[1], &cso->vertex_elements[1],
                (cso->count - ice->state.vs_needs_edge_flag) *
                GENX(VERTEX_ELEMENT_STATE_length) * sizeof(uint32_t));
         uint32_t *ve_pack_dest =
            &dynamic_ves[1 + (cso->count - ice->state.vs_needs_edge_flag) *
                         GENX(VERTEX_ELEMENT_STATE_length)];

         if (ice->state.vs_needs_sgvs_element) {
            uint32_t base_ctrl = ice->state.vs_uses_draw_params ?
                                 VFCOMP_STORE_SRC : VFCOMP_STORE_0;
            iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
               ve.Valid = true;
               ve.VertexBufferIndex =
                  util_bitcount64(ice->state.bound_vertex_buffers);
               ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
               ve.Component0Control = base_ctrl;
               ve.Component1Control = base_ctrl;
               ve.Component2Control = VFCOMP_STORE_0;
               ve.Component3Control = VFCOMP_STORE_0;
            }
            ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
         }
         if (ice->state.vs_uses_derived_draw_params) {
            iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
               ve.Valid = true;
               ve.VertexBufferIndex =
                  util_bitcount64(ice->state.bound_vertex_buffers) +
                  ice->state.vs_uses_draw_params;
               ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
               ve.Component0Control = VFCOMP_STORE_SRC;
               ve.Component1Control = VFCOMP_STORE_SRC;
               ve.Component2Control = VFCOMP_STORE_0;
               ve.Component3Control = VFCOMP_STORE_0;
            }
            ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
         }
         if (ice->state.vs_needs_edge_flag) {
            for (int i = 0; i < GENX(VERTEX_ELEMENT_STATE_length); i++)
               ve_pack_dest[i] = cso->edgeflag_ve[i];
         }

         iris_batch_emit(batch, &dynamic_ves, sizeof(uint32_t) *
                         (1 + dyn_count * GENX(VERTEX_ELEMENT_STATE_length)));
      }

      if (!ice->state.vs_needs_edge_flag) {
         iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) *
                         entries * GENX(3DSTATE_VF_INSTANCING_length));
      } else {
         assert(cso->count > 0);
         const unsigned edgeflag_index = cso->count - 1;
         uint32_t dynamic_vfi[33 * GENX(3DSTATE_VF_INSTANCING_length)];
         memcpy(&dynamic_vfi[0], cso->vf_instancing, edgeflag_index *
                GENX(3DSTATE_VF_INSTANCING_length) * sizeof(uint32_t));

         uint32_t *vfi_pack_dest = &dynamic_vfi[0] +
            edgeflag_index * GENX(3DSTATE_VF_INSTANCING_length);
         iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
            vi.VertexElementIndex = edgeflag_index +
               ice->state.vs_needs_sgvs_element +
               ice->state.vs_uses_derived_draw_params;
         }
         for (int i = 0; i < GENX(3DSTATE_VF_INSTANCING_length); i++)
            vfi_pack_dest[i] |= cso->edgeflag_vfi[i];

         iris_batch_emit(batch, &dynamic_vfi[0], sizeof(uint32_t) *
                         entries * GENX(3DSTATE_VF_INSTANCING_length));
      }
   }
   if (dirty & IRIS_DIRTY_VF_SGVS) {
      const struct brw_vs_prog_data *vs_prog_data = (void *)
         ice->shaders.prog[MESA_SHADER_VERTEX]->prog_data;
      struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;

      iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgv) {
         if (vs_prog_data->uses_vertexid) {
            sgv.VertexIDEnable = true;
            sgv.VertexIDComponentNumber = 2;
            sgv.VertexIDElementOffset =
               cso->count - ice->state.vs_needs_edge_flag;
         }

         if (vs_prog_data->uses_instanceid) {
            sgv.InstanceIDEnable = true;
            sgv.InstanceIDComponentNumber = 3;
            sgv.InstanceIDElementOffset =
               cso->count - ice->state.vs_needs_edge_flag;
         }
      }
   }

   if (dirty & IRIS_DIRTY_VF) {
      iris_emit_cmd(batch, GENX(3DSTATE_VF), vf) {
         if (draw->primitive_restart) {
            vf.IndexedDrawCutIndexEnable = true;
            vf.CutIndex = draw->restart_index;
         }
      }
   }

   if (dirty & IRIS_DIRTY_VF_STATISTICS) {
      iris_emit_cmd(batch, GENX(3DSTATE_VF_STATISTICS), vf) {
         vf.StatisticsEnable = true;
      }
   }

#if GEN_GEN == 8
   if (dirty & IRIS_DIRTY_PMA_FIX) {
      bool enable = want_pma_fix(ice);
      genX(update_pma_fix)(ice, batch, enable);
   }
#endif

   if (ice->state.current_hash_scale != 1)
      genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1);

#if GEN_GEN == 12
   genX(invalidate_aux_map_state)(batch);
#endif
}
static void
iris_upload_render_state(struct iris_context *ice,
                         struct iris_batch *batch,
                         const struct pipe_draw_info *draw)
{
   bool use_predicate = ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT;

   iris_batch_sync_region_start(batch);

   /* Always pin the binder.  If we're emitting new binding table pointers,
    * we need it.  If not, we're probably inheriting old tables via the
    * context, and need it anyway.  Since true zero-bindings cases are
    * practically non-existent, just pin it and avoid last_res tracking.
    */
   iris_use_pinned_bo(batch, ice->state.binder.bo, false,
                      IRIS_DOMAIN_NONE);

   if (!batch->contains_draw_with_next_seqno) {
      iris_restore_render_saved_bos(ice, batch, draw);
      batch->contains_draw_with_next_seqno = batch->contains_draw = true;
   }

   iris_upload_dirty_render_state(ice, batch, draw);

   if (draw->index_size > 0) {
      unsigned offset;

      if (draw->has_user_indices) {
         u_upload_data(ice->ctx.stream_uploader, 0,
                       draw->count * draw->index_size, 4, draw->index.user,
                       &offset, &ice->state.last_res.index_buffer);
      } else {
         struct iris_resource *res = (void *) draw->index.resource;
         res->bind_history |= PIPE_BIND_INDEX_BUFFER;

         pipe_resource_reference(&ice->state.last_res.index_buffer,
                                 draw->index.resource);
         offset = 0;
      }

      struct iris_genx_state *genx = ice->state.genx;
      struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);

      uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)];
      iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) {
         ib.IndexFormat = draw->index_size >> 1;
         ib.MOCS = iris_mocs(bo, &batch->screen->isl_dev);
         ib.BufferSize = bo->size - offset;
         ib.BufferStartingAddress = ro_bo(NULL, bo->gtt_offset + offset);
      }

      if (memcmp(genx->last_index_buffer, ib_packet, sizeof(ib_packet)) != 0) {
         memcpy(genx->last_index_buffer, ib_packet, sizeof(ib_packet));
         iris_batch_emit(batch, ib_packet, sizeof(ib_packet));
         iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_OTHER_READ);
      }

#if GEN_GEN < 11
      /* The VF cache key only uses 32-bits, see vertex buffer comment above */
      uint16_t high_bits = bo->gtt_offset >> 32ull;
      if (high_bits != ice->state.last_index_bo_high_bits) {
         iris_emit_pipe_control_flush(batch,
                                      "workaround: VF cache 32-bit key [IB]",
                                      PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                      PIPE_CONTROL_CS_STALL);
         ice->state.last_index_bo_high_bits = high_bits;
      }
#endif
   }
#define _3DPRIM_END_OFFSET          0x2420
#define _3DPRIM_START_VERTEX        0x2430
#define _3DPRIM_VERTEX_COUNT        0x2434
#define _3DPRIM_INSTANCE_COUNT      0x2438
#define _3DPRIM_START_INSTANCE      0x243C
#define _3DPRIM_BASE_VERTEX         0x2440

   if (draw->indirect) {
      if (draw->indirect->indirect_draw_count) {
         use_predicate = true;

         struct iris_bo *draw_count_bo =
            iris_resource_bo(draw->indirect->indirect_draw_count);
         unsigned draw_count_offset =
            draw->indirect->indirect_draw_count_offset;

         iris_emit_pipe_control_flush(batch,
                                      "ensure indirect draw buffer is flushed",
                                      PIPE_CONTROL_FLUSH_ENABLE);

         if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
            struct gen_mi_builder b;
            gen_mi_builder_init(&b, batch);

            /* comparison = draw id < draw count */
            struct gen_mi_value comparison =
               gen_mi_ult(&b, gen_mi_imm(draw->drawid),
                              gen_mi_mem32(ro_bo(draw_count_bo,
                                                 draw_count_offset)));

            /* predicate = comparison & conditional rendering predicate */
            gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_RESULT),
                             gen_mi_iand(&b, comparison,
                                             gen_mi_reg32(CS_GPR(15))));
         } else {
            uint32_t mi_predicate;

            /* Upload the id of the current primitive to MI_PREDICATE_SRC1. */
            iris_load_register_imm64(batch, MI_PREDICATE_SRC1, draw->drawid);
            /* Upload the current draw count from the draw parameters buffer
             * to MI_PREDICATE_SRC0.
             */
            iris_load_register_mem32(batch, MI_PREDICATE_SRC0,
                                     draw_count_bo, draw_count_offset);
            /* Zero the top 32-bits of MI_PREDICATE_SRC0 */
            iris_load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0);

            if (draw->drawid == 0) {
               mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
                              MI_PREDICATE_COMBINEOP_SET |
                              MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
            } else {
               /* While draw_index < draw_count the predicate's result will be
                *  (draw_index == draw_count) ^ TRUE = TRUE
                * When draw_index == draw_count the result is
                *  (TRUE) ^ TRUE = FALSE
                * After this all results will be:
                *  (FALSE) ^ FALSE = FALSE
                */
               mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOAD |
                              MI_PREDICATE_COMBINEOP_XOR |
                              MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
            }
            iris_batch_emit(batch, &mi_predicate, sizeof(uint32_t));
         }
      }
      struct iris_bo *bo = iris_resource_bo(draw->indirect->buffer);

      iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
         lrm.RegisterAddress = _3DPRIM_VERTEX_COUNT;
         lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 0);
      }
      iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
         lrm.RegisterAddress = _3DPRIM_INSTANCE_COUNT;
         lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 4);
      }
      iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
         lrm.RegisterAddress = _3DPRIM_START_VERTEX;
         lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 8);
      }
      if (draw->index_size) {
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
            lrm.RegisterAddress = _3DPRIM_BASE_VERTEX;
            lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
         }
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
            lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
            lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 16);
         }
      } else {
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
            lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
            lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
         }
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
            lri.RegisterOffset = _3DPRIM_BASE_VERTEX;
            lri.DataDWord = 0;
         }
      }
   } else if (draw->count_from_stream_output) {
      struct iris_stream_output_target *so =
         (void *) draw->count_from_stream_output;

      /* XXX: Replace with actual cache tracking */
      iris_emit_pipe_control_flush(batch,
                                   "draw count from stream output stall",
                                   PIPE_CONTROL_CS_STALL);

      struct gen_mi_builder b;
      gen_mi_builder_init(&b, batch);

      struct iris_address addr =
         ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
      struct gen_mi_value offset =
         gen_mi_iadd_imm(&b, gen_mi_mem32(addr), -so->base.buffer_offset);

      gen_mi_store(&b, gen_mi_reg32(_3DPRIM_VERTEX_COUNT),
                       gen_mi_udiv32_imm(&b, offset, so->stride));

      _iris_emit_lri(batch, _3DPRIM_START_VERTEX, 0);
      _iris_emit_lri(batch, _3DPRIM_BASE_VERTEX, 0);
      _iris_emit_lri(batch, _3DPRIM_START_INSTANCE, 0);
      _iris_emit_lri(batch, _3DPRIM_INSTANCE_COUNT, draw->instance_count);
   }
   iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;
      prim.PredicateEnable = use_predicate;

      if (draw->indirect || draw->count_from_stream_output) {
         prim.IndirectParameterEnable = true;
      } else {
         prim.StartInstanceLocation = draw->start_instance;
         prim.InstanceCount = draw->instance_count;
         prim.VertexCountPerInstance = draw->count;

         prim.StartVertexLocation = draw->start;

         if (draw->index_size) {
            prim.BaseVertexLocation += draw->index_bias;
         } else {
            prim.StartVertexLocation += draw->index_bias;
         }
      }
   }

   iris_batch_sync_region_end(batch);
}
static void
iris_load_indirect_location(struct iris_context *ice,
                            struct iris_batch *batch,
                            const struct pipe_grid_info *grid)
{
#define GPGPU_DISPATCHDIMX 0x2500
#define GPGPU_DISPATCHDIMY 0x2504
#define GPGPU_DISPATCHDIMZ 0x2508

   assert(grid->indirect);

   struct iris_state_ref *grid_size = &ice->state.grid_size;
   struct iris_bo *bo = iris_resource_bo(grid_size->res);
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = GPGPU_DISPATCHDIMX;
      lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 0);
   }
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = GPGPU_DISPATCHDIMY;
      lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 4);
   }
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = GPGPU_DISPATCHDIMZ;
      lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 8);
   }
}
static void
iris_upload_gpgpu_walker(struct iris_context *ice,
                         struct iris_batch *batch,
                         const struct pipe_grid_info *grid)
{
   const uint64_t stage_dirty = ice->state.stage_dirty;
   struct iris_screen *screen = batch->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_binder *binder = &ice->state.binder;
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
   struct iris_compiled_shader *shader =
      ice->shaders.prog[MESA_SHADER_COMPUTE];
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
   const uint32_t group_size =
      grid->block[0] * grid->block[1] * grid->block[2];
   const unsigned simd_size =
      brw_cs_simd_size_for_group_size(devinfo, cs_prog_data, group_size);
   const unsigned threads = DIV_ROUND_UP(group_size, simd_size);
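
   /* Worked example (illustrative comment only): an 8x8x1 workgroup has
    * group_size = 64; if brw_cs_simd_size_for_group_size() picks SIMD16,
    * threads = DIV_ROUND_UP(64, 16) = 4 EU threads per group.  A 7x7x1
    * group (49 invocations) also needs DIV_ROUND_UP(49, 16) = 4 threads,
    * with the partial last thread masked off via brw_cs_right_mask() below.
    */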
   if (stage_dirty & IRIS_STAGE_DIRTY_CS) {
      /* The MEDIA_VFE_STATE documentation for Gen8+ says:
       *
       *   "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
       *    the only bits that are changed are scoreboard related: Scoreboard
       *    Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta.  For
       *    these scoreboard related states, a MEDIA_STATE_FLUSH is
       *    sufficient."
       */
      iris_emit_pipe_control_flush(batch,
                                   "workaround: stall before MEDIA_VFE_STATE",
                                   PIPE_CONTROL_CS_STALL);

      iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
         if (prog_data->total_scratch) {
            struct iris_bo *bo =
               iris_get_scratch_space(ice, prog_data->total_scratch,
                                      MESA_SHADER_COMPUTE);
            vfe.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
            vfe.ScratchSpaceBasePointer = rw_bo(bo, 0, IRIS_DOMAIN_NONE);
         }

         vfe.MaximumNumberofThreads =
            devinfo->max_cs_threads * screen->subslice_total - 1;
#if GEN_GEN < 11
         vfe.ResetGatewayTimer =
            Resettingrelativetimerandlatchingtheglobaltimestamp;
#endif
#if GEN_GEN == 8
         vfe.BypassGatewayControl = true;
#endif
         vfe.NumberofURBEntries = 2;
         vfe.URBEntryAllocationSize = 2;

         vfe.CURBEAllocationSize =
            ALIGN(cs_prog_data->push.per_thread.regs * threads +
                  cs_prog_data->push.cross_thread.regs, 2);
      }
   }

   /* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */
   if (stage_dirty & IRIS_STAGE_DIRTY_CS) {
      uint32_t curbe_data_offset = 0;
      assert(cs_prog_data->push.cross_thread.dwords == 0 &&
             cs_prog_data->push.per_thread.dwords == 1 &&
             cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
      const unsigned push_const_size =
         brw_cs_push_const_total_size(cs_prog_data, threads);
      uint32_t *curbe_data_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.cs_thread_ids,
                      ALIGN(push_const_size, 64), 64,
                      &curbe_data_offset);
      assert(curbe_data_map);
      memset(curbe_data_map, 0x5a, ALIGN(push_const_size, 64));
      iris_fill_cs_push_const_buffer(cs_prog_data, threads, curbe_data_map);

      iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
         curbe.CURBETotalDataLength = ALIGN(push_const_size, 64);
         curbe.CURBEDataStartAddress = curbe_data_offset;
      }
   }
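
   /* Sizing note (assumption, illustrative comment only): with exactly one
    * per-thread push register (the subgroup ID asserted above) and a
    * 32-byte GRF, the total push size is roughly 32 * threads bytes -
    * e.g. 128 bytes for threads = 4 - which ALIGN(, 64) pads to a 64-byte
    * multiple for both the 0x5a fill and CURBETotalDataLength.
    */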
   for (unsigned i = 0; i < IRIS_MAX_GLOBAL_BINDINGS; i++) {
      struct pipe_resource *res = ice->state.global_bindings[i];
      if (!res)
         continue;

      iris_use_pinned_bo(batch, iris_resource_bo(res),
                         true, IRIS_DOMAIN_NONE);
   }

   if (stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_CS |
                      IRIS_STAGE_DIRTY_BINDINGS_CS |
                      IRIS_STAGE_DIRTY_CONSTANTS_CS |
                      IRIS_STAGE_DIRTY_CS)) {
      uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];

      iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
         idd.KernelStartPointer =
            KSP(shader) + brw_cs_prog_data_prog_offset(cs_prog_data,
                                                       simd_size);
         idd.SamplerStatePointer = shs->sampler_table.offset;
         idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
         idd.NumberofThreadsinGPGPUThreadGroup = threads;
      }

      for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
         desc[i] |= ((uint32_t *) shader->derived_data)[i];

      iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
         load.InterfaceDescriptorTotalLength =
            GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
         load.InterfaceDescriptorDataStartAddress =
            emit_state(batch, ice->state.dynamic_uploader,
                       &ice->state.last_res.cs_desc, desc, sizeof(desc), 64);
      }
   }

   if (grid->indirect)
      iris_load_indirect_location(ice, batch, grid);

   const uint32_t right_mask = brw_cs_right_mask(group_size, simd_size);

   iris_emit_cmd(batch, GENX(GPGPU_WALKER), ggw) {
      ggw.IndirectParameterEnable    = grid->indirect != NULL;
      ggw.SIMDSize                   = simd_size / 16;
      ggw.ThreadDepthCounterMaximum  = 0;
      ggw.ThreadHeightCounterMaximum = 0;
      ggw.ThreadWidthCounterMaximum  = threads - 1;
      ggw.ThreadGroupIDXDimension    = grid->grid[0];
      ggw.ThreadGroupIDYDimension    = grid->grid[1];
      ggw.ThreadGroupIDZDimension    = grid->grid[2];
      ggw.RightExecutionMask         = right_mask;
      ggw.BottomExecutionMask        = 0xffffffff;
   }

   iris_emit_cmd(batch, GENX(MEDIA_STATE_FLUSH), msf);
}

static void
iris_upload_compute_state(struct iris_context *ice,
                          struct iris_batch *batch,
                          const struct pipe_grid_info *grid)
{
   const uint64_t stage_dirty = ice->state.stage_dirty;
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
   struct iris_compiled_shader *shader =
      ice->shaders.prog[MESA_SHADER_COMPUTE];

   iris_batch_sync_region_start(batch);

   /* Always pin the binder.  If we're emitting new binding table pointers,
    * we need it.  If not, we're probably inheriting old tables via the
    * context, and need it anyway.  Since true zero-bindings cases are
    * practically non-existent, just pin it and avoid last_res tracking.
    */
   iris_use_pinned_bo(batch, ice->state.binder.bo, false, IRIS_DOMAIN_NONE);

   if ((stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
       shs->sysvals_need_upload)
      upload_sysvals(ice, MESA_SHADER_COMPUTE, grid);

   if (stage_dirty & IRIS_STAGE_DIRTY_BINDINGS_CS)
      iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);

   if (stage_dirty & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS)
      iris_upload_sampler_states(ice, MESA_SHADER_COMPUTE);

   iris_use_optional_res(batch, shs->sampler_table.res, false,
                         IRIS_DOMAIN_NONE);
   iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false,
                      IRIS_DOMAIN_NONE);

   if (ice->state.need_border_colors)
      iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false,
                         IRIS_DOMAIN_NONE);

#if GEN_GEN >= 12
   genX(invalidate_aux_map_state)(batch);
#endif

   iris_upload_gpgpu_walker(ice, batch, grid);

   if (!batch->contains_draw_with_next_seqno) {
      iris_restore_compute_saved_bos(ice, batch, grid);
      batch->contains_draw_with_next_seqno = batch->contains_draw = true;
   }

   iris_batch_sync_region_end(batch);
}
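
/* This is installed as screen->vtbl.upload_compute_state in
 * genX(init_state) below, and is reached (indirectly) from the
 * pipe->launch_grid hook on each compute dispatch.
 */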

/**
 * State module teardown.
 */
static void
iris_destroy_state(struct iris_context *ice)
{
   struct iris_genx_state *genx = ice->state.genx;

   pipe_resource_reference(&ice->draw.draw_params.res, NULL);
   pipe_resource_reference(&ice->draw.derived_draw_params.res, NULL);

   /* Loop over all VBOs, including ones for draw parameters */
   for (unsigned i = 0; i < ARRAY_SIZE(genx->vertex_buffers); i++) {
      pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL);
   }

   free(ice->state.genx);

   for (int i = 0; i < 4; i++) {
      pipe_so_target_reference(&ice->state.so_target[i], NULL);
   }

   for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
      pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
   }
   pipe_surface_reference(&ice->state.framebuffer.zsbuf, NULL);

   for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
      struct iris_shader_state *shs = &ice->state.shaders[stage];
      pipe_resource_reference(&shs->sampler_table.res, NULL);
      for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
         pipe_resource_reference(&shs->constbuf[i].buffer, NULL);
         pipe_resource_reference(&shs->constbuf_surf_state[i].res, NULL);
      }
      for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
         pipe_resource_reference(&shs->image[i].base.resource, NULL);
         pipe_resource_reference(&shs->image[i].surface_state.ref.res, NULL);
         free(shs->image[i].surface_state.cpu);
      }
      for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
         pipe_resource_reference(&shs->ssbo[i].buffer, NULL);
         pipe_resource_reference(&shs->ssbo_surf_state[i].res, NULL);
      }
      for (int i = 0; i < IRIS_MAX_TEXTURE_SAMPLERS; i++) {
         pipe_sampler_view_reference((struct pipe_sampler_view **)
                                     &shs->textures[i], NULL);
      }
   }

   pipe_resource_reference(&ice->state.grid_size.res, NULL);
   pipe_resource_reference(&ice->state.grid_surf_state.res, NULL);

   pipe_resource_reference(&ice->state.null_fb.res, NULL);
   pipe_resource_reference(&ice->state.unbound_tex.res, NULL);

   pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
   pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
   pipe_resource_reference(&ice->state.last_res.color_calc, NULL);
   pipe_resource_reference(&ice->state.last_res.scissor, NULL);
   pipe_resource_reference(&ice->state.last_res.blend, NULL);
   pipe_resource_reference(&ice->state.last_res.index_buffer, NULL);
   pipe_resource_reference(&ice->state.last_res.cs_thread_ids, NULL);
   pipe_resource_reference(&ice->state.last_res.cs_desc, NULL);
}
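
/* Note: pipe_resource_reference(&ptr, NULL) is Gallium's refcounting
 * helper; it drops the old reference (freeing the resource if this was
 * the last one) and clears the pointer, which is why teardown is mostly
 * a long list of these calls rather than explicit frees.
 */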

/* ------------------------------------------------------------------- */

static void
iris_rebind_buffer(struct iris_context *ice,
                   struct iris_resource *res)
{
   struct pipe_context *ctx = &ice->ctx;
   struct iris_genx_state *genx = ice->state.genx;

   assert(res->base.target == PIPE_BUFFER);

   /* Buffers can't be framebuffer attachments, nor display related,
    * and we don't have upstream Clover support.
    */
   assert(!(res->bind_history & (PIPE_BIND_DEPTH_STENCIL |
                                 PIPE_BIND_RENDER_TARGET |
                                 PIPE_BIND_BLENDABLE |
                                 PIPE_BIND_DISPLAY_TARGET |
                                 PIPE_BIND_CURSOR |
                                 PIPE_BIND_COMPUTE_RESOURCE |
                                 PIPE_BIND_GLOBAL)));

   if (res->bind_history & PIPE_BIND_VERTEX_BUFFER) {
      uint64_t bound_vbs = ice->state.bound_vertex_buffers;
      while (bound_vbs) {
         const int i = u_bit_scan64(&bound_vbs);
         struct iris_vertex_buffer_state *state = &genx->vertex_buffers[i];

         /* Update the CPU struct */
         STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32);
         STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64);
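
         /* The packed VERTEX_BUFFER_STATE DWords live in state->state[];
          * the asserts above pin BufferStartingAddress at bit 32 with a
          * 64-bit width, i.e. DWords 1 and 2, so the 64-bit store below
          * rewrites exactly that field in place.
          */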
         uint64_t *addr = (uint64_t *) &state->state[1];
         struct iris_bo *bo = iris_resource_bo(state->resource);

         if (*addr != bo->gtt_offset + state->offset) {
            *addr = bo->gtt_offset + state->offset;
            ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
         }
      }
   }

   /* We don't need to handle PIPE_BIND_INDEX_BUFFER here: we re-emit
    * the 3DSTATE_INDEX_BUFFER packet whenever the address changes.
    *
    * There is also no need to handle these:
    * - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw)
    * - PIPE_BIND_QUERY_BUFFER (no persistent state references)
    */

   if (res->bind_history & PIPE_BIND_STREAM_OUTPUT) {
      /* XXX: be careful about resetting vs appending... */
      assert(false);
   }

   for (int s = MESA_SHADER_VERTEX; s < MESA_SHADER_STAGES; s++) {
      struct iris_shader_state *shs = &ice->state.shaders[s];
      enum pipe_shader_type p_stage = stage_to_pipe(s);

      if (!(res->bind_stages & (1 << s)))
         continue;

      if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
         /* Skip constant buffer 0, it's for regular uniforms, not UBOs */
         uint32_t bound_cbufs = shs->bound_cbufs & ~1u;
         while (bound_cbufs) {
            const int i = u_bit_scan(&bound_cbufs);
            struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
            struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];

            if (res->bo == iris_resource_bo(cbuf->buffer)) {
               pipe_resource_reference(&surf_state->res, NULL);
               ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << s;
            }
         }
      }

      if (res->bind_history & PIPE_BIND_SHADER_BUFFER) {
         uint32_t bound_ssbos = shs->bound_ssbos;
         while (bound_ssbos) {
            const int i = u_bit_scan(&bound_ssbos);
            struct pipe_shader_buffer *ssbo = &shs->ssbo[i];

            if (res->bo == iris_resource_bo(ssbo->buffer)) {
               struct pipe_shader_buffer buf = {
                  .buffer = &res->base,
                  .buffer_offset = ssbo->buffer_offset,
                  .buffer_size = ssbo->buffer_size,
               };
               iris_set_shader_buffers(ctx, p_stage, i, 1, &buf,
                                       (shs->writable_ssbos >> i) & 1);
            }
         }
      }

      if (res->bind_history & PIPE_BIND_SAMPLER_VIEW) {
         uint32_t bound_sampler_views = shs->bound_sampler_views;
         while (bound_sampler_views) {
            const int i = u_bit_scan(&bound_sampler_views);
            struct iris_sampler_view *isv = shs->textures[i];
            struct iris_bo *bo = isv->res->bo;

            if (update_surface_state_addrs(ice->state.surface_uploader,
                                           &isv->surface_state, bo)) {
               ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
            }
         }
      }

      if (res->bind_history & PIPE_BIND_SHADER_IMAGE) {
         uint32_t bound_image_views = shs->bound_image_views;
         while (bound_image_views) {
            const int i = u_bit_scan(&bound_image_views);
            struct iris_image_view *iv = &shs->image[i];
            struct iris_bo *bo = iris_resource_bo(iv->base.resource);

            if (update_surface_state_addrs(ice->state.surface_uploader,
                                           &iv->surface_state, bo)) {
               ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
            }
         }
      }
   }
}
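
/* Installed as screen->vtbl.rebind_buffer below; it runs when a
 * PIPE_BUFFER's backing BO has been swapped out (e.g. on invalidation or
 * reallocation), patching any GPU addresses that were baked into cached
 * state and flagging the affected dirty bits so stale packets get
 * re-emitted.
 */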

/* ------------------------------------------------------------------- */

/**
 * Introduce a batch synchronization boundary, and update its cache coherency
 * status to reflect the execution of a PIPE_CONTROL command with the
 * specified flags.
 */
static void
batch_mark_sync_for_pipe_control(struct iris_batch *batch, uint32_t flags)
{
   iris_batch_sync_boundary(batch);

   if ((flags & PIPE_CONTROL_CS_STALL)) {
      if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
         iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_RENDER_WRITE);

      if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
         iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);

      if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
         iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_WRITE);

      if ((flags & (PIPE_CONTROL_CACHE_FLUSH_BITS |
                    PIPE_CONTROL_STALL_AT_SCOREBOARD)))
         iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_READ);
   }

   if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
      iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_RENDER_WRITE);

   if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
      iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);

   if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
      iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_WRITE);

   if ((flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) &&
       (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE))
      iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_READ);
}

static uint32_t
flags_to_post_sync_op(uint32_t flags)
{
   if (flags & PIPE_CONTROL_WRITE_IMMEDIATE)
      return WriteImmediateData;

   if (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT)
      return WritePSDepthCount;

   if (flags & PIPE_CONTROL_WRITE_TIMESTAMP)
      return WriteTimestamp;

   return 0;
}

/**
 * Do the given flags have a Post Sync or LRI Post Sync operation?
 */
static enum pipe_control_flags
get_post_sync_flags(enum pipe_control_flags flags)
{
   flags &= PIPE_CONTROL_WRITE_IMMEDIATE |
            PIPE_CONTROL_WRITE_DEPTH_COUNT |
            PIPE_CONTROL_WRITE_TIMESTAMP |
            PIPE_CONTROL_LRI_POST_SYNC_OP;

   /* Only one "Post Sync Op" is allowed, and it's mutually exclusive with
    * "LRI Post Sync Operation".  So more than one bit set would be illegal.
    */
   assert(util_bitcount(flags) <= 1);

   return flags;
}
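
/* For example, flags containing PIPE_CONTROL_WRITE_IMMEDIATE map to the
 * WriteImmediateData post-sync op, and get_post_sync_flags() would return
 * just that bit; passing both a write op and LRI_POST_SYNC_OP would trip
 * the assert above.
 */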

#define IS_COMPUTE_PIPELINE(batch) (batch->name == IRIS_BATCH_COMPUTE)

/**
 * Emit a series of PIPE_CONTROL commands, taking into account any
 * workarounds necessary to actually accomplish the caller's request.
 *
 * Unless otherwise noted, spec quotations in this function come from:
 *
 * Synchronization of the 3D Pipeline > PIPE_CONTROL Command > Programming
 * Restrictions for PIPE_CONTROL.
 *
 * You should not use this function directly.  Use the helpers in
 * iris_pipe_control.c instead, which may split the pipe control further.
 */
static void
iris_emit_raw_pipe_control(struct iris_batch *batch,
                           const char *reason,
                           uint32_t flags,
                           struct iris_bo *bo,
                           uint32_t offset,
                           uint64_t imm)
{
   UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
   enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
   enum pipe_control_flags non_lri_post_sync_flags =
      post_sync_flags & ~PIPE_CONTROL_LRI_POST_SYNC_OP;

   /* Recursive PIPE_CONTROL workarounds --------------------------------
    * (http://knowyourmeme.com/memes/xzibit-yo-dawg)
    *
    * We do these first because we want to look at the original operation,
    * rather than any workarounds we set.
    */
   if (GEN_GEN == 9 && (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
      /* The PIPE_CONTROL "VF Cache Invalidation Enable" bit description
       * lists several workarounds:
       *
       *    "Project: SKL, KBL, BXT
       *
       *     If the VF Cache Invalidation Enable is set to a 1 in a
       *     PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields
       *     sets to 0, with the VF Cache Invalidation Enable set to 0
       *     needs to be sent prior to the PIPE_CONTROL with VF Cache
       *     Invalidation Enable set to a 1."
       */
      iris_emit_raw_pipe_control(batch,
                                 "workaround: recursive VF cache invalidate",
                                 0, NULL, 0, 0);
   }

   /* GEN:BUG:1409226450, Wait for EU to be idle before pipe control which
    * invalidates the instruction cache
    */
   if (GEN_GEN == 12 && (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE)) {
      iris_emit_raw_pipe_control(batch,
                                 "workaround: CS stall before instruction "
                                 "cache invalidate",
                                 PIPE_CONTROL_CS_STALL |
                                 PIPE_CONTROL_STALL_AT_SCOREBOARD, bo, offset,
                                 imm);
   }

   if ((GEN_GEN == 9 || (GEN_GEN == 12 && devinfo->revision == 0 /* A0 */)) &&
       IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
      /* Project: SKL / Argument: LRI Post Sync Operation [23]
       *
       * "PIPECONTROL command with “Command Streamer Stall Enable” must be
       *  programmed prior to programming a PIPECONTROL command with "LRI
       *  Post Sync Operation" in GPGPU mode of operation (i.e when
       *  PIPELINE_SELECT command is set to GPGPU mode of operation)."
       *
       * The same text exists a few rows below for Post Sync Op.
       *
       * On Gen12 this is GEN:BUG:1607156449.
       */
      iris_emit_raw_pipe_control(batch,
                                 "workaround: CS stall before gpgpu post-sync",
                                 PIPE_CONTROL_CS_STALL, bo, offset, imm);
   }
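
   /* Note on the recursive calls above: each workaround PIPE_CONTROL is
    * emitted with a reduced set of flags that cannot re-trigger the same
    * workaround (e.g. the VF-invalidate case passes flags = 0), so the
    * recursion always bottoms out after one extra packet.
    */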
7177 /* "Flush Types" workarounds ---------------------------------------------
7178 * We do these now because they may add post-sync operations or CS stalls.
7181 if (GEN_GEN
< 11 && flags
& PIPE_CONTROL_VF_CACHE_INVALIDATE
) {
7182 /* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
7184 * "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
7185 * 'Write PS Depth Count' or 'Write Timestamp'."
7188 flags
|= PIPE_CONTROL_WRITE_IMMEDIATE
;
7189 post_sync_flags
|= PIPE_CONTROL_WRITE_IMMEDIATE
;
7190 non_lri_post_sync_flags
|= PIPE_CONTROL_WRITE_IMMEDIATE
;
7191 bo
= batch
->screen
->workaround_address
.bo
;
7192 offset
= batch
->screen
->workaround_address
.offset
;
7196 if (flags
& PIPE_CONTROL_DEPTH_STALL
) {
7197 /* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
7199 * "This bit must be DISABLED for operations other than writing
7202 * This seems like nonsense. An Ivybridge workaround requires us to
7203 * emit a PIPE_CONTROL with a depth stall and write immediate post-sync
7204 * operation. Gen8+ requires us to emit depth stalls and depth cache
7205 * flushes together. So, it's hard to imagine this means anything other
7206 * than "we originally intended this to be used for PS_DEPTH_COUNT".
7208 * We ignore the supposed restriction and do nothing.
7212 if (flags
& (PIPE_CONTROL_RENDER_TARGET_FLUSH
|
7213 PIPE_CONTROL_STALL_AT_SCOREBOARD
)) {
7214 /* From the PIPE_CONTROL instruction table, bit 12 and bit 1:
7216 * "This bit must be DISABLED for End-of-pipe (Read) fences,
7217 * PS_DEPTH_COUNT or TIMESTAMP queries."
7219 * TODO: Implement end-of-pipe checking.
7221 assert(!(post_sync_flags
& (PIPE_CONTROL_WRITE_DEPTH_COUNT
|
7222 PIPE_CONTROL_WRITE_TIMESTAMP
)));
7225 if (GEN_GEN
< 11 && (flags
& PIPE_CONTROL_STALL_AT_SCOREBOARD
)) {
7226 /* From the PIPE_CONTROL instruction table, bit 1:
7228 * "This bit is ignored if Depth Stall Enable is set.
7229 * Further, the render cache is not flushed even if Write Cache
7230 * Flush Enable bit is set."
7232 * We assert that the caller doesn't do this combination, to try and
7233 * prevent mistakes. It shouldn't hurt the GPU, though.
7235 * We skip this check on Gen11+ as the "Stall at Pixel Scoreboard"
7236 * and "Render Target Flush" combo is explicitly required for BTI
7237 * update workarounds.
7239 assert(!(flags
& (PIPE_CONTROL_DEPTH_STALL
|
7240 PIPE_CONTROL_RENDER_TARGET_FLUSH
)));

   /* PIPE_CONTROL page workarounds ------------------------------------- */

   if (GEN_GEN <= 8 && (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE)) {
      /* From the PIPE_CONTROL page itself:
       *
       *    "IVB,HSW,BDW
       *     Restriction: Pipe_control with CS-stall bit set must be issued
       *     before a pipe-control command that has the State Cache
       *     Invalidate bit set."
       */
      flags |= PIPE_CONTROL_CS_STALL;
   }

   if (flags & PIPE_CONTROL_FLUSH_LLC) {
      /* From the PIPE_CONTROL instruction table, bit 26 (Flush LLC):
       *
       *    "Project: ALL
       *     SW must always program Post-Sync Operation to "Write Immediate
       *     Data" when Flush LLC is set."
       *
       * For now, we just require the caller to do it.
       */
      assert(flags & PIPE_CONTROL_WRITE_IMMEDIATE);
   }

   /* "Post-Sync Operation" workarounds -------------------------------- */

   /* Project: All / Argument: Global Snapshot Count Reset [19]
    *
    * "This bit must not be exercised on any product.
    *  Requires stall bit ([20] of DW1) set."
    *
    * We don't use this, so we just assert that it isn't used.  The
    * PIPE_CONTROL instruction page indicates that they intended this
    * as a debug feature and don't think it is useful in production,
    * but it may actually be usable, should we ever want to.
    */
   assert((flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) == 0);

   if (flags & (PIPE_CONTROL_MEDIA_STATE_CLEAR |
                PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE)) {
      /* Project: All / Arguments:
       *
       * - Generic Media State Clear [16]
       * - Indirect State Pointers Disable [16]
       *
       * "Requires stall bit ([20] of DW1) set."
       *
       * Also, the PIPE_CONTROL instruction table, bit 16 (Generic Media
       * State Clear) says:
       *
       *    "PIPECONTROL command with “Command Streamer Stall Enable” must be
       *     programmed prior to programming a PIPECONTROL command with "Media
       *     State Clear" set in GPGPU mode of operation"
       *
       * This is a subset of the earlier rule, so there's nothing to do.
       */
      flags |= PIPE_CONTROL_CS_STALL;
   }

   if (flags & PIPE_CONTROL_STORE_DATA_INDEX) {
      /* Project: All / Argument: Store Data Index
       *
       * "Post-Sync Operation ([15:14] of DW1) must be set to something other
       *  than '0'."
       *
       * For now, we just assert that the caller does this.  We might want to
       * automatically add a write to the workaround BO...
       */
      assert(non_lri_post_sync_flags != 0);
   }

   if (flags & PIPE_CONTROL_SYNC_GFDT) {
      /* Project: All / Argument: Sync GFDT
       *
       * "Post-Sync Operation ([15:14] of DW1) must be set to something other
       *  than '0' or 0x2520[13] must be set."
       *
       * For now, we just assert that the caller does this.
       */
      assert(non_lri_post_sync_flags != 0);
   }

   if (flags & PIPE_CONTROL_TLB_INVALIDATE) {
      /* Project: IVB+ / Argument: TLB inv
       *
       *    "Requires stall bit ([20] of DW1) set."
       *
       * Also, from the PIPE_CONTROL instruction table:
       *
       *    "Project: SKL+
       *     Post Sync Operation or CS stall must be set to ensure a TLB
       *     invalidation occurs.  Otherwise no cycle will occur to the TLB
       *     cache to invalidate."
       *
       * This is not a subset of the earlier rule, so there's nothing to do.
       */
      flags |= PIPE_CONTROL_CS_STALL;
   }

   if (GEN_GEN >= 12 && ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ||
                         (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))) {
      /* From the PIPE_CONTROL instruction table, bit 28 (Tile Cache Flush
       * Enable):
       *
       *    Unified Cache (Tile Cache Disabled):
       *
       *    When the Color and Depth (Z) streams are enabled to be cached in
       *    the DC space of L2, Software must use "Render Target Cache Flush
       *    Enable" and "Depth Cache Flush Enable" along with "Tile Cache
       *    Flush" for getting the color and depth (Z) write data to be
       *    globally observable.  In this mode of operation it is not required
       *    to set "CS Stall" upon setting "Tile Cache Flush" bit.
       */
      flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
   }

   if (GEN_GEN == 9 && devinfo->gt == 4) {
      /* TODO: The big Skylake GT4 post sync op workaround */
   }

   /* "GPGPU specific workarounds" (both post-sync and flush) ------------ */

   if (IS_COMPUTE_PIPELINE(batch)) {
      if (GEN_GEN >= 9 && (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE)) {
         /* Project: SKL+ / Argument: Tex Invalidate
          *
          *    "Requires stall bit ([20] of DW) set for all GPGPU Workloads."
          */
         flags |= PIPE_CONTROL_CS_STALL;
      }

      if (GEN_GEN == 8 && (post_sync_flags ||
                           (flags & (PIPE_CONTROL_NOTIFY_ENABLE |
                                     PIPE_CONTROL_DEPTH_STALL |
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                     PIPE_CONTROL_DATA_CACHE_FLUSH)))) {
         /* Project: BDW / Arguments:
          *
          * - LRI Post Sync Operation   [23]
          * - Post Sync Op              [15:14]
          * - Notify En                 [8]
          * - Depth Stall               [13]
          * - Render Target Cache Flush [12]
          * - Depth Cache Flush         [0]
          * - DC Flush Enable           [5]
          *
          *    "Requires stall bit ([20] of DW) set for all GPGPU and Media
          *     Workloads."
          */
         flags |= PIPE_CONTROL_CS_STALL;

         /* Also, from the PIPE_CONTROL instruction table, bit 20:
          *
          *    "Project: BDW
          *     This bit must be always set when PIPE_CONTROL command is
          *     programmed by GPGPU and MEDIA workloads, except for the cases
          *     when only Read Only Cache Invalidation bits are set (State
          *     Cache Invalidation Enable, Instruction cache Invalidation
          *     Enable, Texture Cache Invalidation Enable, Constant Cache
          *     Invalidation Enable).  This is to WA FFDOP CG issue, this WA
          *     need not implemented when FF_DOP_CG is disable via "Fixed
          *     Function DOP Clock Gate Disable" bit in RC_PSMI_CTRL register."
          *
          * It sounds like we could avoid CS stalls in some cases, but we
          * don't currently bother.  This list isn't exactly the list above,
          * either...
          */
      }
   }
7414 /* "Stall" workarounds ----------------------------------------------
7415 * These have to come after the earlier ones because we may have added
7416 * some additional CS stalls above.
7419 if (GEN_GEN
< 9 && (flags
& PIPE_CONTROL_CS_STALL
)) {
7420 /* Project: PRE-SKL, VLV, CHV
7422 * "[All Stepping][All SKUs]:
7424 * One of the following must also be set:
7426 * - Render Target Cache Flush Enable ([12] of DW1)
7427 * - Depth Cache Flush Enable ([0] of DW1)
7428 * - Stall at Pixel Scoreboard ([1] of DW1)
7429 * - Depth Stall ([13] of DW1)
7430 * - Post-Sync Operation ([13] of DW1)
7431 * - DC Flush Enable ([5] of DW1)"
7433 * If we don't already have one of those bits set, we choose to add
7434 * "Stall at Pixel Scoreboard". Some of the other bits require a
7435 * CS stall as a workaround (see above), which would send us into
7436 * an infinite recursion of PIPE_CONTROLs. "Stall at Pixel Scoreboard"
7437 * appears to be safe, so we choose that.
7439 const uint32_t wa_bits
= PIPE_CONTROL_RENDER_TARGET_FLUSH
|
7440 PIPE_CONTROL_DEPTH_CACHE_FLUSH
|
7441 PIPE_CONTROL_WRITE_IMMEDIATE
|
7442 PIPE_CONTROL_WRITE_DEPTH_COUNT
|
7443 PIPE_CONTROL_WRITE_TIMESTAMP
|
7444 PIPE_CONTROL_STALL_AT_SCOREBOARD
|
7445 PIPE_CONTROL_DEPTH_STALL
|
7446 PIPE_CONTROL_DATA_CACHE_FLUSH
;
7447 if (!(flags
& wa_bits
))
7448 flags
|= PIPE_CONTROL_STALL_AT_SCOREBOARD
;
7451 if (GEN_GEN
>= 12 && (flags
& PIPE_CONTROL_DEPTH_CACHE_FLUSH
)) {
7452 /* GEN:BUG:1409600907:
7454 * "PIPE_CONTROL with Depth Stall Enable bit must be set
7455 * with any PIPE_CONTROL with Depth Flush Enable bit set.
7457 flags
|= PIPE_CONTROL_DEPTH_STALL
;

   /* Emit --------------------------------------------------------------- */

   if (INTEL_DEBUG & DEBUG_PIPE_CONTROL) {
      fprintf(stderr,
              "  PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
              (flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
              (flags & PIPE_CONTROL_CS_STALL) ? "CS " : "",
              (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD) ? "Scoreboard " : "",
              (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) ? "VF " : "",
              (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ? "RT " : "",
              (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE) ? "Const " : "",
              (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) ? "TC " : "",
              (flags & PIPE_CONTROL_DATA_CACHE_FLUSH) ? "DC " : "",
              (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH) ? "ZFlush " : "",
              (flags & PIPE_CONTROL_DEPTH_STALL) ? "ZStall " : "",
              (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE) ? "State " : "",
              (flags & PIPE_CONTROL_TLB_INVALIDATE) ? "TLB " : "",
              (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE) ? "Inst " : "",
              (flags & PIPE_CONTROL_MEDIA_STATE_CLEAR) ? "MediaClear " : "",
              (flags & PIPE_CONTROL_NOTIFY_ENABLE) ? "Notify " : "",
              (flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) ?
                 "SnapRes" : "",
              (flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE) ?
                 "ISPDis" : "",
              (flags & PIPE_CONTROL_WRITE_IMMEDIATE) ? "WriteImm " : "",
              (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT) ? "WriteZCount " : "",
              (flags & PIPE_CONTROL_WRITE_TIMESTAMP) ? "WriteTimestamp " : "",
              (flags & PIPE_CONTROL_FLUSH_HDC) ? "HDC " : "",
              imm, reason);
   }

   batch_mark_sync_for_pipe_control(batch, flags);
   iris_batch_sync_region_start(batch);

   iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
#if GEN_GEN >= 12
      pc.TileCacheFlushEnable = flags & PIPE_CONTROL_TILE_CACHE_FLUSH;
#endif
#if GEN_GEN >= 11
      pc.HDCPipelineFlushEnable = flags & PIPE_CONTROL_FLUSH_HDC;
#endif
      pc.LRIPostSyncOperation = NoLRIOperation;
      pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
      pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
      pc.StoreDataIndex = 0;
      pc.CommandStreamerStallEnable = flags & PIPE_CONTROL_CS_STALL;
      pc.GlobalSnapshotCountReset =
         flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET;
      pc.TLBInvalidate = flags & PIPE_CONTROL_TLB_INVALIDATE;
      pc.GenericMediaStateClear = flags & PIPE_CONTROL_MEDIA_STATE_CLEAR;
      pc.StallAtPixelScoreboard = flags & PIPE_CONTROL_STALL_AT_SCOREBOARD;
      pc.RenderTargetCacheFlushEnable =
         flags & PIPE_CONTROL_RENDER_TARGET_FLUSH;
      pc.DepthCacheFlushEnable = flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH;
      pc.StateCacheInvalidationEnable =
         flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE;
      pc.VFCacheInvalidationEnable = flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
      pc.ConstantCacheInvalidationEnable =
         flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE;
      pc.PostSyncOperation = flags_to_post_sync_op(flags);
      pc.DepthStallEnable = flags & PIPE_CONTROL_DEPTH_STALL;
      pc.InstructionCacheInvalidateEnable =
         flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE;
      pc.NotifyEnable = flags & PIPE_CONTROL_NOTIFY_ENABLE;
      pc.IndirectStatePointersDisable =
         flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE;
      pc.TextureCacheInvalidationEnable =
         flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
      pc.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      pc.ImmediateData = imm;
   }

   iris_batch_sync_region_end(batch);
}
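
/* Illustrative usage only (real callers go through the helpers in
 * iris_pipe_control.c):
 *
 *    iris_emit_raw_pipe_control(batch, "example: flush RT, invalidate TC",
 *                               PIPE_CONTROL_RENDER_TARGET_FLUSH |
 *                               PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 *                               PIPE_CONTROL_CS_STALL,
 *                               NULL, 0, 0);
 *
 * The workaround logic above may add further bits (e.g. a tile cache
 * flush on Gen12) before the packet reaches the batch.
 */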

#if GEN_GEN == 9
/**
 * Preemption on Gen9 has to be enabled or disabled in various cases.
 *
 * See these workarounds for preemption:
 *  - WaDisableMidObjectPreemptionForGSLineStripAdj
 *  - WaDisableMidObjectPreemptionForTrifanOrPolygon
 *  - WaDisableMidObjectPreemptionForLineLoop
 *  - WA#0798
 *
 * We don't put this in the vtable because it's only used on Gen9.
 */
void
gen9_toggle_preemption(struct iris_context *ice,
                       struct iris_batch *batch,
                       const struct pipe_draw_info *draw)
{
   struct iris_genx_state *genx = ice->state.genx;
   bool object_preemption = true;

   /* WaDisableMidObjectPreemptionForGSLineStripAdj
    *
    *    "WA: Disable mid-draw preemption when draw-call is a linestrip_adj
    *     and GS is enabled."
    */
   if (draw->mode == PIPE_PRIM_LINE_STRIP_ADJACENCY &&
       ice->shaders.prog[MESA_SHADER_GEOMETRY])
      object_preemption = false;

   /* WaDisableMidObjectPreemptionForTrifanOrPolygon
    *
    *    "TriFan miscompare in Execlist Preemption test.  Cut index that is
    *     on a previous context.  End the previous, the resume another context
    *     with a tri-fan or polygon, and the vertex count is corrupted.  If we
    *     prempt again we will cause corruption.
    *
    *     WA: Disable mid-draw preemption when draw-call has a tri-fan."
    */
   if (draw->mode == PIPE_PRIM_TRIANGLE_FAN)
      object_preemption = false;

   /* WaDisableMidObjectPreemptionForLineLoop
    *
    *    "VF Stats Counters Missing a vertex when preemption enabled.
    *
    *     WA: Disable mid-draw preemption when the draw uses a lineloop
    *     topology."
    */
   if (draw->mode == PIPE_PRIM_LINE_LOOP)
      object_preemption = false;

   /* WA#0798
    *
    *    "VF is corrupting GAFS data when preempted on an instance boundary
    *     and replayed with instancing enabled.
    *
    *     WA: Disable preemption when using instanceing."
    */
   if (draw->instance_count > 1)
      object_preemption = false;

   if (genx->object_preemption != object_preemption) {
      iris_enable_obj_preemption(batch, object_preemption);
      genx->object_preemption = object_preemption;
   }
}
#endif

static void
iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch)
{
   struct iris_genx_state *genx = ice->state.genx;

   memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer));
}

static void
iris_emit_mi_report_perf_count(struct iris_batch *batch,
                               struct iris_bo *bo,
                               uint32_t offset_in_bytes,
                               uint32_t report_id)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
      mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes,
                                   IRIS_DOMAIN_OTHER_WRITE);
      mi_rpc.ReportID = report_id;
   }
   iris_batch_sync_region_end(batch);
}
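
/* MI_REPORT_PERF_COUNT asks the command streamer to snapshot the OA
 * performance counters into bo at offset_in_bytes, tagged with report_id
 * so the perf code can match reports back to queries.  (The report is a
 * fixed-size block, typically 256 bytes on these generations.)
 */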

/**
 * Update the pixel hashing modes that determine the balancing of PS threads
 * across subslices and slices.
 *
 * \param width Width bound of the rendering area (already scaled down if \p
 *              scale is greater than 1).
 * \param height Height bound of the rendering area (already scaled down if \p
 *               scale is greater than 1).
 * \param scale The number of framebuffer samples that could potentially be
 *              affected by an individual channel of the PS thread.  This is
 *              typically one for single-sampled rendering, but for operations
 *              like CCS resolves and fast clears a single PS invocation may
 *              update a huge number of pixels, in which case a finer
 *              balancing is desirable in order to maximally utilize the
 *              bandwidth available.  UINT_MAX can be used as shorthand for
 *              "finest hashing mode available".
 */
void
genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
                        unsigned width, unsigned height, unsigned scale)
{
#if GEN_GEN == 9
   const struct gen_device_info *devinfo = &batch->screen->devinfo;
   const unsigned slice_hashing[] = {
      /* Because all Gen9 platforms with more than one slice require
       * three-way subslice hashing, a single "normal" 16x16 slice hashing
       * block is guaranteed to suffer from substantial imbalance, with one
       * subslice receiving twice as much work as the other two in the
       * slice.
       *
       * The performance impact of that would be particularly severe when
       * three-way hashing is also in use for slice balancing (which is the
       * case for all Gen9 GT4 platforms), because one of the slices
       * receives one every three 16x16 blocks in either direction, which
       * is roughly the periodicity of the underlying subslice imbalance
       * pattern ("roughly" because in reality the hardware's
       * implementation of three-way hashing doesn't do exact modulo 3
       * arithmetic, which somewhat decreases the magnitude of this effect
       * in practice).  This leads to a systematic subslice imbalance
       * within that slice regardless of the size of the primitive.  The
       * 32x32 hashing mode guarantees that the subslice imbalance within a
       * single slice hashing block is minimal, largely eliminating this
       * issue.
       */
      _32x32,
      /* Finest slice hashing mode available. */
      NORMAL
   };
   const unsigned subslice_hashing[] = {
      /* 16x16 would provide a slight cache locality benefit especially
       * visible in the sampler L1 cache efficiency of low-bandwidth
       * non-LLC platforms, but it comes at the cost of greater subslice
       * imbalance for primitives of dimensions approximately intermediate
       * between 16x4 and 16x16.
       */
      _16x4,
      /* Finest subslice hashing mode available. */
      _8x4
   };
   /* Dimensions of the smallest hashing block of a given hashing mode.  If
    * the rendering area is smaller than this there can't possibly be any
    * benefit from switching to this mode, so we optimize out the
    * transition.
    */
   const unsigned min_size[][2] = {
      { 16, 4 },
      { 8, 4 }
   };
   const unsigned idx = scale > 1;

   if (width > min_size[idx][0] || height > min_size[idx][1]) {
      uint32_t gt_mode;

      iris_pack_state(GENX(GT_MODE), &gt_mode, reg) {
         reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
         reg.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0);
         reg.SubsliceHashing = subslice_hashing[idx];
         reg.SubsliceHashingMask = -1;
      }

      iris_emit_raw_pipe_control(batch,
                                 "workaround: CS stall before GT_MODE LRI",
                                 PIPE_CONTROL_STALL_AT_SCOREBOARD |
                                 PIPE_CONTROL_CS_STALL,
                                 NULL, 0, 0);

      iris_emit_lri(batch, GT_MODE, gt_mode);

      ice->state.current_hash_scale = scale;
   }
#endif
}
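
/* For illustration: ordinary draws call this with scale == 1, so idx == 0
 * and the coarser 32x32/16x4 modes are selected, while operations like
 * CCS fast clears pass scale == UINT_MAX, so idx == 1 and the LRI
 * programs the finest modes (NORMAL slice hashing plus 8x4 subslice
 * hashing).
 */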

static void
iris_set_frontend_noop(struct pipe_context *ctx, bool enable)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER], enable)) {
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER;
      ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
   }

   if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE], enable)) {
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
      ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE;
   }
}

void
genX(init_state)(struct iris_context *ice)
{
   struct pipe_context *ctx = &ice->ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;

   ctx->create_blend_state = iris_create_blend_state;
   ctx->create_depth_stencil_alpha_state = iris_create_zsa_state;
   ctx->create_rasterizer_state = iris_create_rasterizer_state;
   ctx->create_sampler_state = iris_create_sampler_state;
   ctx->create_sampler_view = iris_create_sampler_view;
   ctx->create_surface = iris_create_surface;
   ctx->create_vertex_elements_state = iris_create_vertex_elements;
   ctx->bind_blend_state = iris_bind_blend_state;
   ctx->bind_depth_stencil_alpha_state = iris_bind_zsa_state;
   ctx->bind_sampler_states = iris_bind_sampler_states;
   ctx->bind_rasterizer_state = iris_bind_rasterizer_state;
   ctx->bind_vertex_elements_state = iris_bind_vertex_elements_state;
   ctx->delete_blend_state = iris_delete_state;
   ctx->delete_depth_stencil_alpha_state = iris_delete_state;
   ctx->delete_rasterizer_state = iris_delete_state;
   ctx->delete_sampler_state = iris_delete_state;
   ctx->delete_vertex_elements_state = iris_delete_state;
   ctx->set_blend_color = iris_set_blend_color;
   ctx->set_clip_state = iris_set_clip_state;
   ctx->set_constant_buffer = iris_set_constant_buffer;
   ctx->set_shader_buffers = iris_set_shader_buffers;
   ctx->set_shader_images = iris_set_shader_images;
   ctx->set_sampler_views = iris_set_sampler_views;
   ctx->set_compute_resources = iris_set_compute_resources;
   ctx->set_global_binding = iris_set_global_binding;
   ctx->set_tess_state = iris_set_tess_state;
   ctx->set_framebuffer_state = iris_set_framebuffer_state;
   ctx->set_polygon_stipple = iris_set_polygon_stipple;
   ctx->set_sample_mask = iris_set_sample_mask;
   ctx->set_scissor_states = iris_set_scissor_states;
   ctx->set_stencil_ref = iris_set_stencil_ref;
   ctx->set_vertex_buffers = iris_set_vertex_buffers;
   ctx->set_viewport_states = iris_set_viewport_states;
   ctx->sampler_view_destroy = iris_sampler_view_destroy;
   ctx->surface_destroy = iris_surface_destroy;
   ctx->draw_vbo = iris_draw_vbo;
   ctx->launch_grid = iris_launch_grid;
   ctx->create_stream_output_target = iris_create_stream_output_target;
   ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
   ctx->set_stream_output_targets = iris_set_stream_output_targets;
   ctx->set_frontend_noop = iris_set_frontend_noop;
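
   /* The hooks above are the Gallium entry points: e.g. a GL state change
    * that binds a blend CSO arrives via ctx->bind_blend_state and lands in
    * iris_bind_blend_state earlier in this file, which records the CSO and
    * flags the corresponding dirty bits for the next draw.
    */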

   screen->vtbl.destroy_state = iris_destroy_state;
   screen->vtbl.init_render_context = iris_init_render_context;
   screen->vtbl.init_compute_context = iris_init_compute_context;
   screen->vtbl.upload_render_state = iris_upload_render_state;
   screen->vtbl.update_surface_base_address = iris_update_surface_base_address;
   screen->vtbl.upload_compute_state = iris_upload_compute_state;
   screen->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
   screen->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
   screen->vtbl.rebind_buffer = iris_rebind_buffer;
   screen->vtbl.load_register_reg32 = iris_load_register_reg32;
   screen->vtbl.load_register_reg64 = iris_load_register_reg64;
   screen->vtbl.load_register_imm32 = iris_load_register_imm32;
   screen->vtbl.load_register_imm64 = iris_load_register_imm64;
   screen->vtbl.load_register_mem32 = iris_load_register_mem32;
   screen->vtbl.load_register_mem64 = iris_load_register_mem64;
   screen->vtbl.store_register_mem32 = iris_store_register_mem32;
   screen->vtbl.store_register_mem64 = iris_store_register_mem64;
   screen->vtbl.store_data_imm32 = iris_store_data_imm32;
   screen->vtbl.store_data_imm64 = iris_store_data_imm64;
   screen->vtbl.copy_mem_mem = iris_copy_mem_mem;
   screen->vtbl.derived_program_state_size = iris_derived_program_state_size;
   screen->vtbl.store_derived_program_state = iris_store_derived_program_state;
   screen->vtbl.create_so_decl_list = iris_create_so_decl_list;
   screen->vtbl.populate_vs_key = iris_populate_vs_key;
   screen->vtbl.populate_tcs_key = iris_populate_tcs_key;
   screen->vtbl.populate_tes_key = iris_populate_tes_key;
   screen->vtbl.populate_gs_key = iris_populate_gs_key;
   screen->vtbl.populate_fs_key = iris_populate_fs_key;
   screen->vtbl.populate_cs_key = iris_populate_cs_key;
   screen->vtbl.lost_genx_state = iris_lost_genx_state;

   ice->state.dirty = ~0ull;
   ice->state.stage_dirty = ~0ull;

   ice->state.statistics_counters_enabled = true;

   ice->state.sample_mask = 0xffff;
   ice->state.num_viewports = 1;
   ice->state.prim_mode = PIPE_PRIM_MAX;
   ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
   ice->draw.derived_params.drawid = -1;

   /* Make a 1x1x1 null surface for unbound textures */
   void *null_surf_map =
      upload_state(ice->state.surface_uploader, &ice->state.unbound_tex,
                   4 * GENX(RENDER_SURFACE_STATE_length), 64);
   isl_null_fill_state(&screen->isl_dev, null_surf_map, isl_extent3d(1, 1, 1));
   ice->state.unbound_tex.offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(ice->state.unbound_tex.res));

   /* Default all scissor rectangles to be empty regions. */
   for (int i = 0; i < IRIS_MAX_VIEWPORTS; i++) {
      ice->state.scissors[i] = (struct pipe_scissor_state) {
         .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,