/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * ============================= GENXML CODE =============================
 *              [This file is compiled once per generation.]
 * =======================================================================
 *
 * This is the main state upload code.
 *
 * Gallium uses Constant State Objects, or CSOs, for most state.  Large,
 * complex, or highly reusable state can be created once, and bound and
 * rebound multiple times.  This is modeled with the pipe->create_*_state()
 * and pipe->bind_*_state() hooks.  Highly dynamic or inexpensive state is
 * streamed out on the fly, via pipe->set_*_state() hooks.
 *
 * OpenGL involves frequently mutating context state, which is mirrored in
 * core Mesa by highly mutable data structures.  However, most applications
 * typically draw the same things over and over - from frame to frame, most
 * of the same objects are still visible and need to be redrawn.  So, rather
 * than inventing new state all the time, applications usually mutate to swap
 * between known states that we've seen before.
 *
 * Gallium isolates us from this mutation by tracking API state, and
 * distilling it into a set of Constant State Objects, or CSOs.  Large,
 * complex, or typically reusable state can be created once, then reused
 * multiple times.  Drivers can create and store their own associated data.
 * This create/bind model corresponds to the pipe->create_*_state() and
 * pipe->bind_*_state() driver hooks.
 *
 * Some state is cheap to create, or expected to be highly dynamic.  Rather
 * than creating and caching piles of CSOs for these, Gallium simply streams
 * them out, via the pipe->set_*_state() driver hooks.
 *
 * To reduce draw time overhead, we try to compute as much state at create
 * time as possible.  Wherever possible, we translate the Gallium pipe state
 * to 3DSTATE commands, and store those commands in the CSO.  At draw time,
 * we can simply memcpy them into a batch buffer.
 *
 * No hardware matches the abstraction perfectly, so some commands require
 * information from multiple CSOs.  In this case, we can store two copies
 * of the packet (one in each CSO), and simply | together their DWords at
 * draw time.  Sometimes the second set is trivial (one or two fields), so
 * we simply pack it at draw time.
 *
 * There are two main components in the file below.  First, the CSO hooks
 * create/bind/track state.  The second are the draw-time upload functions,
 * iris_upload_render_state() and iris_upload_compute_state(), which read
 * the context state and emit the commands into the actual batch.
 */
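
/* As an illustrative sketch of the "| together their DWords" approach
 * described above (variable names invented for the example, not a literal
 * excerpt from this driver):
 *
 *    uint32_t dwords[GENX(3DSTATE_SF_length)];
 *    for (int i = 0; i < GENX(3DSTATE_SF_length); i++)
 *       dwords[i] = cso_rast->sf[i] | dynamic_sf[i];
 *
 * The draw-time upload code performs merges of exactly this shape before
 * copying the result into the batch.
 */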
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_dual_blend.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "util/u_viewport.h"
#include "util/u_memory.h"
#include "drm-uapi/i915_drm.h"

#include "intel/compiler/brw_compiler.h"
#include "intel/common/gen_aux_map.h"
#include "intel/common/gen_l3_config.h"
#include "intel/common/gen_sample_positions.h"
#include "iris_batch.h"
#include "iris_context.h"
#include "iris_defines.h"
#include "iris_pipe.h"
#include "iris_resource.h"

#include "iris_genx_macros.h"
#include "intel/common/gen_guardband.h"
/**
 * Statically assert that PIPE_* enums match the hardware packets.
 * (As long as they match, we don't need to translate them.)
 */
UNUSED static void
pipe_asserts()
{
#define PIPE_ASSERT(x) STATIC_ASSERT((int)x)

   /* pipe_logicop happens to match the hardware. */
   PIPE_ASSERT(PIPE_LOGICOP_CLEAR == LOGICOP_CLEAR);
   PIPE_ASSERT(PIPE_LOGICOP_NOR == LOGICOP_NOR);
   PIPE_ASSERT(PIPE_LOGICOP_AND_INVERTED == LOGICOP_AND_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_COPY_INVERTED == LOGICOP_COPY_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_AND_REVERSE == LOGICOP_AND_REVERSE);
   PIPE_ASSERT(PIPE_LOGICOP_INVERT == LOGICOP_INVERT);
   PIPE_ASSERT(PIPE_LOGICOP_XOR == LOGICOP_XOR);
   PIPE_ASSERT(PIPE_LOGICOP_NAND == LOGICOP_NAND);
   PIPE_ASSERT(PIPE_LOGICOP_AND == LOGICOP_AND);
   PIPE_ASSERT(PIPE_LOGICOP_EQUIV == LOGICOP_EQUIV);
   PIPE_ASSERT(PIPE_LOGICOP_NOOP == LOGICOP_NOOP);
   PIPE_ASSERT(PIPE_LOGICOP_OR_INVERTED == LOGICOP_OR_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_COPY == LOGICOP_COPY);
   PIPE_ASSERT(PIPE_LOGICOP_OR_REVERSE == LOGICOP_OR_REVERSE);
   PIPE_ASSERT(PIPE_LOGICOP_OR == LOGICOP_OR);
   PIPE_ASSERT(PIPE_LOGICOP_SET == LOGICOP_SET);

   /* pipe_blendfactor happens to match the hardware. */
   PIPE_ASSERT(PIPE_BLENDFACTOR_ONE == BLENDFACTOR_ONE);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_COLOR == BLENDFACTOR_SRC_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA == BLENDFACTOR_SRC_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_ALPHA == BLENDFACTOR_DST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_COLOR == BLENDFACTOR_DST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE == BLENDFACTOR_SRC_ALPHA_SATURATE);
   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_COLOR == BLENDFACTOR_CONST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_ALPHA == BLENDFACTOR_CONST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_COLOR == BLENDFACTOR_SRC1_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_ALPHA == BLENDFACTOR_SRC1_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_ZERO == BLENDFACTOR_ZERO);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_COLOR == BLENDFACTOR_INV_SRC_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_ALPHA == BLENDFACTOR_INV_SRC_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_ALPHA == BLENDFACTOR_INV_DST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_COLOR == BLENDFACTOR_INV_DST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_COLOR == BLENDFACTOR_INV_CONST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_ALPHA == BLENDFACTOR_INV_CONST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_COLOR == BLENDFACTOR_INV_SRC1_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_ALPHA == BLENDFACTOR_INV_SRC1_ALPHA);

   /* pipe_blend_func happens to match the hardware. */
   PIPE_ASSERT(PIPE_BLEND_ADD == BLENDFUNCTION_ADD);
   PIPE_ASSERT(PIPE_BLEND_SUBTRACT == BLENDFUNCTION_SUBTRACT);
   PIPE_ASSERT(PIPE_BLEND_REVERSE_SUBTRACT == BLENDFUNCTION_REVERSE_SUBTRACT);
   PIPE_ASSERT(PIPE_BLEND_MIN == BLENDFUNCTION_MIN);
   PIPE_ASSERT(PIPE_BLEND_MAX == BLENDFUNCTION_MAX);

   /* pipe_stencil_op happens to match the hardware. */
   PIPE_ASSERT(PIPE_STENCIL_OP_KEEP == STENCILOP_KEEP);
   PIPE_ASSERT(PIPE_STENCIL_OP_ZERO == STENCILOP_ZERO);
   PIPE_ASSERT(PIPE_STENCIL_OP_REPLACE == STENCILOP_REPLACE);
   PIPE_ASSERT(PIPE_STENCIL_OP_INCR == STENCILOP_INCRSAT);
   PIPE_ASSERT(PIPE_STENCIL_OP_DECR == STENCILOP_DECRSAT);
   PIPE_ASSERT(PIPE_STENCIL_OP_INCR_WRAP == STENCILOP_INCR);
   PIPE_ASSERT(PIPE_STENCIL_OP_DECR_WRAP == STENCILOP_DECR);
   PIPE_ASSERT(PIPE_STENCIL_OP_INVERT == STENCILOP_INVERT);

   /* pipe_sprite_coord_mode happens to match 3DSTATE_SBE */
   PIPE_ASSERT(PIPE_SPRITE_COORD_UPPER_LEFT == UPPERLEFT);
   PIPE_ASSERT(PIPE_SPRITE_COORD_LOWER_LEFT == LOWERLEFT);

#undef PIPE_ASSERT
}
static unsigned
translate_prim_type(enum pipe_prim_type prim, uint8_t verts_per_patch)
{
   static const unsigned map[] = {
      [PIPE_PRIM_POINTS]                   = _3DPRIM_POINTLIST,
      [PIPE_PRIM_LINES]                    = _3DPRIM_LINELIST,
      [PIPE_PRIM_LINE_LOOP]                = _3DPRIM_LINELOOP,
      [PIPE_PRIM_LINE_STRIP]               = _3DPRIM_LINESTRIP,
      [PIPE_PRIM_TRIANGLES]                = _3DPRIM_TRILIST,
      [PIPE_PRIM_TRIANGLE_STRIP]           = _3DPRIM_TRISTRIP,
      [PIPE_PRIM_TRIANGLE_FAN]             = _3DPRIM_TRIFAN,
      [PIPE_PRIM_QUADS]                    = _3DPRIM_QUADLIST,
      [PIPE_PRIM_QUAD_STRIP]               = _3DPRIM_QUADSTRIP,
      [PIPE_PRIM_POLYGON]                  = _3DPRIM_POLYGON,
      [PIPE_PRIM_LINES_ADJACENCY]          = _3DPRIM_LINELIST_ADJ,
      [PIPE_PRIM_LINE_STRIP_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
      [PIPE_PRIM_TRIANGLES_ADJACENCY]      = _3DPRIM_TRILIST_ADJ,
      [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
      [PIPE_PRIM_PATCHES]                  = _3DPRIM_PATCHLIST_1 - 1,
   };

   return map[prim] + (prim == PIPE_PRIM_PATCHES ? verts_per_patch : 0);
}
static unsigned
translate_compare_func(enum pipe_compare_func pipe_func)
{
   static const unsigned map[] = {
      [PIPE_FUNC_NEVER]    = COMPAREFUNCTION_NEVER,
      [PIPE_FUNC_LESS]     = COMPAREFUNCTION_LESS,
      [PIPE_FUNC_EQUAL]    = COMPAREFUNCTION_EQUAL,
      [PIPE_FUNC_LEQUAL]   = COMPAREFUNCTION_LEQUAL,
      [PIPE_FUNC_GREATER]  = COMPAREFUNCTION_GREATER,
      [PIPE_FUNC_NOTEQUAL] = COMPAREFUNCTION_NOTEQUAL,
      [PIPE_FUNC_GEQUAL]   = COMPAREFUNCTION_GEQUAL,
      [PIPE_FUNC_ALWAYS]   = COMPAREFUNCTION_ALWAYS,
   };
   return map[pipe_func];
}
static unsigned
translate_shadow_func(enum pipe_compare_func pipe_func)
{
   /* Gallium specifies the result of shadow comparisons as:
    *
    *    1 if ref <op> texel,
    *    0 otherwise.
    *
    * The hardware does:
    *
    *    0 if texel <op> ref,
    *    1 otherwise.
    *
    * So we need to flip the operator and also negate.
    */
   static const unsigned map[] = {
      [PIPE_FUNC_NEVER]    = PREFILTEROPALWAYS,
      [PIPE_FUNC_LESS]     = PREFILTEROPLEQUAL,
      [PIPE_FUNC_EQUAL]    = PREFILTEROPNOTEQUAL,
      [PIPE_FUNC_LEQUAL]   = PREFILTEROPLESS,
      [PIPE_FUNC_GREATER]  = PREFILTEROPGEQUAL,
      [PIPE_FUNC_NOTEQUAL] = PREFILTEROPEQUAL,
      [PIPE_FUNC_GEQUAL]   = PREFILTEROPGREATER,
      [PIPE_FUNC_ALWAYS]   = PREFILTEROPNEVER,
   };
   return map[pipe_func];
}
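
/* Worked example of the flip-and-negate above: PIPE_FUNC_LEQUAL means
 * "1 if ref <= texel".  The hardware yields "1 if !(texel <op> ref)", and
 * !(texel < ref) == (texel >= ref) == (ref <= texel), so LEQUAL maps to
 * PREFILTEROPLESS.
 */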
static unsigned
translate_cull_mode(unsigned pipe_face)
{
   static const unsigned map[4] = {
      [PIPE_FACE_NONE]           = CULLMODE_NONE,
      [PIPE_FACE_FRONT]          = CULLMODE_FRONT,
      [PIPE_FACE_BACK]           = CULLMODE_BACK,
      [PIPE_FACE_FRONT_AND_BACK] = CULLMODE_BOTH,
   };
   return map[pipe_face];
}
static unsigned
translate_fill_mode(unsigned pipe_polymode)
{
   static const unsigned map[4] = {
      [PIPE_POLYGON_MODE_FILL]           = FILL_MODE_SOLID,
      [PIPE_POLYGON_MODE_LINE]           = FILL_MODE_WIREFRAME,
      [PIPE_POLYGON_MODE_POINT]          = FILL_MODE_POINT,
      [PIPE_POLYGON_MODE_FILL_RECTANGLE] = FILL_MODE_SOLID,
   };
   return map[pipe_polymode];
}
static unsigned
translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
{
   static const unsigned map[] = {
      [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
      [PIPE_TEX_MIPFILTER_LINEAR]  = MIPFILTER_LINEAR,
      [PIPE_TEX_MIPFILTER_NONE]    = MIPFILTER_NONE,
   };
   return map[pipe_mip];
}
static unsigned
translate_wrap(unsigned pipe_wrap)
{
   static const unsigned map[] = {
      [PIPE_TEX_WRAP_REPEAT]                 = TCM_WRAP,
      [PIPE_TEX_WRAP_CLAMP]                  = TCM_HALF_BORDER,
      [PIPE_TEX_WRAP_CLAMP_TO_EDGE]          = TCM_CLAMP,
      [PIPE_TEX_WRAP_CLAMP_TO_BORDER]        = TCM_CLAMP_BORDER,
      [PIPE_TEX_WRAP_MIRROR_REPEAT]          = TCM_MIRROR,
      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE]   = TCM_MIRROR_ONCE,

      /* These are unsupported. */
      [PIPE_TEX_WRAP_MIRROR_CLAMP]           = -1,
      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
   };
   return map[pipe_wrap];
}
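
/* Note that legacy GL_CLAMP (PIPE_TEX_WRAP_CLAMP) maps to TCM_HALF_BORDER:
 * coordinates clamp only to a half texel inside the border, so linear
 * filtering at the edge still blends edge texels with the border color,
 * which matches GL_CLAMP semantics.
 */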
/**
 * Allocate space for some indirect state.
 *
 * Return a pointer to the map (to fill it out) and a state ref (for
 * referring to the state in GPU commands).
 */
static void *
upload_state(struct u_upload_mgr *uploader,
             struct iris_state_ref *ref,
             unsigned size,
             unsigned alignment)
{
   void *p = NULL;
   u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
   return p;
}
/**
 * Stream out temporary/short-lived state.
 *
 * This allocates space, pins the BO, and includes the BO address in the
 * returned offset (which works because all state lives in 32-bit memory
 * zones).
 */
static void *
stream_state(struct iris_batch *batch,
             struct u_upload_mgr *uploader,
             struct pipe_resource **out_res,
             unsigned size,
             unsigned alignment,
             uint32_t *out_offset)
{
   void *ptr = NULL;

   u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);

   struct iris_bo *bo = iris_resource_bo(*out_res);
   iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

   iris_record_state_size(batch->state_sizes,
                          bo->gtt_offset + *out_offset, size);

   *out_offset += iris_bo_offset_from_base_address(bo);

   return ptr;
}
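
/* Illustrative arithmetic (numbers invented for the example): if a BO in the
 * dynamic state zone sits 0x10000 bytes past the Dynamic State Base Address
 * and u_upload_alloc returns an offset of 0x40 within it, the final
 * *out_offset is 0x10040, which can be written directly into a state-pointer
 * field of a GPU command.
 */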
/**
 * stream_state() + memcpy.
 */
static uint32_t
emit_state(struct iris_batch *batch,
           struct u_upload_mgr *uploader,
           struct pipe_resource **out_res,
           const void *data,
           unsigned size,
           unsigned alignment)
{
   unsigned offset = 0;
   void *map =
      stream_state(batch, uploader, out_res, size, alignment, &offset);

   if (map)
      memcpy(map, data, size);

   return offset;
}
/**
 * Did field 'x' change between 'old_cso' and 'new_cso'?
 *
 * (If so, we may want to set some dirty flags.)
 */
#define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
#define cso_changed_memcmp(x) \
   (!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
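
/* Typical usage, as seen in the bind hooks later in this file:
 *
 *    if (cso_changed(alpha.func))
 *       ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
 */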
static void
flush_before_state_base_change(struct iris_batch *batch)
{
   const struct gen_device_info *devinfo = &batch->screen->devinfo;

   /* Flush before emitting STATE_BASE_ADDRESS.
    *
    * This isn't documented anywhere in the PRM.  However, it seems to be
    * necessary prior to changing the surface state base address.  We've
    * seen issues in Vulkan where we get GPU hangs when using multi-level
    * command buffers which clear depth, reset state base address, and then
    * go render stuff.
    *
    * Normally, in GL, we would trust the kernel to do sufficient stalls
    * and flushes prior to executing our batch.  However, it doesn't seem
    * as if the kernel's flushing is always sufficient and we don't want to
    * rely on it.
    *
    * We make this an end-of-pipe sync instead of a normal flush because we
    * do not know the current status of the GPU.  On Haswell at least,
    * having a fast-clear operation in flight at the same time as a normal
    * rendering operation can cause hangs.  Since the kernel's flushing is
    * insufficient, we need to ensure that any rendering operations from
    * other processes are definitely complete before we try to do our own
    * rendering.  It's a bit of a big hammer but it appears to work.
    */
   iris_emit_end_of_pipe_sync(batch,
                              "change STATE_BASE_ADDRESS (flushes)",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH |
                              PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                              PIPE_CONTROL_DATA_CACHE_FLUSH |
                              /* GEN:BUG:1606662791:
                               *
                               *   Software must program PIPE_CONTROL command
                               *   with "HDC Pipeline Flush" prior to
                               *   programming of the below two non-pipeline
                               *   state:
                               *      * STATE_BASE_ADDRESS
                               *      * 3DSTATE_BINDING_TABLE_POOL_ALLOC
                               */
                              ((GEN_GEN == 12 && devinfo->revision == 0 /* A0 */ ?
                                PIPE_CONTROL_FLUSH_HDC : 0)));
}
static void
flush_after_state_base_change(struct iris_batch *batch)
{
   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables.  From the Broadwell PRM,
    * Shared Function > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software.  It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * Broadwell PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever in
    * regards to surface state and binding tables.  Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX: As far as we have been able to determine through
    * experimentation, flushing the texture cache appears to be
    * sufficient.  The theory here is that all of the sampling/rendering
    * units cache the binding table in the texture cache.  However, we have
    * yet to be able to actually confirm this.
    */
   iris_emit_end_of_pipe_sync(batch,
                              "change STATE_BASE_ADDRESS (invalidates)",
                              PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                              PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                              PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}
static void
_iris_emit_lri(struct iris_batch *batch, uint32_t reg, uint32_t val)
{
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset = reg;
      lri.DataDWord = val;
   }
}
#define iris_emit_lri(b, r, v) _iris_emit_lri(b, GENX(r##_num), v)
static void
_iris_emit_lrr(struct iris_batch *batch, uint32_t dst, uint32_t src)
{
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
      lrr.SourceRegisterAddress = src;
      lrr.DestinationRegisterAddress = dst;
   }
}
static void
iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
                         uint32_t src)
{
   _iris_emit_lrr(batch, dst, src);
}

static void
iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
                         uint32_t src)
{
   _iris_emit_lrr(batch, dst, src);
   _iris_emit_lrr(batch, dst + 4, src + 4);
}
static void
iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
                         uint32_t val)
{
   _iris_emit_lri(batch, reg, val);
}

static void
iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
                         uint64_t val)
{
   _iris_emit_lri(batch, reg + 0, val & 0xffffffff);
   _iris_emit_lri(batch, reg + 4, val >> 32);
}
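
/* The MMIO register file is 32 bits wide; a 64-bit register is exposed as a
 * pair of 32-bit halves.  The 64-bit helpers in this file therefore issue
 * two 32-bit operations: the low DWord at 'reg' and the high DWord at
 * 'reg + 4'.
 */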
/**
 * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
 */
static void
iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
                         struct iris_bo *bo, uint32_t offset)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = reg;
      lrm.MemoryAddress = ro_bo(bo, offset);
   }
   iris_batch_sync_region_end(batch);
}

/**
 * Load a 64-bit value from a buffer into a MMIO register via
 * two MI_LOAD_REGISTER_MEM commands.
 */
static void
iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
                         struct iris_bo *bo, uint32_t offset)
{
   iris_load_register_mem32(batch, reg + 0, bo, offset + 0);
   iris_load_register_mem32(batch, reg + 4, bo, offset + 4);
}
static void
iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
                          struct iris_bo *bo, uint32_t offset,
                          bool predicated)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
      srm.RegisterAddress = reg;
      srm.MemoryAddress = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      srm.PredicateEnable = predicated;
   }
   iris_batch_sync_region_end(batch);
}

static void
iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
                          struct iris_bo *bo, uint32_t offset,
                          bool predicated)
{
   iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated);
   iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated);
}
static void
iris_store_data_imm32(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint32_t imm)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) {
      sdi.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      sdi.ImmediateData = imm;
   }
   iris_batch_sync_region_end(batch);
}
static void
iris_store_data_imm64(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint64_t imm)
{
   /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of
    * 2 in genxml but it's actually variable length and we need 5 DWords.
    */
   void *map = iris_get_command_space(batch, 4 * 5);
   iris_batch_sync_region_start(batch);
   _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) {
      sdi.DWordLength = 5 - 2;
      sdi.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      sdi.ImmediateData = imm;
   }
   iris_batch_sync_region_end(batch);
}
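
/* The "5 - 2" above reflects the command streamer's length convention: a
 * packet's DWordLength field holds (total DWords - 2), so this 5-DWord
 * packet (header, two address DWords, two immediate DWords) stores 3.
 */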
static void
iris_copy_mem_mem(struct iris_batch *batch,
                  struct iris_bo *dst_bo, uint32_t dst_offset,
                  struct iris_bo *src_bo, uint32_t src_offset,
                  unsigned bytes)
{
   /* MI_COPY_MEM_MEM operates on DWords. */
   assert(bytes % 4 == 0);
   assert(dst_offset % 4 == 0);
   assert(src_offset % 4 == 0);
   iris_batch_sync_region_start(batch);

   for (unsigned i = 0; i < bytes; i += 4) {
      iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
         cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i,
                                             IRIS_DOMAIN_OTHER_WRITE);
         cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
      }
   }

   iris_batch_sync_region_end(batch);
}
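
/* Each MI_COPY_MEM_MEM moves a single DWord, so the loop above emits one
 * command per 4 bytes; copying 16 bytes, for example, costs four commands.
 */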
static void
emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
{
#if GEN_GEN >= 8 && GEN_GEN < 10
   /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
    *
    *   Software must clear the COLOR_CALC_STATE Valid field in
    *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
    *   with Pipeline Select set to GPGPU.
    *
    * The internal hardware docs recommend the same workaround for Gen9
    * hardware too.
    */
   if (pipeline == GPGPU)
      iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
#endif

   /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
    * PIPELINE_SELECT [DevBWR+]":
    *
    *    Software must ensure all the write caches are flushed through a
    *    stalling PIPE_CONTROL command followed by another PIPE_CONTROL
    *    command to invalidate read only caches prior to programming
    *    MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
    */
   iris_emit_pipe_control_flush(batch,
                                "workaround: PIPELINE_SELECT flushes (1/2)",
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_DATA_CACHE_FLUSH |
                                PIPE_CONTROL_CS_STALL);

   iris_emit_pipe_control_flush(batch,
                                "workaround: PIPELINE_SELECT flushes (2/2)",
                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                PIPE_CONTROL_INSTRUCTION_INVALIDATE);

   iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
#if GEN_GEN >= 9
      sel.MaskBits = 3;
#endif
      sel.PipelineSelection = pipeline;
   }
}
static void
init_glk_barrier_mode(struct iris_batch *batch, uint32_t value)
{
#if GEN_GEN == 9
   /* From the documentation for GLK Barrier Mode:
    *
    *    "This chicken bit works around a hardware issue with barrier
    *     logic encountered when switching between GPGPU and 3D pipelines.
    *     To workaround the issue, this mode bit should be set after a
    *     pipeline is selected."
    */
   uint32_t reg_val;
   iris_pack_state(GENX(SLICE_COMMON_ECO_CHICKEN1), &reg_val, reg) {
      reg.GLKBarrierMode = value;
      reg.GLKBarrierModeMask = 1;
   }
   iris_emit_lri(batch, SLICE_COMMON_ECO_CHICKEN1, reg_val);
#endif
}
static void
init_state_base_address(struct iris_batch *batch)
{
   uint32_t mocs = batch->screen->isl_dev.mocs.internal;
   flush_before_state_base_change(batch);

   /* We program most base addresses once at context initialization time.
    * Each base address points at a 4GB memory zone, and never needs to
    * change.  See iris_bufmgr.h for a description of the memory zones.
    *
    * The one exception is Surface State Base Address, which needs to be
    * updated occasionally.  See iris_binder.c for the details there.
    */
   iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.GeneralStateMOCS            = mocs;
      sba.StatelessDataPortAccessMOCS = mocs;
      sba.DynamicStateMOCS            = mocs;
      sba.IndirectObjectMOCS          = mocs;
      sba.InstructionMOCS             = mocs;
      sba.SurfaceStateMOCS            = mocs;

      sba.GeneralStateBaseAddressModifyEnable   = true;
      sba.DynamicStateBaseAddressModifyEnable   = true;
      sba.IndirectObjectBaseAddressModifyEnable = true;
      sba.InstructionBaseAddressModifyEnable    = true;
      sba.GeneralStateBufferSizeModifyEnable    = true;
      sba.DynamicStateBufferSizeModifyEnable    = true;
#if (GEN_GEN >= 9)
      sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
      sba.BindlessSurfaceStateMOCS = mocs;
#endif
      sba.IndirectObjectBufferSizeModifyEnable  = true;
      sba.InstructionBuffersizeModifyEnable     = true;

      sba.InstructionBaseAddress  = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
      sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);

      sba.GeneralStateBufferSize   = 0xfffff;
      sba.IndirectObjectBufferSize = 0xfffff;
      sba.InstructionBufferSize    = 0xfffff;
      sba.DynamicStateBufferSize   = 0xfffff;
   }

   flush_after_state_base_change(batch);
}
static void
iris_emit_l3_config(struct iris_batch *batch,
                    const struct gen_l3_config *cfg)
{
   uint32_t reg_val;
   assert(cfg || GEN_GEN >= 12);

#if GEN_GEN >= 12
#define L3_ALLOCATION_REG GENX(L3ALLOC)
#define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
#else
#define L3_ALLOCATION_REG GENX(L3CNTLREG)
#define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
#endif

   iris_pack_state(L3_ALLOCATION_REG, &reg_val, reg) {
#if GEN_GEN < 11
      reg.SLMEnable = cfg->n[GEN_L3P_SLM] > 0;
#endif
#if GEN_GEN == 11
      /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
       * in L3CNTLREG register.  The default setting of the bit is not the
       * desirable behavior.
       */
      reg.ErrorDetectionBehaviorControl = true;
      reg.UseFullWays = true;
#endif
      if (GEN_GEN < 12 || cfg) {
         reg.URBAllocation = cfg->n[GEN_L3P_URB];
         reg.ROAllocation  = cfg->n[GEN_L3P_RO];
         reg.DCAllocation  = cfg->n[GEN_L3P_DC];
         reg.AllAllocation = cfg->n[GEN_L3P_ALL];
      } else {
#if GEN_GEN >= 12
         reg.L3FullWayAllocationEnable = true;
#endif
      }
   }

   _iris_emit_lri(batch, L3_ALLOCATION_REG_num, reg_val);
}
#if GEN_GEN == 9
static void
iris_enable_obj_preemption(struct iris_batch *batch, bool enable)
{
   uint32_t reg_val;

   /* A fixed function pipe flush is required before modifying this field */
   iris_emit_end_of_pipe_sync(batch, enable ? "enable preemption"
                                            : "disable preemption",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* enable object level preemption */
   iris_pack_state(GENX(CS_CHICKEN1), &reg_val, reg) {
      reg.ReplayMode = enable;
      reg.ReplayModeMask = true;
   }
   iris_emit_lri(batch, CS_CHICKEN1, reg_val);
}
#endif
#if GEN_GEN == 11
static void
iris_upload_slice_hashing_state(struct iris_batch *batch)
{
   const struct gen_device_info *devinfo = &batch->screen->devinfo;
   int subslices_delta =
      devinfo->ppipe_subslices[0] - devinfo->ppipe_subslices[1];
   if (subslices_delta == 0)
      return;

   struct iris_context *ice = NULL;
   ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]);
   assert(&ice->batches[IRIS_BATCH_RENDER] == batch);

   unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
   uint32_t hash_address;
   struct pipe_resource *tmp = NULL;
   uint32_t *map = stream_state(batch, ice->state.dynamic_uploader, &tmp,
                                size, 64, &hash_address);
   pipe_resource_reference(&tmp, NULL);

   struct GENX(SLICE_HASH_TABLE) table0 = {
      .Entry = {
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }
      }
   };

   struct GENX(SLICE_HASH_TABLE) table1 = {
      .Entry = {
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }
      }
   };

   const struct GENX(SLICE_HASH_TABLE) *table =
      subslices_delta < 0 ? &table0 : &table1;
   GENX(SLICE_HASH_TABLE_pack)(NULL, map, table);

   iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
      ptr.SliceHashStatePointerValid = true;
      ptr.SliceHashTableStatePointer = hash_address;
   }

   iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) {
      mode.SliceHashingTableEnable = true;
   }
}
#endif
static void
iris_alloc_push_constants(struct iris_batch *batch)
{
   /* For now, we set a static partitioning of the push constant area,
    * assuming that all stages could be in use.
    *
    * TODO: Try lazily allocating the HS/DS/GS sections as needed, and
    *       see if that improves performance by offering more space to
    *       the VS/FS when those aren't in use.  Also, try dynamically
    *       enabling/disabling it like i965 does.  This would be more
    *       stalls and may not actually help; we don't know yet.
    */
   for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
      iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
         alloc._3DCommandSubOpcode = 18 + i;
         alloc.ConstantBufferOffset = 6 * i;
         alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
      }
   }
}
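
/* This loop relies on 3DSTATE_PUSH_CONSTANT_ALLOC_{VS,HS,DS,GS,PS} having
 * identical layouts apart from consecutive sub-opcodes starting at 18 (the
 * VS variant), so packing the VS packet with _3DCommandSubOpcode = 18 + i
 * covers all five stages.
 */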
#if GEN_GEN == 12
static void
init_aux_map_state(struct iris_batch *batch);
#endif
/**
 * Upload initial GPU state for any kind of context.
 *
 * These need to happen for both render and compute.
 */
static void
iris_init_common_context(struct iris_batch *batch)
{
#if GEN_GEN == 11
   uint32_t reg_val;

   iris_pack_state(GENX(SAMPLER_MODE), &reg_val, reg) {
      reg.HeaderlessMessageforPreemptableContexts = 1;
      reg.HeaderlessMessageforPreemptableContextsMask = 1;
   }
   iris_emit_lri(batch, SAMPLER_MODE, reg_val);

   /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */
   iris_pack_state(GENX(HALF_SLICE_CHICKEN7), &reg_val, reg) {
      reg.EnabledTexelOffsetPrecisionFix = 1;
      reg.EnabledTexelOffsetPrecisionFixMask = 1;
   }
   iris_emit_lri(batch, HALF_SLICE_CHICKEN7, reg_val);
#endif
}
/**
 * Upload the initial GPU state for a render context.
 *
 * This sets some invariant state that needs to be programmed a particular
 * way, but we never actually change.
 */
static void
iris_init_render_context(struct iris_batch *batch)
{
   UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
   uint32_t reg_val;

   iris_batch_sync_region_start(batch);

   emit_pipeline_select(batch, _3D);

   iris_emit_l3_config(batch, batch->screen->l3_config_3d);

   init_state_base_address(batch);

   iris_init_common_context(batch);

#if GEN_GEN >= 9
   iris_pack_state(GENX(CS_DEBUG_MODE2), &reg_val, reg) {
      reg.CONSTANT_BUFFERAddressOffsetDisable = true;
      reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
   }
   iris_emit_lri(batch, CS_DEBUG_MODE2, reg_val);
#else
   iris_pack_state(GENX(INSTPM), &reg_val, reg) {
      reg.CONSTANT_BUFFERAddressOffsetDisable = true;
      reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
   }
   iris_emit_lri(batch, INSTPM, reg_val);
#endif

#if GEN_GEN == 9
   iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
      reg.FloatBlendOptimizationEnable = true;
      reg.FloatBlendOptimizationEnableMask = true;
      reg.PartialResolveDisableInVC = true;
      reg.PartialResolveDisableInVCMask = true;
   }
   iris_emit_lri(batch, CACHE_MODE_1, reg_val);

   if (devinfo->is_geminilake)
      init_glk_barrier_mode(batch, GLK_BARRIER_MODE_3D_HULL);
#endif

#if GEN_GEN == 11
   iris_pack_state(GENX(TCCNTLREG), &reg_val, reg) {
      reg.L3DataPartialWriteMergingEnable = true;
      reg.ColorZPartialWriteMergingEnable = true;
      reg.URBPartialWriteMergingEnable = true;
      reg.TCDisable = true;
   }
   iris_emit_lri(batch, TCCNTLREG, reg_val);

   /* Hardware specification recommends disabling repacking for the
    * compatibility with decompression mechanism in display controller.
    */
   if (devinfo->disable_ccs_repack) {
      iris_pack_state(GENX(CACHE_MODE_0), &reg_val, reg) {
         reg.DisableRepackingforCompression = true;
         reg.DisableRepackingforCompressionMask = true;
      }
      iris_emit_lri(batch, CACHE_MODE_0, reg_val);
   }

   iris_upload_slice_hashing_state(batch);
#endif

   /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
    * changing it dynamically.  We set it to the maximum size here, and
    * instead include the render target dimensions in the viewport, so
    * viewport extents clipping takes care of pruning stray geometry.
    */
   iris_emit_cmd(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = UINT16_MAX;
      rect.ClippedDrawingRectangleYMax = UINT16_MAX;
   }

   /* Set the initial MSAA sample positions. */
   iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_PATTERN), pat) {
      GEN_SAMPLE_POS_1X(pat._1xSample);
      GEN_SAMPLE_POS_2X(pat._2xSample);
      GEN_SAMPLE_POS_4X(pat._4xSample);
      GEN_SAMPLE_POS_8X(pat._8xSample);
#if GEN_GEN >= 9
      GEN_SAMPLE_POS_16X(pat._16xSample);
#endif
   }

   /* Use the legacy AA line coverage computation. */
   iris_emit_cmd(batch, GENX(3DSTATE_AA_LINE_PARAMETERS), foo);

   /* Disable chromakeying (it's for media) */
   iris_emit_cmd(batch, GENX(3DSTATE_WM_CHROMAKEY), foo);

   /* We want regular rendering, not special HiZ operations. */
   iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);

   /* No polygon stippling offsets are necessary. */
   /* TODO: may need to set an offset for origin-UL framebuffers */
   iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);

   iris_alloc_push_constants(batch);

#if GEN_GEN == 12
   init_aux_map_state(batch);
#endif

   iris_batch_sync_region_end(batch);
}
static void
iris_init_compute_context(struct iris_batch *batch)
{
   UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;

   iris_batch_sync_region_start(batch);

   /* GEN:BUG:1607854226:
    *
    *  Start with pipeline in 3D mode to set the STATE_BASE_ADDRESS.
    */
#if GEN_GEN == 12
   emit_pipeline_select(batch, _3D);
#else
   emit_pipeline_select(batch, GPGPU);
#endif

   iris_emit_l3_config(batch, batch->screen->l3_config_cs);

   init_state_base_address(batch);

   iris_init_common_context(batch);

#if GEN_GEN == 12
   emit_pipeline_select(batch, GPGPU);
#endif

#if GEN_GEN == 9
   if (devinfo->is_geminilake)
      init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
#endif

#if GEN_GEN == 12
   init_aux_map_state(batch);
#endif

   iris_batch_sync_region_end(batch);
}
struct iris_vertex_buffer_state {
   /** The VERTEX_BUFFER_STATE hardware structure. */
   uint32_t state[GENX(VERTEX_BUFFER_STATE_length)];

   /** The resource to source vertex data from. */
   struct pipe_resource *resource;

   int offset;
};

struct iris_depth_buffer_state {
   /* Depth/HiZ/Stencil related hardware packets. */
   uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
                    GENX(3DSTATE_STENCIL_BUFFER_length) +
                    GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
                    GENX(3DSTATE_CLEAR_PARAMS_length) +
                    GENX(MI_LOAD_REGISTER_IMM_length) * 2];
};

/**
 * Generation-specific context state (ice->state.genx->...).
 *
 * Most state can go in iris_context directly, but these encode hardware
 * packets which vary by generation.
 */
struct iris_genx_state {
   struct iris_vertex_buffer_state vertex_buffers[33];
   uint32_t last_index_buffer[GENX(3DSTATE_INDEX_BUFFER_length)];

   struct iris_depth_buffer_state depth_buffer;

   uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];

#if GEN_GEN == 8
   bool pma_fix_enabled;
#endif

#if GEN_GEN == 9
   /* Is object level preemption enabled? */
   bool object_preemption;
#endif

   struct {
#if GEN_GEN == 8
      struct brw_image_param image_param[PIPE_MAX_SHADER_IMAGES];
#endif
   } shaders[MESA_SHADER_STAGES];
};
/**
 * The pipe->set_blend_color() driver hook.
 *
 * This corresponds to our COLOR_CALC_STATE.
 */
static void
iris_set_blend_color(struct pipe_context *ctx,
                     const struct pipe_blend_color *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   /* Our COLOR_CALC_STATE is exactly pipe_blend_color, so just memcpy */
   memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
   ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
}
/**
 * Gallium CSO for blend state (see pipe_blend_state).
 */
struct iris_blend_state {
   /** Partial 3DSTATE_PS_BLEND */
   uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];

   /** Partial BLEND_STATE */
   uint32_t blend_state[GENX(BLEND_STATE_length) +
                        BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)];

   bool alpha_to_coverage; /* for shader key */

   /** Bitfield of whether blending is enabled for RT[i] - for aux resolves */
   uint8_t blend_enables;

   /** Bitfield of whether color writes are enabled for RT[i] */
   uint8_t color_write_enables;

   /** Does RT[0] use dual color blending? */
   bool dual_color_blending;
};
static enum pipe_blendfactor
fix_blendfactor(enum pipe_blendfactor f, bool alpha_to_one)
{
   if (alpha_to_one) {
      if (f == PIPE_BLENDFACTOR_SRC1_ALPHA)
         return PIPE_BLENDFACTOR_ONE;

      if (f == PIPE_BLENDFACTOR_INV_SRC1_ALPHA)
         return PIPE_BLENDFACTOR_ZERO;
   }

   return f;
}
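
/* Rationale (our reading of the substitution, not a documentation quote):
 * with alpha-to-one enabled, the alpha seen by blending is forced to 1.0,
 * so a SRC1_ALPHA factor evaluates to 1.0 (ONE) and INV_SRC1_ALPHA to 0.0
 * (ZERO); folding that in at create time avoids a draw-time dependency on
 * the second source color.
 */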
/**
 * The pipe->create_blend_state() driver hook.
 *
 * Translates a pipe_blend_state into iris_blend_state.
 */
static void *
iris_create_blend_state(struct pipe_context *ctx,
                        const struct pipe_blend_state *state)
{
   struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
   uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);

   cso->blend_enables = 0;
   cso->color_write_enables = 0;
   STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS <= 8);

   cso->alpha_to_coverage = state->alpha_to_coverage;

   bool indep_alpha_blend = false;

   for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
      const struct pipe_rt_blend_state *rt =
         &state->rt[state->independent_blend_enable ? i : 0];

      enum pipe_blendfactor src_rgb =
         fix_blendfactor(rt->rgb_src_factor, state->alpha_to_one);
      enum pipe_blendfactor src_alpha =
         fix_blendfactor(rt->alpha_src_factor, state->alpha_to_one);
      enum pipe_blendfactor dst_rgb =
         fix_blendfactor(rt->rgb_dst_factor, state->alpha_to_one);
      enum pipe_blendfactor dst_alpha =
         fix_blendfactor(rt->alpha_dst_factor, state->alpha_to_one);

      if (rt->rgb_func != rt->alpha_func ||
          src_rgb != src_alpha || dst_rgb != dst_alpha)
         indep_alpha_blend = true;

      if (rt->blend_enable)
         cso->blend_enables |= 1u << i;

      if (rt->colormask)
         cso->color_write_enables |= 1u << i;

      iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
         be.LogicOpEnable = state->logicop_enable;
         be.LogicOpFunction = state->logicop_func;

         be.PreBlendSourceOnlyClampEnable = false;
         be.ColorClampRange = COLORCLAMP_RTFORMAT;
         be.PreBlendColorClampEnable = true;
         be.PostBlendColorClampEnable = true;

         be.ColorBufferBlendEnable = rt->blend_enable;

         be.ColorBlendFunction          = rt->rgb_func;
         be.AlphaBlendFunction          = rt->alpha_func;
         be.SourceBlendFactor           = src_rgb;
         be.SourceAlphaBlendFactor      = src_alpha;
         be.DestinationBlendFactor      = dst_rgb;
         be.DestinationAlphaBlendFactor = dst_alpha;

         be.WriteDisableRed   = !(rt->colormask & PIPE_MASK_R);
         be.WriteDisableGreen = !(rt->colormask & PIPE_MASK_G);
         be.WriteDisableBlue  = !(rt->colormask & PIPE_MASK_B);
         be.WriteDisableAlpha = !(rt->colormask & PIPE_MASK_A);
      }
      blend_entry += GENX(BLEND_STATE_ENTRY_length);
   }

   iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
      /* pb.HasWriteableRT is filled in at draw time.
       * pb.AlphaTestEnable is filled in at draw time.
       *
       * pb.ColorBufferBlendEnable is filled in at draw time so we can avoid
       * setting it when dual color blending without an appropriate shader.
       */

      pb.AlphaToCoverageEnable = state->alpha_to_coverage;
      pb.IndependentAlphaBlendEnable = indep_alpha_blend;

      pb.SourceBlendFactor =
         fix_blendfactor(state->rt[0].rgb_src_factor, state->alpha_to_one);
      pb.SourceAlphaBlendFactor =
         fix_blendfactor(state->rt[0].alpha_src_factor, state->alpha_to_one);
      pb.DestinationBlendFactor =
         fix_blendfactor(state->rt[0].rgb_dst_factor, state->alpha_to_one);
      pb.DestinationAlphaBlendFactor =
         fix_blendfactor(state->rt[0].alpha_dst_factor, state->alpha_to_one);
   }

   iris_pack_state(GENX(BLEND_STATE), cso->blend_state, bs) {
      bs.AlphaToCoverageEnable = state->alpha_to_coverage;
      bs.IndependentAlphaBlendEnable = indep_alpha_blend;
      bs.AlphaToOneEnable = state->alpha_to_one;
      bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
      bs.ColorDitherEnable = state->dither;
      /* bl.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
   }

   cso->dual_color_blending = util_blend_state_is_dual(state, 0);

   return cso;
}
/**
 * The pipe->bind_blend_state() driver hook.
 *
 * Bind a blending CSO and flag related dirty bits.
 */
static void
iris_bind_blend_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_blend_state *cso = state;

   ice->state.cso_blend = cso;

   ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
   ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
   ice->state.stage_dirty |= ice->state.stage_dirty_for_nos[IRIS_NOS_BLEND];

   if (GEN_GEN == 8)
      ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}
/**
 * Return true if the FS writes to any color outputs which are not disabled
 * via color masking.
 */
static bool
has_writeable_rt(const struct iris_blend_state *cso_blend,
                 const struct shader_info *fs_info)
{
   if (!fs_info)
      return false;

   unsigned rt_outputs = fs_info->outputs_written >> FRAG_RESULT_DATA0;

   if (fs_info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR))
      rt_outputs = (1 << BRW_MAX_DRAW_BUFFERS) - 1;

   return cso_blend->color_write_enables & rt_outputs;
}
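
/* Example: an FS writing FRAG_RESULT_DATA0 and DATA1 yields rt_outputs of
 * 0x3; even if RT0's writes are fully masked off, color_write_enables of
 * 0x2 still makes this return true.  Writing FRAG_RESULT_COLOR
 * (gl_FragColor) broadcasts to every render target, hence the all-ones
 * mask in that case.
 */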
/**
 * Gallium CSO for depth, stencil, and alpha testing state.
 */
struct iris_depth_stencil_alpha_state {
   /** Partial 3DSTATE_WM_DEPTH_STENCIL. */
   uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];

#if GEN_GEN >= 12
   uint32_t depth_bounds[GENX(3DSTATE_DEPTH_BOUNDS_length)];
#endif

   /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
   struct pipe_alpha_state alpha;

   /** Outbound to resolve and cache set tracking. */
   bool depth_writes_enabled;
   bool stencil_writes_enabled;

   /** Outbound to Gen8-9 PMA stall equations */
   bool depth_test_enabled;
};
/**
 * The pipe->create_depth_stencil_alpha_state() driver hook.
 *
 * We encode most of 3DSTATE_WM_DEPTH_STENCIL, and just save off the alpha
 * testing state since we need pieces of it in a variety of places.
 */
static void *
iris_create_zsa_state(struct pipe_context *ctx,
                      const struct pipe_depth_stencil_alpha_state *state)
{
   struct iris_depth_stencil_alpha_state *cso =
      malloc(sizeof(struct iris_depth_stencil_alpha_state));

   bool two_sided_stencil = state->stencil[1].enabled;

   cso->alpha = state->alpha;
   cso->depth_writes_enabled = state->depth.writemask;
   cso->depth_test_enabled = state->depth.enabled;
   cso->stencil_writes_enabled =
      state->stencil[0].writemask != 0 ||
      (two_sided_stencil && state->stencil[1].writemask != 0);

   /* gallium frontends need to optimize away EQUAL writes for us. */
   assert(!(state->depth.func == PIPE_FUNC_EQUAL && state->depth.writemask));

   iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), cso->wmds, wmds) {
      wmds.StencilFailOp = state->stencil[0].fail_op;
      wmds.StencilPassDepthFailOp = state->stencil[0].zfail_op;
      wmds.StencilPassDepthPassOp = state->stencil[0].zpass_op;
      wmds.StencilTestFunction =
         translate_compare_func(state->stencil[0].func);
      wmds.BackfaceStencilFailOp = state->stencil[1].fail_op;
      wmds.BackfaceStencilPassDepthFailOp = state->stencil[1].zfail_op;
      wmds.BackfaceStencilPassDepthPassOp = state->stencil[1].zpass_op;
      wmds.BackfaceStencilTestFunction =
         translate_compare_func(state->stencil[1].func);
      wmds.DepthTestFunction = translate_compare_func(state->depth.func);
      wmds.DoubleSidedStencilEnable = two_sided_stencil;
      wmds.StencilTestEnable = state->stencil[0].enabled;
      wmds.StencilBufferWriteEnable =
         state->stencil[0].writemask != 0 ||
         (two_sided_stencil && state->stencil[1].writemask != 0);
      wmds.DepthTestEnable = state->depth.enabled;
      wmds.DepthBufferWriteEnable = state->depth.writemask;
      wmds.StencilTestMask = state->stencil[0].valuemask;
      wmds.StencilWriteMask = state->stencil[0].writemask;
      wmds.BackfaceStencilTestMask = state->stencil[1].valuemask;
      wmds.BackfaceStencilWriteMask = state->stencil[1].writemask;
      /* wmds.[Backface]StencilReferenceValue are merged later */
#if GEN_GEN >= 12
      wmds.StencilReferenceValueModifyDisable = true;
#endif
   }

#if GEN_GEN >= 12
   iris_pack_command(GENX(3DSTATE_DEPTH_BOUNDS), cso->depth_bounds, depth_bounds) {
      depth_bounds.DepthBoundsTestValueModifyDisable = false;
      depth_bounds.DepthBoundsTestEnableModifyDisable = false;
      depth_bounds.DepthBoundsTestEnable = state->depth.bounds_test;
      depth_bounds.DepthBoundsTestMinValue = state->depth.bounds_min;
      depth_bounds.DepthBoundsTestMaxValue = state->depth.bounds_max;
   }
#endif

   return cso;
}
/**
 * The pipe->bind_depth_stencil_alpha_state() driver hook.
 *
 * Bind a depth/stencil/alpha CSO and flag related dirty bits.
 */
static void
iris_bind_zsa_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_depth_stencil_alpha_state *old_cso = ice->state.cso_zsa;
   struct iris_depth_stencil_alpha_state *new_cso = state;

   if (new_cso) {
      if (cso_changed(alpha.ref_value))
         ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;

      if (cso_changed(alpha.enabled))
         ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;

      if (cso_changed(alpha.func))
         ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;

      if (cso_changed(depth_writes_enabled))
         ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;

      ice->state.depth_writes_enabled = new_cso->depth_writes_enabled;
      ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled;

#if GEN_GEN >= 12
      if (cso_changed(depth_bounds))
         ice->state.dirty |= IRIS_DIRTY_DEPTH_BOUNDS;
#endif
   }

   ice->state.cso_zsa = new_cso;
   ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
   ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
   ice->state.stage_dirty |=
      ice->state.stage_dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];

   if (GEN_GEN == 8)
      ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}
static bool
want_pma_fix(struct iris_context *ice)
{
   UNUSED struct iris_screen *screen = (void *) ice->ctx.screen;
   UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
   const struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
   const struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   const struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
   const struct iris_blend_state *cso_blend = ice->state.cso_blend;

   /* In very specific combinations of state, we can instruct Gen8-9 hardware
    * to avoid stalling at the pixel mask array.  The state equations are
    * documented in these places:
    *
    * - Gen8 Depth PMA Fix:   CACHE_MODE_1::NP_PMA_FIX_ENABLE
    * - Gen9 Stencil PMA Fix: CACHE_MODE_0::STC PMA Optimization Enable
    *
    * Both equations share some common elements:
    *
    *    no_hiz_op =
    *       !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    *         3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    *         3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    *         3DSTATE_WM_HZ_OP::StencilBufferClear) &&
    *
    *    killpixels =
    *       3DSTATE_WM::ForceKillPix != ForceOff &&
    *       (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
    *        3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
    *        3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
    *        3DSTATE_PS_BLEND::AlphaTestEnable ||
    *        3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
    *
    *    (Technically the stencil PMA treats ForceKillPix differently,
    *     but I think this is a documentation oversight, and we don't
    *     ever use it in this way, so it doesn't matter).
    *
    *    common_pma_fix =
    *       3DSTATE_WM::ForceThreadDispatch != 1 &&
    *       3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0 &&
    *       3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
    *       3DSTATE_DEPTH_BUFFER::HIZ Enable &&
    *       3DSTATE_WM::EDSC_Mode != EDSC_PREPS &&
    *       3DSTATE_PS_EXTRA::PixelShaderValid &&
    *       no_hiz_op
    *
    * These are always true:
    *
    *    3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0
    *    3DSTATE_PS_EXTRA::PixelShaderValid
    *
    * Also, we never use the normal drawing path for HiZ ops; these are true:
    *
    *    !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    *      3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    *      3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    *      3DSTATE_WM_HZ_OP::StencilBufferClear)
    *
    * This happens sometimes:
    *
    *    3DSTATE_WM::ForceThreadDispatch != 1
    *
    * However, we choose to ignore it as it either agrees with the signal
    * (dispatch was already enabled, so nothing out of the ordinary), or
    * there are no framebuffer attachments (so no depth or HiZ anyway,
    * meaning the PMA signal will already be disabled).
    */

   if (!cso_fb->zsbuf)
      return false;

   struct iris_resource *zres, *sres;
   iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, &zres, &sres);

   /* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
    * 3DSTATE_DEPTH_BUFFER::HIZ Enable
    */
   if (!zres || !iris_resource_level_has_hiz(zres, cso_fb->zsbuf->u.tex.level))
      return false;

   /* 3DSTATE_WM::EDSC_Mode != EDSC_PREPS */
   if (wm_prog_data->early_fragment_tests)
      return false;

   /* 3DSTATE_WM::ForceKillPix != ForceOff &&
    * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
    *  3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
    *  3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
    *  3DSTATE_PS_BLEND::AlphaTestEnable ||
    *  3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
    */
   bool killpixels = wm_prog_data->uses_kill || wm_prog_data->uses_omask ||
                     cso_blend->alpha_to_coverage || cso_zsa->alpha.enabled;

   /* The Gen8 depth PMA equation becomes:
    *
    *    depth_writes =
    *       3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
    *       3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE
    *
    *    stencil_writes =
    *       3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
    *       3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE &&
    *       3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE
    *
    *    pma_fix =
    *       common_pma_fix &&
    *       3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable &&
    *       ((killpixels && (depth_writes || stencil_writes)) ||
    *        3DSTATE_PS_EXTRA::PixelShaderComputedDepthMode != PSCDEPTH_OFF)
    */
   if (!cso_zsa->depth_test_enabled)
      return false;

   return wm_prog_data->computed_depth_mode != PSCDEPTH_OFF ||
          (killpixels && (cso_zsa->depth_writes_enabled ||
                          (sres && cso_zsa->stencil_writes_enabled)));
}
void
genX(update_pma_fix)(struct iris_context *ice,
                     struct iris_batch *batch,
                     bool enable)
{
#if GEN_GEN == 8
   struct iris_genx_state *genx = ice->state.genx;

   if (genx->pma_fix_enabled == enable)
      return;

   genx->pma_fix_enabled = enable;

   /* According to the Broadwell PIPE_CONTROL documentation, software should
    * emit a PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set
    * prior to the LRI.  If stencil buffer writes are enabled, then a Render
    * Cache Flush is also necessary.
    *
    * The Gen9 docs say to use a depth stall rather than a command streamer
    * stall.  However, the hardware seems to violently disagree.  A full
    * command streamer stall seems to be needed in both cases.
    */
   iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)",
                                PIPE_CONTROL_CS_STALL |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH);

   uint32_t reg_val;
   iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
      reg.NPPMAFixEnable = enable;
      reg.NPEarlyZFailsDisable = enable;
      reg.NPPMAFixEnableMask = true;
      reg.NPEarlyZFailsDisableMask = true;
   }
   iris_emit_lri(batch, CACHE_MODE_1, reg_val);

   /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
    * Flush bits is often necessary.  We do it regardless because it's easier.
    * The render cache flush is also necessary if stencil writes are enabled.
    *
    * Again, the Gen9 docs give a different set of flushes but the Broadwell
    * flushes seem to work just as well.
    */
   iris_emit_pipe_control_flush(batch, "PMA fix change (2/2)",
                                PIPE_CONTROL_DEPTH_STALL |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH);
#endif
}
/**
 * Gallium CSO for rasterizer state.
 */
struct iris_rasterizer_state {
   uint32_t sf[GENX(3DSTATE_SF_length)];
   uint32_t clip[GENX(3DSTATE_CLIP_length)];
   uint32_t raster[GENX(3DSTATE_RASTER_length)];
   uint32_t wm[GENX(3DSTATE_WM_length)];
   uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];

   uint8_t num_clip_plane_consts;
   bool clip_halfz; /* for CC_VIEWPORT */
   bool depth_clip_near; /* for CC_VIEWPORT */
   bool depth_clip_far; /* for CC_VIEWPORT */
   bool flatshade; /* for shader state */
   bool flatshade_first; /* for stream output */
   bool clamp_fragment_color; /* for shader state */
   bool light_twoside; /* for shader state */
   bool rasterizer_discard; /* for 3DSTATE_STREAMOUT and 3DSTATE_CLIP */
   bool half_pixel_center; /* for 3DSTATE_MULTISAMPLE */
   bool line_stipple_enable;
   bool poly_stipple_enable;
   bool multisample;
   bool force_persample_interp;
   bool conservative_rasterization;
   bool fill_mode_point_or_line;
   enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
   uint16_t sprite_coord_enable;
};
static float
get_line_width(const struct pipe_rasterizer_state *state)
{
   float line_width = state->line_width;

   /* From the OpenGL 4.4 spec:
    *
    * "The actual width of non-antialiased lines is determined by rounding
    *  the supplied width to the nearest integer, then clamping it to the
    *  implementation-dependent maximum non-antialiased line width."
    */
   if (!state->multisample && !state->line_smooth)
      line_width = roundf(state->line_width);

   if (!state->multisample && state->line_smooth && line_width < 1.5f) {
      /* For 1 pixel line thickness or less, the general anti-aliasing
       * algorithm gives up, and a garbage line is generated.  Setting a
       * Line Width of 0.0 specifies the rasterization of the "thinnest"
       * (one-pixel-wide), non-antialiased lines.
       *
       * Lines rendered with zero Line Width are rasterized using the
       * "Grid Intersection Quantization" rules as specified by the
       * "Zero-Width (Cosmetic) Line Rasterization" section of the docs.
       */
      line_width = 0.0f;
   }

   return line_width;
}
/**
 * The pipe->create_rasterizer_state() driver hook.
 */
static void *
iris_create_rasterizer_state(struct pipe_context *ctx,
                             const struct pipe_rasterizer_state *state)
{
   struct iris_rasterizer_state *cso =
      malloc(sizeof(struct iris_rasterizer_state));

   cso->multisample = state->multisample;
   cso->force_persample_interp = state->force_persample_interp;
   cso->clip_halfz = state->clip_halfz;
   cso->depth_clip_near = state->depth_clip_near;
   cso->depth_clip_far = state->depth_clip_far;
   cso->flatshade = state->flatshade;
   cso->flatshade_first = state->flatshade_first;
   cso->clamp_fragment_color = state->clamp_fragment_color;
   cso->light_twoside = state->light_twoside;
   cso->rasterizer_discard = state->rasterizer_discard;
   cso->half_pixel_center = state->half_pixel_center;
   cso->sprite_coord_mode = state->sprite_coord_mode;
   cso->sprite_coord_enable = state->sprite_coord_enable;
   cso->line_stipple_enable = state->line_stipple_enable;
   cso->poly_stipple_enable = state->poly_stipple_enable;
   cso->conservative_rasterization =
      state->conservative_raster_mode == PIPE_CONSERVATIVE_RASTER_POST_SNAP;

   cso->fill_mode_point_or_line =
      state->fill_front == PIPE_POLYGON_MODE_LINE ||
      state->fill_front == PIPE_POLYGON_MODE_POINT ||
      state->fill_back == PIPE_POLYGON_MODE_LINE ||
      state->fill_back == PIPE_POLYGON_MODE_POINT;

   if (state->clip_plane_enable != 0)
      cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1;
   else
      cso->num_clip_plane_consts = 0;

   float line_width = get_line_width(state);
   iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
      sf.StatisticsEnable = true;
      sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
      sf.LineEndCapAntialiasingRegionWidth =
         state->line_smooth ? _10pixels : _05pixels;
      sf.LastPixelEnable = state->line_last_pixel;
      sf.LineWidth = line_width;
      sf.SmoothPointEnable = (state->point_smooth || state->multisample) &&
                             !state->point_quad_rasterization;
      sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
      sf.PointWidth = state->point_size;

      if (state->flatshade_first) {
         sf.TriangleFanProvokingVertexSelect = 1;
      } else {
         sf.TriangleStripListProvokingVertexSelect = 2;
         sf.TriangleFanProvokingVertexSelect = 2;
         sf.LineStripListProvokingVertexSelect = 1;
      }
   }
   iris_pack_command(GENX(3DSTATE_RASTER), cso->raster, rr) {
      rr.FrontWinding = state->front_ccw ? CounterClockwise : Clockwise;
      rr.CullMode = translate_cull_mode(state->cull_face);
      rr.FrontFaceFillMode = translate_fill_mode(state->fill_front);
      rr.BackFaceFillMode = translate_fill_mode(state->fill_back);
      rr.DXMultisampleRasterizationEnable = state->multisample;
      rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
      rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
      rr.GlobalDepthOffsetEnablePoint = state->offset_point;
      rr.GlobalDepthOffsetConstant = state->offset_units * 2;
      rr.GlobalDepthOffsetScale = state->offset_scale;
      rr.GlobalDepthOffsetClamp = state->offset_clamp;
      rr.SmoothPointEnable = state->point_smooth;
      rr.AntialiasingEnable = state->line_smooth;
      rr.ScissorRectangleEnable = state->scissor;
#if GEN_GEN >= 9
      rr.ViewportZNearClipTestEnable = state->depth_clip_near;
      rr.ViewportZFarClipTestEnable = state->depth_clip_far;
      rr.ConservativeRasterizationEnable =
         cso->conservative_rasterization;
#else
      rr.ViewportZClipTestEnable = (state->depth_clip_near || state->depth_clip_far);
#endif
   }
   iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
      /* cl.NonPerspectiveBarycentricEnable is filled in at draw time from
       * the FS program; cl.ForceZeroRTAIndexEnable is filled in from the FB.
       */
      cl.EarlyCullEnable = true;
      cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
      cl.ForceUserClipDistanceClipTestEnableBitmask = true;
      cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
      cl.GuardbandClipTestEnable = true;
      cl.ClipEnable = true;
      cl.MinimumPointWidth = 0.125;
      cl.MaximumPointWidth = 255.875;

      if (state->flatshade_first) {
         cl.TriangleFanProvokingVertexSelect = 1;
      } else {
         cl.TriangleStripListProvokingVertexSelect = 2;
         cl.TriangleFanProvokingVertexSelect = 2;
         cl.LineStripListProvokingVertexSelect = 1;
      }
   }
   iris_pack_command(GENX(3DSTATE_WM), cso->wm, wm) {
      /* wm.BarycentricInterpolationMode and wm.EarlyDepthStencilControl are
       * filled in at draw time from the FS program.
       */
      wm.LineAntialiasingRegionWidth = _10pixels;
      wm.LineEndCapAntialiasingRegionWidth = _05pixels;
      wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
      wm.LineStippleEnable = state->line_stipple_enable;
      wm.PolygonStippleEnable = state->poly_stipple_enable;
   }
   /* Remap from 0..255 back to 1..256 */
   const unsigned line_stipple_factor = state->line_stipple_factor + 1;

   iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
      if (state->line_stipple_enable) {
         line.LineStipplePattern = state->line_stipple_pattern;
         line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
         line.LineStippleRepeatCount = line_stipple_factor;
      }
   }

   return cso;
}
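/* Example of the stipple remap: a Gallium line_stipple_factor of 0 (GL
 * factor 1) becomes a repeat count of 1, with an inverse repeat count of
 * 1.0f / 1 = 1.0.
 */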
/**
 * The pipe->bind_rasterizer_state() driver hook.
 *
 * Bind a rasterizer CSO and flag related dirty bits.
 */
static void
iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_rasterizer_state *old_cso = ice->state.cso_rast;
   struct iris_rasterizer_state *new_cso = state;

   if (new_cso) {
      /* Try to avoid re-emitting 3DSTATE_LINE_STIPPLE, it's non-pipelined */
      if (cso_changed_memcmp(line_stipple))
         ice->state.dirty |= IRIS_DIRTY_LINE_STIPPLE;

      if (cso_changed(half_pixel_center))
         ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;

      if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
         ice->state.dirty |= IRIS_DIRTY_WM;

      if (cso_changed(rasterizer_discard))
         ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;

      if (cso_changed(flatshade_first))
         ice->state.dirty |= IRIS_DIRTY_STREAMOUT;

      if (cso_changed(depth_clip_near) || cso_changed(depth_clip_far) ||
          cso_changed(clip_halfz))
         ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;

      if (cso_changed(sprite_coord_enable) ||
          cso_changed(sprite_coord_mode) ||
          cso_changed(light_twoside))
         ice->state.dirty |= IRIS_DIRTY_SBE;

      if (cso_changed(conservative_rasterization))
         ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
   }

   ice->state.cso_rast = new_cso;
   ice->state.dirty |= IRIS_DIRTY_RASTER;
   ice->state.dirty |= IRIS_DIRTY_CLIP;
   ice->state.stage_dirty |=
      ice->state.stage_dirty_for_nos[IRIS_NOS_RASTERIZER];
}
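/* The cso_changed()/cso_changed_memcmp() helpers used above presumably
 * compare a field between old_cso and new_cso (treating a NULL old CSO as
 * changed), so only the packets affected by an actual difference are
 * flagged dirty.
 */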
/**
 * Return true if the given wrap mode requires the border color to exist.
 *
 * (We can skip uploading it if the sampler isn't going to use it.)
 */
static bool
wrap_mode_needs_border_color(unsigned wrap_mode)
{
   return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
}
/**
 * Gallium CSO for sampler state.
 */
struct iris_sampler_state {
   union pipe_color_union border_color;
   bool needs_border_color;

   uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
};
/**
 * The pipe->create_sampler_state() driver hook.
 *
 * We fill out SAMPLER_STATE (except for the border color pointer), and
 * store that on the CPU.  It doesn't make sense to upload it to a GPU
 * buffer object yet, because 3DSTATE_SAMPLER_STATE_POINTERS requires
 * all bound sampler states to be in contiguous memory.
 */
static void *
iris_create_sampler_state(struct pipe_context *ctx,
                          const struct pipe_sampler_state *state)
{
   struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);

   if (!cso)
      return NULL;

   STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
   STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);

   unsigned wrap_s = translate_wrap(state->wrap_s);
   unsigned wrap_t = translate_wrap(state->wrap_t);
   unsigned wrap_r = translate_wrap(state->wrap_r);

   memcpy(&cso->border_color, &state->border_color, sizeof(cso->border_color));

   cso->needs_border_color = wrap_mode_needs_border_color(wrap_s) ||
                             wrap_mode_needs_border_color(wrap_t) ||
                             wrap_mode_needs_border_color(wrap_r);

   float min_lod = state->min_lod;
   unsigned mag_img_filter = state->mag_img_filter;

   // XXX: explain this code ported from ilo...I don't get it at all...
   if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE &&
       state->min_lod > 0.0f) {
      min_lod = 0.0f;
      mag_img_filter = state->min_img_filter;
   }
   iris_pack_state(GENX(SAMPLER_STATE), cso->sampler_state, samp) {
      samp.TCXAddressControlMode = wrap_s;
      samp.TCYAddressControlMode = wrap_t;
      samp.TCZAddressControlMode = wrap_r;
      samp.CubeSurfaceControlMode = state->seamless_cube_map;
      samp.NonnormalizedCoordinateEnable = !state->normalized_coords;
      samp.MinModeFilter = state->min_img_filter;
      samp.MagModeFilter = mag_img_filter;
      samp.MipModeFilter = translate_mip_filter(state->min_mip_filter);
      samp.MaximumAnisotropy = RATIO21;

      if (state->max_anisotropy >= 2) {
         if (state->min_img_filter == PIPE_TEX_FILTER_LINEAR) {
            samp.MinModeFilter = MAPFILTER_ANISOTROPIC;
            samp.AnisotropicAlgorithm = EWAApproximation;
         }

         if (state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)
            samp.MagModeFilter = MAPFILTER_ANISOTROPIC;

         samp.MaximumAnisotropy =
            MIN2((state->max_anisotropy - 2) / 2, RATIO161);
      }

      /* Set address rounding bits if not using nearest filtering. */
      if (state->min_img_filter != PIPE_TEX_FILTER_NEAREST) {
         samp.UAddressMinFilterRoundingEnable = true;
         samp.VAddressMinFilterRoundingEnable = true;
         samp.RAddressMinFilterRoundingEnable = true;
      }

      if (state->mag_img_filter != PIPE_TEX_FILTER_NEAREST) {
         samp.UAddressMagFilterRoundingEnable = true;
         samp.VAddressMagFilterRoundingEnable = true;
         samp.RAddressMagFilterRoundingEnable = true;
      }

      if (state->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
         samp.ShadowFunction = translate_shadow_func(state->compare_func);

      const float hw_max_lod = GEN_GEN >= 7 ? 14 : 13;

      samp.LODPreClampMode = CLAMP_MODE_OGL;
      samp.MinLOD = CLAMP(min_lod, 0, hw_max_lod);
      samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
      samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);

      /* .BorderColorPointer is filled in by iris_bind_sampler_states. */
   }

   return cso;
}
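/* The (state->max_anisotropy - 2) / 2 expression maps the API's 2:1..16:1
 * range onto the hardware's ratio encoding starting at RATIO21, and the
 * MIN2 clamps anything beyond 16:1 to RATIO161.
 */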
/**
 * The pipe->bind_sampler_states() driver hook.
 */
static void
iris_bind_sampler_states(struct pipe_context *ctx,
                         enum pipe_shader_type p_stage,
                         unsigned start, unsigned count,
                         void **states)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);

   bool dirty = false;

   for (int i = 0; i < count; i++) {
      if (shs->samplers[start + i] != states[i]) {
         shs->samplers[start + i] = states[i];
         dirty = true;
      }
   }

   if (dirty)
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
}
/**
 * Upload the sampler states into a contiguous area of GPU memory, for
 * 3DSTATE_SAMPLER_STATE_POINTERS_*.
 *
 * Also fill out the border color state pointers.
 */
static void
iris_upload_sampler_states(struct iris_context *ice, gl_shader_stage stage)
{
   struct iris_shader_state *shs = &ice->state.shaders[stage];
   const struct shader_info *info = iris_get_shader_info(ice, stage);

   /* We assume gallium frontends will call pipe->bind_sampler_states()
    * if the program's number of textures changes.
    */
   unsigned count = info ? util_last_bit(info->textures_used) : 0;

   if (!count)
      return;

   /* Assemble the SAMPLER_STATEs into a contiguous table that lives
    * in the dynamic state memory zone, so we can point to it via the
    * 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
    */
   unsigned size = count * 4 * GENX(SAMPLER_STATE_length);
   uint32_t *map =
      upload_state(ice->state.dynamic_uploader, &shs->sampler_table, size, 32);
   if (unlikely(!map))
      return;

   struct pipe_resource *res = shs->sampler_table.res;
   struct iris_bo *bo = iris_resource_bo(res);

   iris_record_state_size(ice->state.sizes,
                          bo->gtt_offset + shs->sampler_table.offset, size);

   shs->sampler_table.offset += iris_bo_offset_from_base_address(bo);

   /* Make sure all land in the same BO */
   iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);

   ice->state.need_border_colors &= ~(1 << stage);

   for (int i = 0; i < count; i++) {
      struct iris_sampler_state *state = shs->samplers[i];
      struct iris_sampler_view *tex = shs->textures[i];

      if (!state) {
         memset(map, 0, 4 * GENX(SAMPLER_STATE_length));
      } else if (!state->needs_border_color) {
         memcpy(map, state->sampler_state, 4 * GENX(SAMPLER_STATE_length));
      } else {
         ice->state.need_border_colors |= 1 << stage;

         /* We may need to swizzle the border color for format faking.
          * A/LA formats are faked as R/RG with 000R or R00G swizzles.
          * This means we need to move the border color's A channel into
          * the R or G channels so that those read swizzles will move it
          * back into A.
          */
         union pipe_color_union *color = &state->border_color;
         union pipe_color_union tmp;
         if (tex) {
            enum pipe_format internal_format = tex->res->internal_format;

            if (util_format_is_alpha(internal_format)) {
               unsigned char swz[4] = {
                  PIPE_SWIZZLE_W, PIPE_SWIZZLE_0,
                  PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
               };
               util_format_apply_color_swizzle(&tmp, color, swz, true);
               color = &tmp;
            } else if (util_format_is_luminance_alpha(internal_format) &&
                       internal_format != PIPE_FORMAT_L8A8_SRGB) {
               unsigned char swz[4] = {
                  PIPE_SWIZZLE_X, PIPE_SWIZZLE_W,
                  PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
               };
               util_format_apply_color_swizzle(&tmp, color, swz, true);
               color = &tmp;
            }
         }

         /* Stream out the border color and merge the pointer. */
         uint32_t offset = iris_upload_border_color(ice, color);

         uint32_t dynamic[GENX(SAMPLER_STATE_length)];
         iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) {
            dyns.BorderColorPointer = offset;
         }

         for (uint32_t j = 0; j < GENX(SAMPLER_STATE_length); j++)
            map[j] = state->sampler_state[j] | dynamic[j];
      }

      map += GENX(SAMPLER_STATE_length);
   }
}
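/* Example of the swizzle fixup above: an alpha-only texture faked as R8
 * carries a 000R read swizzle, so the border color's A channel is copied
 * into R here; the read swizzle then routes it back into A when the border
 * is sampled.
 */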
static enum isl_channel_select
fmt_swizzle(const struct iris_format_info *fmt, enum pipe_swizzle swz)
{
   switch (swz) {
   case PIPE_SWIZZLE_X: return fmt->swizzle.r;
   case PIPE_SWIZZLE_Y: return fmt->swizzle.g;
   case PIPE_SWIZZLE_Z: return fmt->swizzle.b;
   case PIPE_SWIZZLE_W: return fmt->swizzle.a;
   case PIPE_SWIZZLE_1: return SCS_ONE;
   case PIPE_SWIZZLE_0: return SCS_ZERO;
   default: unreachable("invalid swizzle");
   }
}
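/* This composes the view swizzle with the format's own swizzle, so formats
 * that are internally faked with a non-identity channel layout still return
 * the channels the state tracker asked for.
 */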
static void
fill_buffer_surface_state(struct isl_device *isl_dev,
                          struct iris_resource *res,
                          void *map,
                          enum isl_format format,
                          struct isl_swizzle swizzle,
                          unsigned offset,
                          unsigned size)
{
   const struct isl_format_layout *fmtl = isl_format_get_layout(format);
   const unsigned cpp = format == ISL_FORMAT_RAW ? 1 : fmtl->bpb / 8;

   /* The ARB_texture_buffer_specification says:
    *
    *   "The number of texels in the buffer texture's texel array is given by
    *
    *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
    *
    *    where <buffer_size> is the size of the buffer object, in basic
    *    machine units and <components> and <base_type> are the element count
    *    and base data type for elements, as specified in Table X.1.  The
    *    number of texels in the texel array is then clamped to the
    *    implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
    *
    * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
    * so that when ISL divides by stride to obtain the number of texels, that
    * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
    */
   unsigned final_size =
      MIN3(size, res->bo->size - res->offset - offset,
           IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);

   isl_buffer_fill_state(isl_dev, map,
                         .address = res->bo->gtt_offset + res->offset + offset,
                         .size_B = final_size,
                         .format = format,
                         .swizzle = swizzle,
                         .stride_B = cpp,
                         .mocs = iris_mocs(res->bo, isl_dev));
}
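/* Example: for an R32G32B32A32_FLOAT view, cpp is 16, so a buffer larger
 * than IRIS_MAX_TEXTURE_BUFFER_SIZE * 16 bytes is clamped such that ISL's
 * divide-by-stride lands exactly on the texel limit.
 */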
#define SURFACE_STATE_ALIGNMENT 64

/**
 * Allocate several contiguous SURFACE_STATE structures, one for each
 * supported auxiliary surface mode.  This only allocates the CPU-side
 * copy, they will need to be uploaded later after they're filled in.
 */
static void
alloc_surface_states(struct iris_surface_state *surf_state,
                     unsigned aux_usages)
{
   const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);

   /* If this changes, update this to explicitly align pointers */
   STATIC_ASSERT(surf_size == SURFACE_STATE_ALIGNMENT);

   assert(aux_usages != 0);

   /* In case we're re-allocating them... */
   free(surf_state->cpu);

   surf_state->num_states = util_bitcount(aux_usages);
   surf_state->cpu = calloc(surf_state->num_states, surf_size);
   surf_state->ref.offset = 0;
   pipe_resource_reference(&surf_state->ref.res, NULL);

   assert(surf_state->cpu);
}
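/* Example: aux_usages of (1 << ISL_AUX_USAGE_NONE) | (1 << ISL_AUX_USAGE_CCS_E)
 * gives num_states = 2: two 64-byte SURFACE_STATEs back to back, one per
 * auxiliary mode, which is what SURFACE_STATE_ALIGNMENT-sized stepping
 * assumes.
 */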
/**
 * Upload the CPU side SURFACE_STATEs into a GPU buffer.
 */
static void
upload_surface_states(struct u_upload_mgr *mgr,
                      struct iris_surface_state *surf_state)
{
   const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
   const unsigned bytes = surf_state->num_states * surf_size;

   void *map =
      upload_state(mgr, &surf_state->ref, bytes, SURFACE_STATE_ALIGNMENT);

   surf_state->ref.offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(surf_state->ref.res));

   if (map)
      memcpy(map, surf_state->cpu, bytes);
}
/**
 * Update resource addresses in a set of SURFACE_STATE descriptors,
 * and re-upload them if necessary.
 */
static void
update_surface_state_addrs(struct u_upload_mgr *mgr,
                           struct iris_surface_state *surf_state,
                           struct iris_bo *bo)
{
   if (surf_state->bo_address == bo->gtt_offset)
      return;

   STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) % 64 == 0);
   STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_bits) == 64);

   uint64_t *ss_addr = (uint64_t *) &surf_state->cpu[GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) / 32];

   /* First, update the CPU copies.  We assume no other fields exist in
    * the QWord containing Surface Base Address.
    */
   for (unsigned i = 0; i < surf_state->num_states; i++) {
      *ss_addr = *ss_addr - surf_state->bo_address + bo->gtt_offset;
      ss_addr = ((void *) ss_addr) + SURFACE_STATE_ALIGNMENT;
   }

   /* Next, upload the updated copies to a GPU buffer. */
   upload_surface_states(mgr, surf_state);

   surf_state->bo_address = bo->gtt_offset;
}
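/* The rewrite above is a pure base-address swap: new = old - old_base +
 * new_base.  Because Surface Base Address occupies its QWord alone (per the
 * STATIC_ASSERTs), the 64-bit read-modify-write cannot clobber neighboring
 * fields.
 */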
/**
 * Return an ISL surface for use with non-coherent render target reads.
 *
 * In a few complex cases, we can't use the SURFACE_STATE for normal render
 * target writes.  We need to make a separate one for sampling which refers
 * to the single slice of the texture being read.
 */
static void
get_rt_read_isl_surf(const struct gen_device_info *devinfo,
                     struct iris_resource *res,
                     enum pipe_texture_target target,
                     struct isl_view *view,
                     uint32_t *offset_to_tile,
                     uint32_t *tile_x_sa,
                     uint32_t *tile_y_sa,
                     struct isl_surf *surf)
{
   *surf = res->surf;

   const enum isl_dim_layout dim_layout =
      iris_get_isl_dim_layout(devinfo, res->surf.tiling, target);

   surf->dim = target_to_isl_surf_dim(target);

   if (surf->dim_layout == dim_layout)
      return;

   /* The layout of the specified texture target is not compatible with the
    * actual layout of the miptree structure in memory -- You're entering
    * dangerous territory, this can only possibly work if you only intended
    * to access a single level and slice of the texture, and the hardware
    * supports the tile offset feature in order to allow non-tile-aligned
    * base offsets, since we'll have to point the hardware to the first
    * texel of the level instead of relying on the usual base level/layer
    * controls.
    */
   assert(view->levels == 1 && view->array_len == 1);
   assert(*tile_x_sa == 0 && *tile_y_sa == 0);

   *offset_to_tile = iris_resource_get_tile_offsets(res, view->base_level,
                                                    view->base_array_layer,
                                                    tile_x_sa, tile_y_sa);
   const unsigned l = view->base_level;

   surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
   surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
      minify(surf->logical_level0_px.height, l);
   surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
      minify(surf->logical_level0_px.depth, l);

   surf->logical_level0_px.array_len = 1;
   surf->levels = 1;
   surf->dim_layout = dim_layout;

   view->base_level = 0;
   view->base_array_layer = 0;
}
static void
fill_surface_state(struct isl_device *isl_dev,
                   void *map,
                   struct iris_resource *res,
                   struct isl_surf *surf,
                   struct isl_view *view,
                   unsigned aux_usage,
                   uint32_t extra_main_offset,
                   uint32_t tile_x_sa,
                   uint32_t tile_y_sa)
{
   struct isl_surf_fill_state_info f = {
      .surf = surf,
      .view = view,
      .mocs = iris_mocs(res->bo, isl_dev),
      .address = res->bo->gtt_offset + res->offset + extra_main_offset,
      .x_offset_sa = tile_x_sa,
      .y_offset_sa = tile_y_sa,
   };

   assert(!iris_resource_unfinished_aux_import(res));

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      f.aux_surf = &res->aux.surf;
      f.aux_usage = aux_usage;
      f.aux_address = res->aux.bo->gtt_offset + res->aux.offset;

      struct iris_bo *clear_bo = NULL;
      uint64_t clear_offset = 0;
      f.clear_color =
         iris_resource_get_clear_color(res, &clear_bo, &clear_offset);
      if (clear_bo) {
         f.clear_address = clear_bo->gtt_offset + clear_offset;
         f.use_clear_address = isl_dev->info->gen > 9;
      }
   }

   isl_surf_fill_state_s(isl_dev, map, &f);
}
/**
 * The pipe->create_sampler_view() driver hook.
 */
static struct pipe_sampler_view *
iris_create_sampler_view(struct pipe_context *ctx,
                         struct pipe_resource *tex,
                         const struct pipe_sampler_view *tmpl)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_sampler_view *isv = calloc(1, sizeof(struct iris_sampler_view));

   if (!isv)
      return NULL;

   /* initialize base object */
   isv->base = *tmpl;
   isv->base.context = ctx;
   isv->base.texture = NULL;
   pipe_reference_init(&isv->base.reference, 1);
   pipe_resource_reference(&isv->base.texture, tex);

   if (util_format_is_depth_or_stencil(tmpl->format)) {
      struct iris_resource *zres, *sres;
      const struct util_format_description *desc =
         util_format_description(tmpl->format);

      iris_get_depth_stencil_resources(tex, &zres, &sres);

      tex = util_format_has_depth(desc) ? &zres->base : &sres->base;
   }

   isv->res = (struct iris_resource *) tex;

   alloc_surface_states(&isv->surface_state, isv->res->aux.sampler_usages);

   isv->surface_state.bo_address = isv->res->bo->gtt_offset;

   isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;

   if (isv->base.target == PIPE_TEXTURE_CUBE ||
       isv->base.target == PIPE_TEXTURE_CUBE_ARRAY)
      usage |= ISL_SURF_USAGE_CUBE_BIT;

   const struct iris_format_info fmt =
      iris_format_for_usage(devinfo, tmpl->format, usage);

   isv->clear_color = isv->res->aux.clear_color;

   isv->view = (struct isl_view) {
      .format = fmt.fmt,
      .swizzle = (struct isl_swizzle) {
         .r = fmt_swizzle(&fmt, tmpl->swizzle_r),
         .g = fmt_swizzle(&fmt, tmpl->swizzle_g),
         .b = fmt_swizzle(&fmt, tmpl->swizzle_b),
         .a = fmt_swizzle(&fmt, tmpl->swizzle_a),
      },
      .usage = usage,
   };

   void *map = isv->surface_state.cpu;

   /* Fill out SURFACE_STATE for this view. */
   if (tmpl->target != PIPE_BUFFER) {
      isv->view.base_level = tmpl->u.tex.first_level;
      isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;
      // XXX: do I need to port f9fd0cf4790cb2a530e75d1a2206dbb9d8af7cb2?
      isv->view.base_array_layer = tmpl->u.tex.first_layer;
      isv->view.array_len =
         tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;

      if (iris_resource_unfinished_aux_import(isv->res))
         iris_resource_finish_aux_import(&screen->base, isv->res);

      unsigned aux_modes = isv->res->aux.sampler_usages;
      while (aux_modes) {
         enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);

         /* If we have a multisampled depth buffer, do not create a sampler
          * surface state with HiZ.
          */
         fill_surface_state(&screen->isl_dev, map, isv->res, &isv->res->surf,
                            &isv->view, aux_usage, 0, 0, 0);

         map += SURFACE_STATE_ALIGNMENT;
      }
   } else {
      fill_buffer_surface_state(&screen->isl_dev, isv->res, map,
                                isv->view.format, isv->view.swizzle,
                                tmpl->u.buf.offset, tmpl->u.buf.size);
   }

   upload_surface_states(ice->state.surface_uploader, &isv->surface_state);

   return &isv->base;
}
static void
iris_sampler_view_destroy(struct pipe_context *ctx,
                          struct pipe_sampler_view *state)
{
   struct iris_sampler_view *isv = (void *) state;
   pipe_resource_reference(&state->texture, NULL);
   pipe_resource_reference(&isv->surface_state.ref.res, NULL);
   free(isv->surface_state.cpu);
   free(isv);
}
/**
 * The pipe->create_surface() driver hook.
 *
 * In Gallium nomenclature, "surfaces" are a view of a resource that
 * can be bound as a render target or depth/stencil buffer.
 */
static struct pipe_surface *
iris_create_surface(struct pipe_context *ctx,
                    struct pipe_resource *tex,
                    const struct pipe_surface *tmpl)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;

   isl_surf_usage_flags_t usage = 0;
   if (tmpl->writable)
      usage = ISL_SURF_USAGE_STORAGE_BIT;
   else if (util_format_is_depth_or_stencil(tmpl->format))
      usage = ISL_SURF_USAGE_DEPTH_BIT;
   else
      usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;

   const struct iris_format_info fmt =
      iris_format_for_usage(devinfo, tmpl->format, usage);

   if ((usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
       !isl_format_supports_rendering(devinfo, fmt.fmt)) {
      /* Framebuffer validation will reject this invalid case, but it
       * hasn't had the opportunity yet.  In the meantime, we need to
       * avoid hitting ISL asserts about unsupported formats below.
       */
      return NULL;
   }

   struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
   struct pipe_surface *psurf = &surf->base;
   struct iris_resource *res = (struct iris_resource *) tex;

   if (!surf)
      return NULL;

   pipe_reference_init(&psurf->reference, 1);
   pipe_resource_reference(&psurf->texture, tex);
   psurf->context = ctx;
   psurf->format = tmpl->format;
   psurf->width = tex->width0;
   psurf->height = tex->height0;
   psurf->texture = tex;
   psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
   psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
   psurf->u.tex.level = tmpl->u.tex.level;

   uint32_t array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;

   struct isl_view *view = &surf->view;
   *view = (struct isl_view) {
      .format = fmt.fmt,
      .base_level = tmpl->u.tex.level,
      .levels = 1,
      .base_array_layer = tmpl->u.tex.first_layer,
      .array_len = array_len,
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = usage,
   };

#if GEN_GEN == 8
   enum pipe_texture_target target = (tex->target == PIPE_TEXTURE_3D &&
                                      array_len == 1) ? PIPE_TEXTURE_2D :
                                     tex->target == PIPE_TEXTURE_1D_ARRAY ?
                                     PIPE_TEXTURE_2D_ARRAY : tex->target;

   struct isl_view *read_view = &surf->read_view;
   *read_view = (struct isl_view) {
      .format = fmt.fmt,
      .base_level = tmpl->u.tex.level,
      .levels = 1,
      .base_array_layer = tmpl->u.tex.first_layer,
      .array_len = array_len,
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = ISL_SURF_USAGE_TEXTURE_BIT,
   };
#endif

   surf->clear_color = res->aux.clear_color;

   /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */
   if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
                          ISL_SURF_USAGE_STENCIL_BIT))
      return psurf;

   alloc_surface_states(&surf->surface_state, res->aux.possible_usages);
   surf->surface_state.bo_address = res->bo->gtt_offset;

#if GEN_GEN == 8
   alloc_surface_states(&surf->surface_state_read, res->aux.possible_usages);
   surf->surface_state_read.bo_address = res->bo->gtt_offset;
#endif

   if (!isl_format_is_compressed(res->surf.format)) {
      if (iris_resource_unfinished_aux_import(res))
         iris_resource_finish_aux_import(&screen->base, res);

      void *map = surf->surface_state.cpu;
      UNUSED void *map_read = surf->surface_state_read.cpu;

      /* This is a normal surface.  Fill out a SURFACE_STATE for each possible
       * auxiliary surface mode and return the pipe_surface.
       */
      unsigned aux_modes = res->aux.possible_usages;
      while (aux_modes) {
         enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
         fill_surface_state(&screen->isl_dev, map, res, &res->surf,
                            view, aux_usage, 0, 0, 0);
         map += SURFACE_STATE_ALIGNMENT;

#if GEN_GEN == 8
         struct isl_surf surf;
         uint32_t offset_to_tile = 0, tile_x_sa = 0, tile_y_sa = 0;
         get_rt_read_isl_surf(devinfo, res, target, read_view,
                              &offset_to_tile, &tile_x_sa, &tile_y_sa, &surf);
         fill_surface_state(&screen->isl_dev, map_read, res, &surf, read_view,
                            aux_usage, offset_to_tile, tile_x_sa, tile_y_sa);
         map_read += SURFACE_STATE_ALIGNMENT;
#endif
      }

      upload_surface_states(ice->state.surface_uploader, &surf->surface_state);

#if GEN_GEN == 8
      upload_surface_states(ice->state.surface_uploader,
                            &surf->surface_state_read);
#endif

      return psurf;
   }

   /* The resource has a compressed format, which is not renderable, but we
    * have a renderable view format.  We must be attempting to upload blocks
    * of compressed data via an uncompressed view.
    *
    * In this case, we can assume there are no auxiliary buffers, a single
    * miplevel, and that the resource is single-sampled.  Gallium may try
    * and create an uncompressed view with multiple layers, however.
    */
   assert(!isl_format_is_compressed(fmt.fmt));
   assert(res->aux.possible_usages == 1 << ISL_AUX_USAGE_NONE);
   assert(res->surf.samples == 1);
   assert(view->levels == 1);

   struct isl_surf isl_surf;
   uint32_t offset_B = 0, tile_x_sa = 0, tile_y_sa = 0;

   if (view->base_level > 0) {
      /* We can't rely on the hardware's miplevel selection with such
       * a substantial lie about the format, so we select a single image
       * using the Tile X/Y Offset fields.  In this case, we can't handle
       * multiple array slices.
       *
       * On Broadwell, HALIGN and VALIGN are specified in pixels and are
       * hard-coded to align to exactly the block size of the compressed
       * texture.  This means that, when reinterpreted as a non-compressed
       * texture, the tile offsets may be anything and we can't rely on
       * X/Y Offset.
       *
       * Return NULL to force gallium frontends to take fallback paths.
       */
      if (view->array_len > 1 || GEN_GEN == 8)
         return NULL;

      const bool is_3d = res->surf.dim == ISL_SURF_DIM_3D;
      isl_surf_get_image_surf(&screen->isl_dev, &res->surf,
                              view->base_level,
                              is_3d ? 0 : view->base_array_layer,
                              is_3d ? view->base_array_layer : 0,
                              &isl_surf,
                              &offset_B, &tile_x_sa, &tile_y_sa);

      /* We use address and tile offsets to access a single level/layer
       * as a subimage, so reset level/layer so it doesn't offset again.
       */
      view->base_array_layer = 0;
      view->base_level = 0;
   } else {
      /* Level 0 doesn't require tile offsets, and the hardware can find
       * array slices using QPitch even with the format override, so we
       * can allow layers in this case.  Copy the original ISL surface.
       */
      memcpy(&isl_surf, &res->surf, sizeof(isl_surf));
   }

   /* Scale down the image dimensions by the block size. */
   const struct isl_format_layout *fmtl =
      isl_format_get_layout(res->surf.format);
   isl_surf.format = fmt.fmt;
   isl_surf.logical_level0_px = isl_surf_get_logical_level0_el(&isl_surf);
   isl_surf.phys_level0_sa = isl_surf_get_phys_level0_el(&isl_surf);
   tile_x_sa /= fmtl->bw;
   tile_y_sa /= fmtl->bh;

   psurf->width = isl_surf.logical_level0_px.width;
   psurf->height = isl_surf.logical_level0_px.height;

   struct isl_surf_fill_state_info f = {
      .surf = &isl_surf,
      .view = view,
      .mocs = iris_mocs(res->bo, &screen->isl_dev),
      .address = res->bo->gtt_offset + offset_B,
      .x_offset_sa = tile_x_sa,
      .y_offset_sa = tile_y_sa,
   };

   isl_surf_fill_state_s(&screen->isl_dev, surf->surface_state.cpu, &f);

   upload_surface_states(ice->state.surface_uploader, &surf->surface_state);

   return psurf;
}
#if GEN_GEN < 9
static void
fill_default_image_param(struct brw_image_param *param)
{
   memset(param, 0, sizeof(*param));
   /* Set the swizzling shifts to all-ones to effectively disable swizzling --
    * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
    * detailed explanation of these parameters.
    */
   param->swizzling[0] = 0xff;
   param->swizzling[1] = 0xff;
}

static void
fill_buffer_image_param(struct brw_image_param *param,
                        enum pipe_format pfmt,
                        unsigned size)
{
   const unsigned cpp = util_format_get_blocksize(pfmt);

   fill_default_image_param(param);
   param->size[0] = size / cpp;
   param->stride[0] = cpp;
}
#else
#define isl_surf_fill_image_param(x, ...)
#define fill_default_image_param(x, ...)
#define fill_buffer_image_param(x, ...)
#endif
/**
 * The pipe->set_shader_images() driver hook.
 */
static void
iris_set_shader_images(struct pipe_context *ctx,
                       enum pipe_shader_type p_stage,
                       unsigned start_slot, unsigned count,
                       const struct pipe_image_view *p_images)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];
#if GEN_GEN == 8
   struct iris_genx_state *genx = ice->state.genx;
   struct brw_image_param *image_params = genx->shaders[stage].image_param;
#endif

   shs->bound_image_views &= ~u_bit_consecutive(start_slot, count);

   for (unsigned i = 0; i < count; i++) {
      struct iris_image_view *iv = &shs->image[start_slot + i];

      if (p_images && p_images[i].resource) {
         const struct pipe_image_view *img = &p_images[i];
         struct iris_resource *res = (void *) img->resource;

         util_copy_image_view(&iv->base, img);

         shs->bound_image_views |= 1 << (start_slot + i);

         res->bind_history |= PIPE_BIND_SHADER_IMAGE;
         res->bind_stages |= 1 << stage;

         enum isl_format isl_fmt = iris_image_view_get_format(ice, img);

         /* Render compression with images supported on gen12+ only. */
         unsigned aux_usages = GEN_GEN >= 12 ? res->aux.possible_usages :
            1 << ISL_AUX_USAGE_NONE;

         alloc_surface_states(&iv->surface_state, aux_usages);
         iv->surface_state.bo_address = res->bo->gtt_offset;

         void *map = iv->surface_state.cpu;

         if (res->base.target != PIPE_BUFFER) {
            struct isl_view view = {
               .format = isl_fmt,
               .base_level = img->u.tex.level,
               .levels = 1,
               .base_array_layer = img->u.tex.first_layer,
               .array_len = img->u.tex.last_layer - img->u.tex.first_layer + 1,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = ISL_SURF_USAGE_STORAGE_BIT,
            };

            /* If using untyped fallback. */
            if (isl_fmt == ISL_FORMAT_RAW) {
               fill_buffer_surface_state(&screen->isl_dev, res, map,
                                         isl_fmt, ISL_SWIZZLE_IDENTITY,
                                         0, res->bo->size);
            } else {
               unsigned aux_modes = aux_usages;
               while (aux_modes) {
                  enum isl_aux_usage usage = u_bit_scan(&aux_modes);

                  fill_surface_state(&screen->isl_dev, map, res, &res->surf,
                                     &view, usage, 0, 0, 0);

                  map += SURFACE_STATE_ALIGNMENT;
               }
            }

            isl_surf_fill_image_param(&screen->isl_dev,
                                      &image_params[start_slot + i],
                                      &res->surf, &view);
         } else {
            util_range_add(&res->base, &res->valid_buffer_range, img->u.buf.offset,
                           img->u.buf.offset + img->u.buf.size);
            fill_buffer_surface_state(&screen->isl_dev, res, map,
                                      isl_fmt, ISL_SWIZZLE_IDENTITY,
                                      img->u.buf.offset, img->u.buf.size);
            fill_buffer_image_param(&image_params[start_slot + i],
                                    img->format, img->u.buf.size);
         }

         upload_surface_states(ice->state.surface_uploader, &iv->surface_state);
      } else {
         pipe_resource_reference(&iv->base.resource, NULL);
         pipe_resource_reference(&iv->surface_state.ref.res, NULL);
         fill_default_image_param(&image_params[start_slot + i]);
      }
   }

   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
   ice->state.dirty |=
      stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
                                   : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;

   /* Broadwell also needs brw_image_params re-uploaded */
   if (GEN_GEN < 9) {
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
      shs->sysvals_need_upload = true;
   }
}
/**
 * The pipe->set_sampler_views() driver hook.
 */
static void
iris_set_sampler_views(struct pipe_context *ctx,
                       enum pipe_shader_type p_stage,
                       unsigned start, unsigned count,
                       struct pipe_sampler_view **views)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   shs->bound_sampler_views &= ~u_bit_consecutive(start, count);

   for (unsigned i = 0; i < count; i++) {
      struct pipe_sampler_view *pview = views ? views[i] : NULL;
      pipe_sampler_view_reference((struct pipe_sampler_view **)
                                  &shs->textures[start + i], pview);
      struct iris_sampler_view *view = (void *) pview;
      if (view) {
         view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
         view->res->bind_stages |= 1 << stage;

         shs->bound_sampler_views |= 1 << (start + i);

         update_surface_state_addrs(ice->state.surface_uploader,
                                    &view->surface_state, view->res->bo);
      }
   }

   ice->state.stage_dirty |= (IRIS_STAGE_DIRTY_BINDINGS_VS << stage);
   ice->state.dirty |=
      stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
                                   : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
}
static void
iris_set_compute_resources(struct pipe_context *ctx,
                           unsigned start, unsigned count,
                           struct pipe_surface **resources)
{
   assert(count == 0);
}
static void
iris_set_global_binding(struct pipe_context *ctx,
                        unsigned start_slot, unsigned count,
                        struct pipe_resource **resources,
                        uint32_t **handles)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   assert(start_slot + count <= IRIS_MAX_GLOBAL_BINDINGS);
   for (unsigned i = 0; i < count; i++) {
      if (resources && resources[i]) {
         pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
                                 resources[i]);
         struct iris_resource *res = (void *) resources[i];
         uint64_t addr = res->bo->gtt_offset;
         memcpy(handles[i], &addr, sizeof(addr));
      } else {
         pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
                                 NULL);
      }
   }

   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_CS;
}
/**
 * The pipe->set_tess_state() driver hook.
 */
static void
iris_set_tess_state(struct pipe_context *ctx,
                    const float default_outer_level[4],
                    const float default_inner_level[2])
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];

   memcpy(&ice->state.default_outer_level[0], &default_outer_level[0], 4 * sizeof(float));
   memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float));

   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TCS;
   shs->sysvals_need_upload = true;
}
static void
iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
{
   struct iris_surface *surf = (void *) p_surf;
   pipe_resource_reference(&p_surf->texture, NULL);
   pipe_resource_reference(&surf->surface_state.ref.res, NULL);
   pipe_resource_reference(&surf->surface_state_read.ref.res, NULL);
   free(surf->surface_state.cpu);
   free(surf->surface_state_read.cpu);
   free(surf);
}
static void
iris_set_clip_state(struct pipe_context *ctx,
                    const struct pipe_clip_state *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
   struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
   struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];

   memcpy(&ice->state.clip_planes, state, sizeof(*state));

   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS |
                             IRIS_STAGE_DIRTY_CONSTANTS_GS |
                             IRIS_STAGE_DIRTY_CONSTANTS_TES;
   shs->sysvals_need_upload = true;
   gshs->sysvals_need_upload = true;
   tshs->sysvals_need_upload = true;
}
/**
 * The pipe->set_polygon_stipple() driver hook.
 */
static void
iris_set_polygon_stipple(struct pipe_context *ctx,
                         const struct pipe_poly_stipple *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   memcpy(&ice->state.poly_stipple, state, sizeof(*state));
   ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
}
/**
 * The pipe->set_sample_mask() driver hook.
 */
static void
iris_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   /* We only support 16x MSAA, so we have 16 bits of sample mask.
    * st/mesa may pass us 0xffffffff though, meaning "enable all samples".
    */
   ice->state.sample_mask = sample_mask & 0xffff;
   ice->state.dirty |= IRIS_DIRTY_SAMPLE_MASK;
}
/**
 * The pipe->set_scissor_states() driver hook.
 *
 * This corresponds to our SCISSOR_RECT state structures.  It's an
 * exact match, so we just store them, and memcpy them out later.
 */
static void
iris_set_scissor_states(struct pipe_context *ctx,
                        unsigned start_slot,
                        unsigned num_scissors,
                        const struct pipe_scissor_state *rects)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   for (unsigned i = 0; i < num_scissors; i++) {
      if (rects[i].minx == rects[i].maxx || rects[i].miny == rects[i].maxy) {
         /* If the scissor was out of bounds and got clamped to 0 width/height
          * at the bounds, the subtraction of 1 from maximums could produce a
          * negative number and thus not clip anything.  Instead, just provide
          * a min > max scissor inside the bounds, which produces the expected
          * no rendering.
          */
         ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
            .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
         };
      } else {
         ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
            .minx = rects[i].minx,     .miny = rects[i].miny,
            .maxx = rects[i].maxx - 1, .maxy = rects[i].maxy - 1,
         };
      }
   }

   ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
}
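/* Example: a rect clamped to minx == maxx == 0 would wrap to maxx = 0 - 1
 * in the unsigned field and clip nothing, whereas the stored (1, 0, 1, 0)
 * rect is empty no matter what.
 */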
/**
 * The pipe->set_stencil_ref() driver hook.
 *
 * This is added to 3DSTATE_WM_DEPTH_STENCIL dynamically at draw time.
 */
static void
iris_set_stencil_ref(struct pipe_context *ctx,
                     const struct pipe_stencil_ref *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   memcpy(&ice->state.stencil_ref, state, sizeof(*state));
   if (GEN_GEN >= 12)
      ice->state.dirty |= IRIS_DIRTY_STENCIL_REF;
   else if (GEN_GEN >= 9)
      ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
   else
      ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
}
static float
viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
{
   return copysignf(state->scale[axis], sign) + state->translate[axis];
}
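/* Example: for a 640-pixel-wide viewport centered the usual way,
 * scale[0] = 320 and translate[0] = 320, so the two extents come out to
 * copysignf(320, -1) + 320 = 0 and copysignf(320, +1) + 320 = 640.
 */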
/**
 * The pipe->set_viewport_states() driver hook.
 *
 * This corresponds to our SF_CLIP_VIEWPORT states.  We can't calculate
 * the guardband yet, as we need the framebuffer dimensions, but we can
 * at least fill out the rest.
 */
static void
iris_set_viewport_states(struct pipe_context *ctx,
                         unsigned start_slot,
                         unsigned count,
                         const struct pipe_viewport_state *states)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   memcpy(&ice->state.viewports[start_slot], states, sizeof(*states) * count);

   ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;

   if (ice->state.cso_rast && (!ice->state.cso_rast->depth_clip_near ||
                               !ice->state.cso_rast->depth_clip_far))
      ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
}
/**
 * The pipe->set_framebuffer_state() driver hook.
 *
 * Sets the current draw FBO, including color render targets, depth,
 * and stencil buffers.
 */
static void
iris_set_framebuffer_state(struct pipe_context *ctx,
                           const struct pipe_framebuffer_state *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct isl_device *isl_dev = &screen->isl_dev;
   struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
   struct iris_resource *zres;
   struct iris_resource *stencil_res;

   unsigned samples = util_framebuffer_get_num_samples(state);
   unsigned layers = util_framebuffer_get_num_layers(state);

   if (cso->samples != samples) {
      ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;

      /* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */
      if (GEN_GEN >= 9 && (cso->samples == 16 || samples == 16))
         ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
   }

   if (cso->nr_cbufs != state->nr_cbufs) {
      ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
   }

   if ((cso->layers == 0) != (layers == 0)) {
      ice->state.dirty |= IRIS_DIRTY_CLIP;
   }

   if (cso->width != state->width || cso->height != state->height) {
      ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
   }

   if (cso->zsbuf || state->zsbuf) {
      ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
   }

   util_copy_framebuffer_state(cso, state);
   cso->samples = samples;
   cso->layers = layers;

   struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;

   struct isl_view view = {
      .base_level = 0,
      .levels = 1,
      .base_array_layer = 0,
      .array_len = 1,
      .swizzle = ISL_SWIZZLE_IDENTITY,
   };

   struct isl_depth_stencil_hiz_emit_info info = { .view = &view };

   if (cso->zsbuf) {
      iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres,
                                       &stencil_res);

      view.base_level = cso->zsbuf->u.tex.level;
      view.base_array_layer = cso->zsbuf->u.tex.first_layer;
      view.array_len =
         cso->zsbuf->u.tex.last_layer - cso->zsbuf->u.tex.first_layer + 1;

      if (zres) {
         view.usage |= ISL_SURF_USAGE_DEPTH_BIT;

         info.depth_surf = &zres->surf;
         info.depth_address = zres->bo->gtt_offset + zres->offset;
         info.mocs = iris_mocs(zres->bo, isl_dev);

         view.format = zres->surf.format;

         if (iris_resource_level_has_hiz(zres, view.base_level)) {
            info.hiz_usage = zres->aux.usage;
            info.hiz_surf = &zres->aux.surf;
            info.hiz_address = zres->aux.bo->gtt_offset + zres->aux.offset;
         }
      }

      if (stencil_res) {
         view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
         info.stencil_aux_usage = stencil_res->aux.usage;
         info.stencil_surf = &stencil_res->surf;
         info.stencil_address = stencil_res->bo->gtt_offset + stencil_res->offset;
         if (!zres) {
            view.format = stencil_res->surf.format;
            info.mocs = iris_mocs(stencil_res->bo, isl_dev);
         }
      }
   }

   isl_emit_depth_stencil_hiz_s(isl_dev, cso_z->packets, &info);

   /* Make a null surface for unbound buffers */
   void *null_surf_map =
      upload_state(ice->state.surface_uploader, &ice->state.null_fb,
                   4 * GENX(RENDER_SURFACE_STATE_length), 64);
   isl_null_fill_state(&screen->isl_dev, null_surf_map,
                       isl_extent3d(MAX2(cso->width, 1),
                                    MAX2(cso->height, 1),
                                    cso->layers ? cso->layers : 1));
   ice->state.null_fb.offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));

   /* Render target change */
   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_FS;

   ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;

   ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;

   ice->state.stage_dirty |=
      ice->state.stage_dirty_for_nos[IRIS_NOS_FRAMEBUFFER];

   if (GEN_GEN == 8)
      ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}
/**
 * The pipe->set_constant_buffer() driver hook.
 *
 * This uploads any constant data in user buffers, and references
 * any UBO resources containing constant data.
 */
static void
iris_set_constant_buffer(struct pipe_context *ctx,
                         enum pipe_shader_type p_stage, unsigned index,
                         const struct pipe_constant_buffer *input)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];
   struct pipe_shader_buffer *cbuf = &shs->constbuf[index];

   /* TODO: Only do this if the buffer changes? */
   pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);

   if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
      shs->bound_cbufs |= 1u << index;

      if (input->user_buffer) {
         void *map = NULL;
         pipe_resource_reference(&cbuf->buffer, NULL);
         u_upload_alloc(ice->ctx.const_uploader, 0, input->buffer_size, 64,
                        &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);

         if (!cbuf->buffer) {
            /* Allocation was unsuccessful - just unbind */
            iris_set_constant_buffer(ctx, p_stage, index, NULL);
            return;
         }

         assert(map);
         memcpy(map, input->user_buffer, input->buffer_size);
      } else if (input->buffer) {
         pipe_resource_reference(&cbuf->buffer, input->buffer);

         cbuf->buffer_offset = input->buffer_offset;
      }

      cbuf->buffer_size =
         MIN2(input->buffer_size,
              iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset);

      struct iris_resource *res = (void *) cbuf->buffer;
      res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
      res->bind_stages |= 1 << stage;
   } else {
      shs->bound_cbufs &= ~(1u << index);
      pipe_resource_reference(&cbuf->buffer, NULL);
   }

   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
}
static void
upload_sysvals(struct iris_context *ice,
               gl_shader_stage stage,
               const struct pipe_grid_info *grid)
{
   UNUSED struct iris_genx_state *genx = ice->state.genx;
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   struct iris_compiled_shader *shader = ice->shaders.prog[stage];
   if (!shader || (shader->num_system_values == 0 &&
                   shader->kernel_input_size == 0))
      return;

   assert(shader->num_cbufs > 0);

   unsigned sysval_cbuf_index = shader->num_cbufs - 1;
   struct pipe_shader_buffer *cbuf = &shs->constbuf[sysval_cbuf_index];
   unsigned system_values_start =
      ALIGN(shader->kernel_input_size, sizeof(uint32_t));
   unsigned upload_size = system_values_start +
                          shader->num_system_values * sizeof(uint32_t);
   void *map = NULL;

   assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS);
   u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64,
                  &cbuf->buffer_offset, &cbuf->buffer, &map);

   if (shader->kernel_input_size > 0)
      memcpy(map, grid->input, shader->kernel_input_size);

   uint32_t *sysval_map = map + system_values_start;
   for (int i = 0; i < shader->num_system_values; i++) {
      uint32_t sysval = shader->system_values[i];
      uint32_t value = 0;

      if (BRW_PARAM_DOMAIN(sysval) == BRW_PARAM_DOMAIN_IMAGE) {
#if GEN_GEN == 8
         unsigned img = BRW_PARAM_IMAGE_IDX(sysval);
         unsigned offset = BRW_PARAM_IMAGE_OFFSET(sysval);
         struct brw_image_param *param =
            &genx->shaders[stage].image_param[img];

         assert(offset < sizeof(struct brw_image_param));
         value = ((uint32_t *) param)[offset];
#endif
      } else if (sysval == BRW_PARAM_BUILTIN_ZERO) {
         value = 0;
      } else if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
         int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval);
         int comp  = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval);
         value = fui(ice->state.clip_planes.ucp[plane][comp]);
      } else if (sysval == BRW_PARAM_BUILTIN_PATCH_VERTICES_IN) {
         if (stage == MESA_SHADER_TESS_CTRL) {
            value = ice->state.vertices_per_patch;
         } else {
            assert(stage == MESA_SHADER_TESS_EVAL);
            const struct shader_info *tcs_info =
               iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
            if (tcs_info)
               value = tcs_info->tess.tcs_vertices_out;
            else
               value = ice->state.vertices_per_patch;
         }
      } else if (sysval >= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X &&
                 sysval <= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_W) {
         unsigned i = sysval - BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
         value = fui(ice->state.default_outer_level[i]);
      } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X) {
         value = fui(ice->state.default_inner_level[0]);
      } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y) {
         value = fui(ice->state.default_inner_level[1]);
      } else if (sysval >= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X &&
                 sysval <= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Z) {
         unsigned i = sysval - BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X;
         value = ice->state.last_block[i];
      } else {
         assert(!"unhandled system value");
      }

      *sysval_map++ = value;
   }

   cbuf->buffer_size = upload_size;
   iris_upload_ubo_ssbo_surf_state(ice, cbuf,
                                   &shs->constbuf_surf_state[sysval_cbuf_index], false);

   shs->sysvals_need_upload = false;
}
/**
 * The pipe->set_shader_buffers() driver hook.
 *
 * This binds SSBOs and ABOs.  Unfortunately, we need to stream out
 * SURFACE_STATE here, as the buffer offset may change each time.
 */
static void
iris_set_shader_buffers(struct pipe_context *ctx,
                        enum pipe_shader_type p_stage,
                        unsigned start_slot, unsigned count,
                        const struct pipe_shader_buffer *buffers,
                        unsigned writable_bitmask)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   unsigned modified_bits = u_bit_consecutive(start_slot, count);

   shs->bound_ssbos &= ~modified_bits;
   shs->writable_ssbos &= ~modified_bits;
   shs->writable_ssbos |= writable_bitmask << start_slot;

   for (unsigned i = 0; i < count; i++) {
      if (buffers && buffers[i].buffer) {
         struct iris_resource *res = (void *) buffers[i].buffer;
         struct pipe_shader_buffer *ssbo = &shs->ssbo[start_slot + i];
         struct iris_state_ref *surf_state =
            &shs->ssbo_surf_state[start_slot + i];
         pipe_resource_reference(&ssbo->buffer, &res->base);
         ssbo->buffer_offset = buffers[i].buffer_offset;
         ssbo->buffer_size =
            MIN2(buffers[i].buffer_size, res->bo->size - ssbo->buffer_offset);

         shs->bound_ssbos |= 1 << (start_slot + i);

         iris_upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, true);

         res->bind_history |= PIPE_BIND_SHADER_BUFFER;
         res->bind_stages |= 1 << stage;

         util_range_add(&res->base, &res->valid_buffer_range, ssbo->buffer_offset,
                        ssbo->buffer_offset + ssbo->buffer_size);
      } else {
         pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL);
         pipe_resource_reference(&shs->ssbo_surf_state[start_slot + i].res,
                                 NULL);
      }
   }

   ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
}
static void
iris_delete_state(struct pipe_context *ctx, void *state)
{
   free(state);
}
/**
 * The pipe->set_vertex_buffers() driver hook.
 *
 * This translates pipe_vertex_buffer to our 3DSTATE_VERTEX_BUFFERS packet.
 */
static void
iris_set_vertex_buffers(struct pipe_context *ctx,
                        unsigned start_slot, unsigned count,
                        const struct pipe_vertex_buffer *buffers)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct iris_genx_state *genx = ice->state.genx;

   ice->state.bound_vertex_buffers &= ~u_bit_consecutive64(start_slot, count);

   for (unsigned i = 0; i < count; i++) {
      const struct pipe_vertex_buffer *buffer = buffers ? &buffers[i] : NULL;
      struct iris_vertex_buffer_state *state =
         &genx->vertex_buffers[start_slot + i];

      if (!buffer) {
         pipe_resource_reference(&state->resource, NULL);
         continue;
      }

      /* We may see user buffers that are NULL bindings. */
      assert(!(buffer->is_user_buffer && buffer->buffer.user != NULL));

      pipe_resource_reference(&state->resource, buffer->buffer.resource);
      struct iris_resource *res = (void *) state->resource;

      state->offset = (int) buffer->buffer_offset;

      if (res) {
         ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
         res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
      }

      iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
         vb.VertexBufferIndex = start_slot + i;
         vb.AddressModifyEnable = true;
         vb.BufferPitch = buffer->stride;
         if (res) {
            vb.BufferSize = res->base.width0 - (int) buffer->buffer_offset;
            vb.BufferStartingAddress =
               ro_bo(NULL, res->bo->gtt_offset + (int) buffer->buffer_offset);
            vb.MOCS = iris_mocs(res->bo, &screen->isl_dev);
         } else {
            vb.NullVertexBuffer = true;
         }
      }
   }

   ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
}
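/* vb.BufferSize is the number of bytes readable past the bind offset, so it
 * shrinks by buffer_offset: binding at offset 64 into a 1024-byte buffer
 * advertises 960 readable bytes to the vertex fetcher.
 */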
/**
 * Gallium CSO for vertex elements.
 */
struct iris_vertex_element_state {
   uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
   uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
   uint32_t edgeflag_ve[GENX(VERTEX_ELEMENT_STATE_length)];
   uint32_t edgeflag_vfi[GENX(3DSTATE_VF_INSTANCING_length)];
   unsigned count;
};
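/* The "33" sizing presumably covers up to 32 API vertex attributes plus one
 * extra driver-generated element (e.g. for SGVs), with one leading DWord in
 * vertex_elements for the 3DSTATE_VERTEX_ELEMENTS header.
 */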
3456 * The pipe->create_vertex_elements() driver hook.
3458 * This translates pipe_vertex_element to our 3DSTATE_VERTEX_ELEMENTS
3459 * and 3DSTATE_VF_INSTANCING commands. The vertex_elements and vf_instancing
3460 * arrays are ready to be emitted at draw time if no EdgeFlag or SGVs are
3461 * needed. In these cases we will need information available at draw time.
3462 * We setup edgeflag_ve and edgeflag_vfi as alternatives last
3463 * 3DSTATE_VERTEX_ELEMENT and 3DSTATE_VF_INSTANCING that can be used at
3464 * draw time if we detect that EdgeFlag is needed by the Vertex Shader.
static void *
iris_create_vertex_elements(struct pipe_context *ctx,
                            unsigned count,
                            const struct pipe_vertex_element *state)
{
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_vertex_element_state *cso =
      malloc(sizeof(struct iris_vertex_element_state));

   cso->count = count;

   iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) {
      ve.DWordLength =
         1 + GENX(VERTEX_ELEMENT_STATE_length) * MAX2(count, 1) - 2;
   }

   uint32_t *ve_pack_dest = &cso->vertex_elements[1];
   uint32_t *vfi_pack_dest = cso->vf_instancing;

   if (count == 0) {
      iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
         ve.Valid = true;
         ve.SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT;
         ve.Component0Control = VFCOMP_STORE_0;
         ve.Component1Control = VFCOMP_STORE_0;
         ve.Component2Control = VFCOMP_STORE_0;
         ve.Component3Control = VFCOMP_STORE_1_FP;
      }

      iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
      }
   }

   for (int i = 0; i < count; i++) {
      const struct iris_format_info fmt =
         iris_format_for_usage(devinfo, state[i].src_format, 0);
      unsigned comp[4] = { VFCOMP_STORE_SRC, VFCOMP_STORE_SRC,
                           VFCOMP_STORE_SRC, VFCOMP_STORE_SRC };

      switch (isl_format_get_num_channels(fmt.fmt)) {
      case 0: comp[0] = VFCOMP_STORE_0; /* fallthrough */
      case 1: comp[1] = VFCOMP_STORE_0; /* fallthrough */
      case 2: comp[2] = VFCOMP_STORE_0; /* fallthrough */
      case 3:
         comp[3] = isl_format_has_int_channel(fmt.fmt) ? VFCOMP_STORE_1_INT
                                                       : VFCOMP_STORE_1_FP;
         break;
      }

      iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
         ve.EdgeFlagEnable = false;
         ve.VertexBufferIndex = state[i].vertex_buffer_index;
         ve.Valid = true;
         ve.SourceElementOffset = state[i].src_offset;
         ve.SourceElementFormat = fmt.fmt;
         ve.Component0Control = comp[0];
         ve.Component1Control = comp[1];
         ve.Component2Control = comp[2];
         ve.Component3Control = comp[3];
      }

      iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
         vi.VertexElementIndex = i;
         vi.InstancingEnable = state[i].instance_divisor > 0;
         vi.InstanceDataStepRate = state[i].instance_divisor;
      }

      ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
      vfi_pack_dest += GENX(3DSTATE_VF_INSTANCING_length);
   }

   /* An alternative version of the last VE and VFI is stored so it
    * can be used at draw time in case the vertex shader uses EdgeFlag.
    */
   if (count) {
      const unsigned edgeflag_index = count - 1;
      const struct iris_format_info fmt =
         iris_format_for_usage(devinfo, state[edgeflag_index].src_format, 0);
      iris_pack_state(GENX(VERTEX_ELEMENT_STATE), cso->edgeflag_ve, ve) {
         ve.EdgeFlagEnable = true;
         ve.VertexBufferIndex = state[edgeflag_index].vertex_buffer_index;
         ve.Valid = true;
         ve.SourceElementOffset = state[edgeflag_index].src_offset;
         ve.SourceElementFormat = fmt.fmt;
         ve.Component0Control = VFCOMP_STORE_SRC;
         ve.Component1Control = VFCOMP_STORE_0;
         ve.Component2Control = VFCOMP_STORE_0;
         ve.Component3Control = VFCOMP_STORE_0;
      }

      iris_pack_command(GENX(3DSTATE_VF_INSTANCING), cso->edgeflag_vfi, vi) {
         /* The vi.VertexElementIndex of the EdgeFlag Vertex Element is
          * filled at draw time, as it should change if SGVs are emitted.
          */
         vi.InstancingEnable = state[edgeflag_index].instance_divisor > 0;
         vi.InstanceDataStepRate = state[edgeflag_index].instance_divisor;
      }
   }

   return cso;
}
/**
 * The pipe->bind_vertex_elements_state() driver hook.
 */
static void
iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_vertex_element_state *old_cso = ice->state.cso_vertex_elements;
   struct iris_vertex_element_state *new_cso = state;

   /* 3DSTATE_VF_SGVS overrides the last VE, so if the count is changing,
    * we need to re-emit it to ensure we're overriding the right one.
    */
   if (new_cso && cso_changed(count))
      ice->state.dirty |= IRIS_DIRTY_VF_SGVS;

   ice->state.cso_vertex_elements = state;
   ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
}
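
/* Note (added for clarity): cso_changed() is a helper macro defined earlier
 * in this file; cso_changed(count) compares old_cso->count with
 * new_cso->count, which is why the CSO keeps its element count around.
 */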
/**
 * The pipe->create_stream_output_target() driver hook.
 *
 * "Target" here refers to a destination buffer.  We translate this into
 * a 3DSTATE_SO_BUFFER packet.  We can handle most fields, but don't yet
 * know which buffer this represents, or whether we ought to zero the
 * write-offsets, or append.  Those are handled in the set() hook.
 */
static struct pipe_stream_output_target *
iris_create_stream_output_target(struct pipe_context *ctx,
                                 struct pipe_resource *p_res,
                                 unsigned buffer_offset,
                                 unsigned buffer_size)
{
   struct iris_resource *res = (void *) p_res;
   struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
   if (!cso)
      return NULL;

   res->bind_history |= PIPE_BIND_STREAM_OUTPUT;

   pipe_reference_init(&cso->base.reference, 1);
   pipe_resource_reference(&cso->base.buffer, p_res);
   cso->base.buffer_offset = buffer_offset;
   cso->base.buffer_size = buffer_size;
   cso->base.context = ctx;

   util_range_add(&res->base, &res->valid_buffer_range, buffer_offset,
                  buffer_offset + buffer_size);

   upload_state(ctx->stream_uploader, &cso->offset, sizeof(uint32_t), 4);

   return &cso->base;
}
static void
iris_stream_output_target_destroy(struct pipe_context *ctx,
                                  struct pipe_stream_output_target *state)
{
   struct iris_stream_output_target *cso = (void *) state;

   pipe_resource_reference(&cso->base.buffer, NULL);
   pipe_resource_reference(&cso->offset.res, NULL);

   free(cso);
}
/**
 * The pipe->set_stream_output_targets() driver hook.
 *
 * At this point, we know which targets are bound to a particular index,
 * and also whether we want to append or start over.  We can finish the
 * 3DSTATE_SO_BUFFER packets we started earlier.
 */
static void
iris_set_stream_output_targets(struct pipe_context *ctx,
                               unsigned num_targets,
                               struct pipe_stream_output_target **targets,
                               const unsigned *offsets)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_genx_state *genx = ice->state.genx;
   uint32_t *so_buffers = genx->so_buffers;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;

   const bool active = num_targets > 0;
   if (ice->state.streamout_active != active) {
      ice->state.streamout_active = active;
      ice->state.dirty |= IRIS_DIRTY_STREAMOUT;

      /* We only emit 3DSTATE_SO_DECL_LIST when streamout is active, because
       * it's a non-pipelined command.  If we're switching streamout on, we
       * may have missed emitting it earlier, so do so now.  (We're already
       * taking a stall to update 3DSTATE_SO_BUFFERS anyway...)
       */
      if (active) {
         ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST;
      } else {
         uint32_t flush = 0;
         for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
            struct iris_stream_output_target *tgt =
               (void *) ice->state.so_target[i];
            if (tgt) {
               struct iris_resource *res = (void *) tgt->base.buffer;

               flush |= iris_flush_bits_for_history(res);
               iris_dirty_for_history(ice, res);
            }
         }
         iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
                                      "make streamout results visible", flush);
      }
   }

   for (int i = 0; i < 4; i++) {
      pipe_so_target_reference(&ice->state.so_target[i],
                               i < num_targets ? targets[i] : NULL);
   }

   /* No need to update 3DSTATE_SO_BUFFER unless SOL is active. */
   if (!active)
      return;

   for (unsigned i = 0; i < 4; i++,
        so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
      struct iris_stream_output_target *tgt = (void *) ice->state.so_target[i];
      unsigned offset = offsets[i];

      if (!tgt) {
         iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
#if GEN_GEN < 12
            sob.SOBufferIndex = i;
#else
            sob._3DCommandOpcode = 0;
            sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
#endif
         }
         continue;
      }

      struct iris_resource *res = (void *) tgt->base.buffer;

      /* Note that offsets[i] will either be 0, causing us to zero
       * the value in the buffer, or 0xFFFFFFFF, which happens to mean
       * "continue appending at the existing offset."
       */
      assert(offset == 0 || offset == 0xFFFFFFFF);

      /* We might be called by Begin (offset = 0), Pause, then Resume
       * (offset = 0xFFFFFFFF) before ever drawing (where these commands
       * will actually be sent to the GPU).  In this case, we don't want
       * to append - we still want to do our initial zeroing.
       */
      if (!tgt->zeroed)
         offset = 0;

      tgt->zeroed = true;

      iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
#if GEN_GEN < 12
         sob.SOBufferIndex = i;
#else
         sob._3DCommandOpcode = 0;
         sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
#endif
         sob.SurfaceBaseAddress =
            rw_bo(NULL, res->bo->gtt_offset + tgt->base.buffer_offset,
                  IRIS_DOMAIN_OTHER_WRITE);
         sob.SOBufferEnable = true;
         sob.StreamOffsetWriteEnable = true;
         sob.StreamOutputBufferOffsetAddressEnable = true;
         sob.MOCS = iris_mocs(res->bo, &screen->isl_dev);

         sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
         sob.StreamOffset = offset;
         sob.StreamOutputBufferOffsetAddress =
            rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset +
                  tgt->offset.offset, IRIS_DOMAIN_OTHER_WRITE);
      }
   }

   ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
}
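
/* Field-encoding sanity check (illustrative, not from the original source):
 * SurfaceSize is expressed in DWords minus one, so a 1024-byte buffer gives
 * MAX2(1024 / 4, 1) - 1 = 255, and the MAX2(..., 1) clamp keeps a zero-sized
 * binding from underflowing to 0xFFFFFFFF.
 */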
/**
 * An iris-vtable helper for encoding the 3DSTATE_SO_DECL_LIST and
 * 3DSTATE_STREAMOUT packets.
 *
 * 3DSTATE_SO_DECL_LIST is a list of shader outputs we want the streamout
 * hardware to record.  We can create it entirely based on the shader, with
 * no dynamic state dependencies.
 *
 * 3DSTATE_STREAMOUT is an annoying mix of shader-based information and
 * state-based settings.  We capture the shader-related ones here, and merge
 * the rest in at draw time.
 */
static uint32_t *
iris_create_so_decl_list(const struct pipe_stream_output_info *info,
                         const struct brw_vue_map *vue_map)
{
   struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
   int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int max_decls = 0;
   STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);

   memset(so_decl, 0, sizeof(so_decl));

   /* Construct the list of SO_DECLs to be emitted.  The formatting of the
    * command feels strange -- each dword pair contains a SO_DECL per stream.
    */
   for (unsigned i = 0; i < info->num_outputs; i++) {
      const struct pipe_stream_output *output = &info->output[i];
      const int buffer = output->output_buffer;
      const int varying = output->register_index;
      const unsigned stream_id = output->stream;
      assert(stream_id < MAX_VERTEX_STREAMS);

      buffer_mask[stream_id] |= 1 << buffer;

      assert(vue_map->varying_to_slot[varying] >= 0);

      /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
       * array.  Instead, it simply increments DstOffset for the following
       * input by the number of components that should be skipped.
       *
       * Our hardware is unusual in that it requires us to program SO_DECLs
       * for fake "hole" components, rather than simply taking the offset
       * for each real varying.  Each hole can have size 1, 2, 3, or 4; we
       * program as many size = 4 holes as we can, then a final hole to
       * accommodate the final 1, 2, or 3 remaining.
       */
      int skip_components = output->dst_offset - next_offset[buffer];

      while (skip_components > 0) {
         so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
            .HoleFlag = 1,
            .OutputBufferSlot = output->output_buffer,
            .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
         };
         skip_components -= 4;
      }

      next_offset[buffer] = output->dst_offset + output->num_components;

      so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
         .OutputBufferSlot = output->output_buffer,
         .RegisterIndex = vue_map->varying_to_slot[varying],
         .ComponentMask =
            ((1 << output->num_components) - 1) << output->start_component,
      };

      if (decls[stream_id] > max_decls)
         max_decls = decls[stream_id];
   }

   unsigned dwords = GENX(3DSTATE_STREAMOUT_length) + (3 + 2 * max_decls);
   uint32_t *map = ralloc_size(NULL, sizeof(uint32_t) * dwords);
   uint32_t *so_decl_map = map + GENX(3DSTATE_STREAMOUT_length);

   iris_pack_command(GENX(3DSTATE_STREAMOUT), map, sol) {
      int urb_entry_read_offset = 0;
      int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
         urb_entry_read_offset;

      /* We always read the whole vertex.  This could be reduced at some
       * point by reading less and offsetting the register index in the
       * SO_DECLs.
       */
      sol.Stream0VertexReadOffset = urb_entry_read_offset;
      sol.Stream0VertexReadLength = urb_entry_read_length - 1;
      sol.Stream1VertexReadOffset = urb_entry_read_offset;
      sol.Stream1VertexReadLength = urb_entry_read_length - 1;
      sol.Stream2VertexReadOffset = urb_entry_read_offset;
      sol.Stream2VertexReadLength = urb_entry_read_length - 1;
      sol.Stream3VertexReadOffset = urb_entry_read_offset;
      sol.Stream3VertexReadLength = urb_entry_read_length - 1;

      /* Set buffer pitches; 0 means unbound. */
      sol.Buffer0SurfacePitch = 4 * info->stride[0];
      sol.Buffer1SurfacePitch = 4 * info->stride[1];
      sol.Buffer2SurfacePitch = 4 * info->stride[2];
      sol.Buffer3SurfacePitch = 4 * info->stride[3];
   }

   iris_pack_command(GENX(3DSTATE_SO_DECL_LIST), so_decl_map, list) {
      list.DWordLength = 3 + 2 * max_decls - 2;
      list.StreamtoBufferSelects0 = buffer_mask[0];
      list.StreamtoBufferSelects1 = buffer_mask[1];
      list.StreamtoBufferSelects2 = buffer_mask[2];
      list.StreamtoBufferSelects3 = buffer_mask[3];
      list.NumEntries0 = decls[0];
      list.NumEntries1 = decls[1];
      list.NumEntries2 = decls[2];
      list.NumEntries3 = decls[3];
   }

   for (int i = 0; i < max_decls; i++) {
      iris_pack_state(GENX(SO_DECL_ENTRY), so_decl_map + 3 + i * 2, entry) {
         entry.Stream0Decl = so_decl[0][i];
         entry.Stream1Decl = so_decl[1][i];
         entry.Stream2Decl = so_decl[2][i];
         entry.Stream3Decl = so_decl[3][i];
      }
   }

   return map;
}
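
/* Worked example of the hole-splitting loop above (illustrative): a
 * gl_SkipComponents7 gap yields skip_components = 7, which the while loop
 * emits as one 4-wide hole (ComponentMask = 0xf) followed by one 3-wide
 * hole (ComponentMask = 0x7).
 */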
static void
iris_compute_sbe_urb_read_interval(uint64_t fs_input_slots,
                                   const struct brw_vue_map *last_vue_map,
                                   bool two_sided_color,
                                   unsigned *out_offset,
                                   unsigned *out_length)
{
   /* The compiler computes the first URB slot without considering COL/BFC
    * swizzling (because it doesn't know whether it's enabled), so we need
    * to do that here too.  This may result in a smaller offset, which
    * should be safe.
    */
   const unsigned first_slot =
      brw_compute_first_urb_slot_required(fs_input_slots, last_vue_map);

   /* This becomes the URB read offset (counted in pairs of slots). */
   assert(first_slot % 2 == 0);
   *out_offset = first_slot / 2;

   /* We need to adjust the inputs read to account for front/back color
    * swizzling, as it can make the URB length longer.
    */
   for (int c = 0; c <= 1; c++) {
      if (fs_input_slots & (VARYING_BIT_COL0 << c)) {
         /* If two sided color is enabled, the fragment shader's gl_Color
          * (COL0) input comes from either the gl_FrontColor (COL0) or
          * gl_BackColor (BFC0) input varyings.  Mark BFC as used, too.
          */
         if (two_sided_color)
            fs_input_slots |= (VARYING_BIT_BFC0 << c);

         /* If front color isn't written, we opt to give them back color
          * instead of an undefined value.  Switch from COL to BFC.
          */
         if (last_vue_map->varying_to_slot[VARYING_SLOT_COL0 + c] == -1) {
            fs_input_slots &= ~(VARYING_BIT_COL0 << c);
            fs_input_slots |= (VARYING_BIT_BFC0 << c);
         }
      }
   }

   /* Compute the minimum URB Read Length necessary for the FS inputs.
    *
    * From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
    * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
    *
    *    "This field should be set to the minimum length required to read the
    *     maximum source attribute.  The maximum source attribute is indicated
    *     by the maximum value of the enabled Attribute # Source Attribute if
    *     Attribute Swizzle Enable is set, Number of Output Attributes-1 if
    *     enable is not set.
    *     read_length = ceiling((max_source_attr + 1) / 2)
    *
    *     [errata] Corruption/Hang possible if length programmed larger than
    *     recommended"
    *
    * Similar text exists for Ivy Bridge.
    *
    * We find the last URB slot that's actually read by the FS.
    */
   unsigned last_read_slot = last_vue_map->num_slots - 1;
   while (last_read_slot > first_slot && !(fs_input_slots &
          (1ull << last_vue_map->slot_to_varying[last_read_slot])))
      --last_read_slot;

   /* The URB read length is the difference of the two, counted in pairs. */
   *out_length = DIV_ROUND_UP(last_read_slot - first_slot + 1, 2);
}
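
/* Worked example (illustrative): if the first FS-read slot is 4 and the last
 * is 9, then *out_offset = 4 / 2 = 2 (in slot pairs) and
 * *out_length = DIV_ROUND_UP(9 - 4 + 1, 2) = 3 pairs of slots.
 */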
static void
iris_emit_sbe_swiz(struct iris_batch *batch,
                   const struct iris_context *ice,
                   unsigned urb_read_offset,
                   unsigned sprite_coord_enables)
{
   struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = {};
   const struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
   const struct brw_vue_map *vue_map = ice->shaders.last_vue_map;
   const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;

   /* XXX: this should be generated when putting programs in place */

   for (uint8_t idx = 0; idx < wm_prog_data->urb_setup_attribs_count; idx++) {
      const uint8_t fs_attr = wm_prog_data->urb_setup_attribs[idx];
      const int input_index = wm_prog_data->urb_setup[fs_attr];
      if (input_index < 0 || input_index >= 16)
         continue;

      struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr =
         &attr_overrides[input_index];
      int slot = vue_map->varying_to_slot[fs_attr];

      /* Viewport and Layer are stored in the VUE header.  We need to override
       * them to zero if earlier stages didn't write them, as GL requires that
       * they read back as zero when not explicitly set.
       */
      switch (fs_attr) {
      case VARYING_SLOT_VIEWPORT:
      case VARYING_SLOT_LAYER:
         attr->ComponentOverrideX = true;
         attr->ComponentOverrideW = true;
         attr->ConstantSource = CONST_0000;

         if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
            attr->ComponentOverrideY = true;
         if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
            attr->ComponentOverrideZ = true;
         continue;

      case VARYING_SLOT_PRIMITIVE_ID:
         /* Override if the previous shader stage didn't write gl_PrimitiveID. */
         if (slot == -1) {
            attr->ComponentOverrideX = true;
            attr->ComponentOverrideY = true;
            attr->ComponentOverrideZ = true;
            attr->ComponentOverrideW = true;
            attr->ConstantSource = PRIM_ID;
            continue;
         }

      default:
         break;
      }

      if (sprite_coord_enables & (1 << input_index))
         continue;

      /* If there was only a back color written but not front, use back
       * as the color instead of undefined.
       */
      if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
         slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
      if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
         slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];

      /* Not written by the previous stage - undefined. */
      if (slot == -1) {
         attr->ComponentOverrideX = true;
         attr->ComponentOverrideY = true;
         attr->ComponentOverrideZ = true;
         attr->ComponentOverrideW = true;
         attr->ConstantSource = CONST_0001_FLOAT;
         continue;
      }

      /* Compute the location of the attribute relative to the read offset,
       * which is counted in 256-bit increments (two 128-bit VUE slots).
       */
      const int source_attr = slot - 2 * urb_read_offset;
      assert(source_attr >= 0 && source_attr <= 32);
      attr->SourceAttribute = source_attr;

      /* If we are doing two-sided color, and the VUE slot following this one
       * represents a back-facing color, then we need to instruct the SF unit
       * to do back-facing swizzling.
       */
      if (cso_rast->light_twoside &&
          ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
            vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
           (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
            vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1)))
         attr->SwizzleSelect = INPUTATTR_FACING;
   }

   iris_emit_cmd(batch, GENX(3DSTATE_SBE_SWIZ), sbes) {
      for (int i = 0; i < 16; i++)
         sbes.Attribute[i] = attr_overrides[i];
   }
}
static unsigned
iris_calculate_point_sprite_overrides(const struct brw_wm_prog_data *prog_data,
                                      const struct iris_rasterizer_state *cso)
{
   unsigned overrides = 0;

   if (prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
      overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_PNTC];

   for (int i = 0; i < 8; i++) {
      if ((cso->sprite_coord_enable & (1 << i)) &&
          prog_data->urb_setup[VARYING_SLOT_TEX0 + i] != -1)
         overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_TEX0 + i];
   }

   return overrides;
}
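
/* Example (illustrative): if gl_PointCoord (VARYING_SLOT_PNTC) landed in FS
 * urb_setup slot 3, bit 3 of the returned mask is set, which in turn enables
 * PointSpriteTextureCoordinateEnable for that attribute in 3DSTATE_SBE.
 */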
static void
iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
{
   const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
   const struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
   const struct shader_info *fs_info =
      iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);

   unsigned urb_read_offset, urb_read_length;
   iris_compute_sbe_urb_read_interval(fs_info->inputs_read,
                                      ice->shaders.last_vue_map,
                                      cso_rast->light_twoside,
                                      &urb_read_offset, &urb_read_length);

   unsigned sprite_coord_overrides =
      iris_calculate_point_sprite_overrides(wm_prog_data, cso_rast);

   iris_emit_cmd(batch, GENX(3DSTATE_SBE), sbe) {
      sbe.AttributeSwizzleEnable = true;
      sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
      sbe.PointSpriteTextureCoordinateOrigin = cso_rast->sprite_coord_mode;
      sbe.VertexURBEntryReadOffset = urb_read_offset;
      sbe.VertexURBEntryReadLength = urb_read_length;
      sbe.ForceVertexURBEntryReadOffset = true;
      sbe.ForceVertexURBEntryReadLength = true;
      sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
      sbe.PointSpriteTextureCoordinateEnable = sprite_coord_overrides;
#if GEN_GEN >= 9
      for (int i = 0; i < 32; i++) {
         sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
      }
#endif
   }

   iris_emit_sbe_swiz(batch, ice, urb_read_offset, sprite_coord_overrides);
}
/* ------------------------------------------------------------------- */

/**
 * Populate VS program key fields based on the current state.
 */
static void
iris_populate_vs_key(const struct iris_context *ice,
                     const struct shader_info *info,
                     gl_shader_stage last_stage,
                     struct iris_vs_prog_key *key)
{
   const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;

   if (info->clip_distance_array_size == 0 &&
       (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
       last_stage == MESA_SHADER_VERTEX)
      key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
 * Populate TCS program key fields based on the current state.
 */
static void
iris_populate_tcs_key(const struct iris_context *ice,
                      struct iris_tcs_prog_key *key)
{
}
/**
 * Populate TES program key fields based on the current state.
 */
static void
iris_populate_tes_key(const struct iris_context *ice,
                      const struct shader_info *info,
                      gl_shader_stage last_stage,
                      struct iris_tes_prog_key *key)
{
   const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;

   if (info->clip_distance_array_size == 0 &&
       (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
       last_stage == MESA_SHADER_TESS_EVAL)
      key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
 * Populate GS program key fields based on the current state.
 */
static void
iris_populate_gs_key(const struct iris_context *ice,
                     const struct shader_info *info,
                     gl_shader_stage last_stage,
                     struct iris_gs_prog_key *key)
{
   const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;

   if (info->clip_distance_array_size == 0 &&
       (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
       last_stage == MESA_SHADER_GEOMETRY)
      key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
 * Populate FS program key fields based on the current state.
 */
static void
iris_populate_fs_key(const struct iris_context *ice,
                     const struct shader_info *info,
                     struct iris_fs_prog_key *key)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;
   const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
   const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa;
   const struct iris_rasterizer_state *rast = ice->state.cso_rast;
   const struct iris_blend_state *blend = ice->state.cso_blend;

   key->nr_color_regions = fb->nr_cbufs;

   key->clamp_fragment_color = rast->clamp_fragment_color;

   key->alpha_to_coverage = blend->alpha_to_coverage;

   key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha.enabled;

   key->flat_shade = rast->flatshade &&
      (info->inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1));

   key->persample_interp = rast->force_persample_interp;
   key->multisample_fbo = rast->multisample && fb->samples > 1;

   key->coherent_fb_fetch = GEN_GEN >= 9;

   key->force_dual_color_blend =
      screen->driconf.dual_color_blend_by_location &&
      (blend->blend_enables & 1) && blend->dual_color_blending;

   /* TODO: Respect glHint for key->high_quality_derivatives */
}
static void
iris_populate_cs_key(const struct iris_context *ice,
                     struct iris_cs_prog_key *key)
{
}
static uint64_t
KSP(const struct iris_compiled_shader *shader)
{
   struct iris_resource *res = (void *) shader->assembly.res;
   return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
}
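
/* Note (added for clarity): KSP values are offsets rather than absolute
 * addresses because the hardware's "Kernel Start Pointer" fields are
 * interpreted relative to the Instruction Base Address programmed in
 * STATE_BASE_ADDRESS -- hence the iris_bo_offset_from_base_address() call.
 */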
#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage)                   \
   pkt.KernelStartPointer = KSP(shader);                                  \
   pkt.BindingTableEntryCount = shader->bt.size_bytes / 4;                \
   pkt.FloatingPointMode = prog_data->use_alt_mode;                       \
                                                                          \
   pkt.DispatchGRFStartRegisterForURBData =                               \
      prog_data->dispatch_grf_start_reg;                                  \
   pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length;       \
   pkt.prefix##URBEntryReadOffset = 0;                                    \
                                                                          \
   pkt.StatisticsEnable = true;                                           \
   pkt.Enable           = true;                                           \
                                                                          \
   if (prog_data->total_scratch) {                                        \
      struct iris_bo *bo =                                                \
         iris_get_scratch_space(ice, prog_data->total_scratch, stage);    \
      uint32_t scratch_addr = bo->gtt_offset;                             \
      pkt.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;     \
      pkt.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr,             \
                                          IRIS_DOMAIN_NONE);              \
   }
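
/* Encoding note (not from the original source): total_scratch is a power of
 * two here, so ffs(total_scratch) - 11 is a log2-style encoding of the
 * per-thread scratch size; e.g. 2048 bytes -> ffs = 12 -> field value 1.
 */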
/**
 * Encode most of 3DSTATE_VS based on the compiled shader.
 */
static void
iris_store_vs_state(struct iris_context *ice,
                    const struct gen_device_info *devinfo,
                    struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;

   iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
      INIT_THREAD_DISPATCH_FIELDS(vs, Vertex, MESA_SHADER_VERTEX);
      vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
      vs.SIMD8DispatchEnable = true;
      vs.UserClipDistanceCullTestEnableBitmask =
         vue_prog_data->cull_distance_mask;
   }
}
/**
 * Encode most of 3DSTATE_HS based on the compiled shader.
 */
static void
iris_store_tcs_state(struct iris_context *ice,
                     const struct gen_device_info *devinfo,
                     struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
   struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;

   iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
      INIT_THREAD_DISPATCH_FIELDS(hs, Vertex, MESA_SHADER_TESS_CTRL);

#if GEN_GEN >= 12
      /* GEN:BUG:1604578095:
       *
       * Hang occurs when the number of max threads is less than 2 times
       * the number of instance count.  The number of max threads must be
       * more than 2 times the number of instance count.
       */
      assert((devinfo->max_tcs_threads / 2) > tcs_prog_data->instances);
      hs.DispatchGRFStartRegisterForURBData = prog_data->dispatch_grf_start_reg & 0x1f;
      hs.DispatchGRFStartRegisterForURBData5 = prog_data->dispatch_grf_start_reg >> 5;
#endif

      hs.InstanceCount = tcs_prog_data->instances - 1;
      hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
      hs.IncludeVertexHandles = true;

#if GEN_GEN == 12
      /* Patch Count threshold specifies the maximum number of patches that
       * will be accumulated before a thread dispatch is forced.
       */
      hs.PatchCountThreshold = tcs_prog_data->patch_count_threshold;
#endif

#if GEN_GEN >= 9
      hs.DispatchMode = vue_prog_data->dispatch_mode;
      hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
#endif
   }
}
/**
 * Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
 */
static void
iris_store_tes_state(struct iris_context *ice,
                     const struct gen_device_info *devinfo,
                     struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
   struct brw_tes_prog_data *tes_prog_data = (void *) prog_data;

   uint32_t *te_state = (void *) shader->derived_data;
   uint32_t *ds_state = te_state + GENX(3DSTATE_TE_length);

   iris_pack_command(GENX(3DSTATE_TE), te_state, te) {
      te.Partitioning = tes_prog_data->partitioning;
      te.OutputTopology = tes_prog_data->output_topology;
      te.TEDomain = tes_prog_data->domain;
      te.TEEnable = true;
      te.MaximumTessellationFactorOdd = 63.0;
      te.MaximumTessellationFactorNotOdd = 64.0;
   }

   iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
      INIT_THREAD_DISPATCH_FIELDS(ds, Patch, MESA_SHADER_TESS_EVAL);

      ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
      ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
      ds.ComputeWCoordinateEnable =
         tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;

      ds.UserClipDistanceCullTestEnableBitmask =
         vue_prog_data->cull_distance_mask;
   }
}
/**
 * Encode most of 3DSTATE_GS based on the compiled shader.
 */
static void
iris_store_gs_state(struct iris_context *ice,
                    const struct gen_device_info *devinfo,
                    struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
   struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;

   iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
      INIT_THREAD_DISPATCH_FIELDS(gs, Vertex, MESA_SHADER_GEOMETRY);

      gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
      gs.OutputTopology = gs_prog_data->output_topology;
      gs.ControlDataHeaderSize =
         gs_prog_data->control_data_header_size_hwords;
      gs.InstanceControl = gs_prog_data->invocations - 1;
      gs.DispatchMode = DISPATCH_MODE_SIMD8;
      gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
      gs.ControlDataFormat = gs_prog_data->control_data_format;
      gs.ReorderMode = TRAILING;
      gs.ExpectedVertexCount = gs_prog_data->vertices_in;
      gs.MaximumNumberofThreads =
         GEN_GEN == 8 ? (devinfo->max_gs_threads / 2 - 1)
                      : (devinfo->max_gs_threads - 1);

      if (gs_prog_data->static_vertex_count != -1) {
         gs.StaticOutput = true;
         gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
      }
      gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;

      gs.UserClipDistanceCullTestEnableBitmask =
         vue_prog_data->cull_distance_mask;

      const int urb_entry_write_offset = 1;
      const uint32_t urb_entry_output_length =
         DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
         urb_entry_write_offset;

      gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
      gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
   }
}
/**
 * Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
 */
static void
iris_store_fs_state(struct iris_context *ice,
                    const struct gen_device_info *devinfo,
                    struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_wm_prog_data *wm_prog_data = (void *) shader->prog_data;

   uint32_t *ps_state = (void *) shader->derived_data;
   uint32_t *psx_state = ps_state + GENX(3DSTATE_PS_length);

   iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
      ps.VectorMaskEnable = true;
      ps.BindingTableEntryCount = shader->bt.size_bytes / 4;
      ps.FloatingPointMode = prog_data->use_alt_mode;
      ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);

      ps.PushConstantEnable = prog_data->ubo_ranges[0].length > 0;

      /* From the documentation for this packet:
       *
       *    "If the PS kernel does not need the Position XY Offsets to
       *     compute a Position Value, then this field should be programmed
       *     to POSOFFSET_NONE."
       *
       *    "SW Recommendation: If the PS kernel needs the Position Offsets
       *     to compute a Position XY value, this field should match Position
       *     ZW Interpolation Mode to ensure a consistent position.xyzw
       *     computation."
       *
       * We only require XY sample offsets, so this recommendation doesn't
       * look useful at the moment.  We might need it in the future.
       */
      ps.PositionXYOffsetSelect =
         wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;

      if (prog_data->total_scratch) {
         struct iris_bo *bo =
            iris_get_scratch_space(ice, prog_data->total_scratch,
                                   MESA_SHADER_FRAGMENT);
         uint32_t scratch_addr = bo->gtt_offset;
         ps.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
         ps.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr,
                                            IRIS_DOMAIN_NONE);
      }
   }

   iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
      psx.PixelShaderValid = true;
      psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
      psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
      psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
      psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
      psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
      psx.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;
      psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;

#if GEN_GEN >= 9
      psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
      psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
#endif
   }
}
/**
 * Compute the size of the derived data (shader command packets).
 *
 * This must match the data written by the iris_store_xs_state() functions.
 */
static void
iris_store_cs_state(struct iris_context *ice,
                    const struct gen_device_info *devinfo,
                    struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_cs_prog_data *cs_prog_data = (void *) shader->prog_data;
   void *map = shader->derived_data;

   iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
      desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
      desc.SharedLocalMemorySize =
         encode_slm_size(GEN_GEN, prog_data->total_shared);
      desc.BarrierEnable = cs_prog_data->uses_barrier;
      desc.CrossThreadConstantDataReadLength =
         cs_prog_data->push.cross_thread.regs;

#if GEN_GEN >= 12
      /* TODO: Check if we are missing workarounds and enable mid-thread
       * preemption.
       *
       * We still have issues with mid-thread preemption (it was already
       * disabled by the kernel on gen11, due to missing workarounds).  It's
       * possible that we are just missing some workarounds, and could enable
       * it later, but for now let's disable it to fix a GPU hang in compute
       * in Car Chase (and possibly more).
       */
      desc.ThreadPreemptionDisable = true;
#endif
   }
}
static unsigned
iris_derived_program_state_size(enum iris_program_cache_id cache_id)
{
   assert(cache_id <= IRIS_CACHE_BLORP);

   static const unsigned dwords[] = {
      [IRIS_CACHE_VS]  = GENX(3DSTATE_VS_length),
      [IRIS_CACHE_TCS] = GENX(3DSTATE_HS_length),
      [IRIS_CACHE_TES] = GENX(3DSTATE_TE_length) + GENX(3DSTATE_DS_length),
      [IRIS_CACHE_GS]  = GENX(3DSTATE_GS_length),
      [IRIS_CACHE_FS]  =
         GENX(3DSTATE_PS_length) + GENX(3DSTATE_PS_EXTRA_length),
      [IRIS_CACHE_CS]  = GENX(INTERFACE_DESCRIPTOR_DATA_length),
      [IRIS_CACHE_BLORP] = 0,
   };

   return sizeof(uint32_t) * dwords[cache_id];
}
/**
 * Create any state packets corresponding to the given shader stage
 * (i.e. 3DSTATE_VS) and save them as "derived data" in the shader variant.
 * This means that we can look up a program in the in-memory cache and
 * get most of the state packet without having to reconstruct it.
 */
static void
iris_store_derived_program_state(struct iris_context *ice,
                                 enum iris_program_cache_id cache_id,
                                 struct iris_compiled_shader *shader)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;
   const struct gen_device_info *devinfo = &screen->devinfo;

   switch (cache_id) {
   case IRIS_CACHE_VS:
      iris_store_vs_state(ice, devinfo, shader);
      break;
   case IRIS_CACHE_TCS:
      iris_store_tcs_state(ice, devinfo, shader);
      break;
   case IRIS_CACHE_TES:
      iris_store_tes_state(ice, devinfo, shader);
      break;
   case IRIS_CACHE_GS:
      iris_store_gs_state(ice, devinfo, shader);
      break;
   case IRIS_CACHE_FS:
      iris_store_fs_state(ice, devinfo, shader);
      break;
   case IRIS_CACHE_CS:
      iris_store_cs_state(ice, devinfo, shader);
      break;
   case IRIS_CACHE_BLORP:
      break;
   }
}
/* ------------------------------------------------------------------- */

static const uint32_t push_constant_opcodes[] = {
   [MESA_SHADER_VERTEX]    = 21,
   [MESA_SHADER_TESS_CTRL] = 25, /* HS */
   [MESA_SHADER_TESS_EVAL] = 26, /* DS */
   [MESA_SHADER_GEOMETRY]  = 22,
   [MESA_SHADER_FRAGMENT]  = 23,
   [MESA_SHADER_COMPUTE]   = 0,
};
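
/* Note (added for clarity): these values appear to be the
 * _3DCommandSubOpcode numbers for the 3DSTATE_CONSTANT_VS/HS/DS/GS/PS
 * packets, letting push-constant upload code synthesize the right packet
 * header per stage.
 */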
static uint32_t
use_null_surface(struct iris_batch *batch, struct iris_context *ice)
{
   struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);

   iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);

   return ice->state.unbound_tex.offset;
}
static uint32_t
use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
{
   /* If set_framebuffer_state() was never called, fall back to 1x1x1 */
   if (!ice->state.null_fb.res)
      return use_null_surface(batch, ice);

   struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);

   iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);

   return ice->state.null_fb.offset;
}
static uint32_t
surf_state_offset_for_aux(struct iris_resource *res,
                          unsigned aux_modes,
                          enum isl_aux_usage aux_usage)
{
   assert(aux_modes & (1 << aux_usage));
   return SURFACE_STATE_ALIGNMENT *
          util_bitcount(aux_modes & ((1 << aux_usage) - 1));
}
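
/* In other words: surface states for a resource are stored as a contiguous
 * array, one SURFACE_STATE_ALIGNMENT-sized entry per enabled aux mode, and
 * util_bitcount(aux_modes & ((1 << aux_usage) - 1)) counts how many enabled
 * modes precede aux_usage in enum order to locate this usage's entry.
 */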
static void
surf_state_update_clear_value(struct iris_batch *batch,
                              struct iris_resource *res,
                              struct iris_state_ref *state,
                              unsigned aux_modes,
                              enum isl_aux_usage aux_usage)
{
   struct isl_device *isl_dev = &batch->screen->isl_dev;
   struct iris_bo *state_bo = iris_resource_bo(state->res);
   uint64_t real_offset = state->offset + IRIS_MEMZONE_BINDER_START;
   uint32_t offset_into_bo = real_offset - state_bo->gtt_offset;
   uint32_t clear_offset = offset_into_bo +
      isl_dev->ss.clear_value_offset +
      surf_state_offset_for_aux(res, aux_modes, aux_usage);
   uint32_t *color = res->aux.clear_color.u32;

   assert(isl_dev->ss.clear_value_size == 16);

   if (aux_usage == ISL_AUX_USAGE_HIZ) {
      iris_emit_pipe_control_write(batch, "update fast clear value (Z)",
                                   PIPE_CONTROL_WRITE_IMMEDIATE,
                                   state_bo, clear_offset, color[0]);
   } else {
      iris_emit_pipe_control_write(batch, "update fast clear color (RG__)",
                                   PIPE_CONTROL_WRITE_IMMEDIATE,
                                   state_bo, clear_offset,
                                   (uint64_t) color[0] |
                                   (uint64_t) color[1] << 32);
      iris_emit_pipe_control_write(batch, "update fast clear color (__BA)",
                                   PIPE_CONTROL_WRITE_IMMEDIATE,
                                   state_bo, clear_offset + 8,
                                   (uint64_t) color[2] |
                                   (uint64_t) color[3] << 32);
   }

   iris_emit_pipe_control_flush(batch,
                                "update fast clear: state cache invalidate",
                                PIPE_CONTROL_FLUSH_ENABLE |
                                PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}
static void
update_clear_value(struct iris_context *ice,
                   struct iris_batch *batch,
                   struct iris_resource *res,
                   struct iris_surface_state *surf_state,
                   unsigned all_aux_modes,
                   struct isl_view *view)
{
   UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
   UNUSED unsigned aux_modes = all_aux_modes;

   /* We only need to update the clear color in the surface state for gen8 and
    * gen9.  Newer gens can read it directly from the clear color state buffer.
    */
#if GEN_GEN == 9
   /* Skip updating the ISL_AUX_USAGE_NONE surface state */
   aux_modes &= ~(1 << ISL_AUX_USAGE_NONE);

   while (aux_modes) {
      enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);

      surf_state_update_clear_value(batch, res, &surf_state->ref,
                                    all_aux_modes, aux_usage);
   }
#elif GEN_GEN == 8
   /* TODO: Could update rather than re-filling */
   alloc_surface_states(surf_state, all_aux_modes);

   void *map = surf_state->cpu;

   while (aux_modes) {
      enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
      fill_surface_state(isl_dev, map, res, &res->surf, view, aux_usage,
                         0, 0, 0);
      map += SURFACE_STATE_ALIGNMENT;
   }

   upload_surface_states(ice->state.surface_uploader, surf_state);
#endif
}
/**
 * Add a surface to the validation list, as well as the buffer containing
 * the corresponding SURFACE_STATE.
 *
 * Returns the binding table entry (offset to SURFACE_STATE).
 */
static uint32_t
use_surface(struct iris_context *ice,
            struct iris_batch *batch,
            struct pipe_surface *p_surf,
            bool writeable,
            enum isl_aux_usage aux_usage,
            bool is_read_surface,
            enum iris_domain access)
{
   struct iris_surface *surf = (void *) p_surf;
   struct iris_resource *res = (void *) p_surf->texture;
   uint32_t offset = 0;

   iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture),
                      writeable, access);
   if (GEN_GEN == 8 && is_read_surface) {
      iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state_read.ref.res), false,
                         IRIS_DOMAIN_NONE);
   } else {
      iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.ref.res), false,
                         IRIS_DOMAIN_NONE);
   }

   if (res->aux.bo) {
      iris_use_pinned_bo(batch, res->aux.bo, writeable, access);
      if (res->aux.clear_color_bo)
         iris_use_pinned_bo(batch, res->aux.clear_color_bo, false, access);

      if (memcmp(&res->aux.clear_color, &surf->clear_color,
                 sizeof(surf->clear_color)) != 0) {
         update_clear_value(ice, batch, res, &surf->surface_state,
                            res->aux.possible_usages, &surf->view);
         if (GEN_GEN == 8) {
            update_clear_value(ice, batch, res, &surf->surface_state_read,
                               res->aux.possible_usages, &surf->read_view);
         }
         surf->clear_color = res->aux.clear_color;
      }
   }

   offset = (GEN_GEN == 8 && is_read_surface)
               ? surf->surface_state_read.ref.offset
               : surf->surface_state.ref.offset;

   return offset +
          surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
}
static uint32_t
use_sampler_view(struct iris_context *ice,
                 struct iris_batch *batch,
                 struct iris_sampler_view *isv)
{
   enum isl_aux_usage aux_usage =
      iris_resource_texture_aux_usage(ice, isv->res, isv->view.format);

   iris_use_pinned_bo(batch, isv->res->bo, false, IRIS_DOMAIN_OTHER_READ);
   iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.ref.res), false,
                      IRIS_DOMAIN_NONE);

   if (isv->res->aux.bo) {
      iris_use_pinned_bo(batch, isv->res->aux.bo,
                         false, IRIS_DOMAIN_OTHER_READ);
      if (isv->res->aux.clear_color_bo)
         iris_use_pinned_bo(batch, isv->res->aux.clear_color_bo,
                            false, IRIS_DOMAIN_OTHER_READ);
      if (memcmp(&isv->res->aux.clear_color, &isv->clear_color,
                 sizeof(isv->clear_color)) != 0) {
         update_clear_value(ice, batch, isv->res, &isv->surface_state,
                            isv->res->aux.sampler_usages, &isv->view);
         isv->clear_color = isv->res->aux.clear_color;
      }
   }

   return isv->surface_state.ref.offset +
          surf_state_offset_for_aux(isv->res, isv->res->aux.sampler_usages,
                                    aux_usage);
}
static uint32_t
use_ubo_ssbo(struct iris_batch *batch,
             struct iris_context *ice,
             struct pipe_shader_buffer *buf,
             struct iris_state_ref *surf_state,
             bool writable, enum iris_domain access)
{
   if (!buf->buffer || !surf_state->res)
      return use_null_surface(batch, ice);

   iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable, access);
   iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false,
                      IRIS_DOMAIN_NONE);

   return surf_state->offset;
}
static uint32_t
use_image(struct iris_batch *batch, struct iris_context *ice,
          struct iris_shader_state *shs, const struct shader_info *info,
          int i)
{
   struct iris_image_view *iv = &shs->image[i];
   struct iris_resource *res = (void *) iv->base.resource;

   if (!res)
      return use_null_surface(batch, ice);

   bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE;

   iris_use_pinned_bo(batch, res->bo, write, IRIS_DOMAIN_NONE);
   iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.ref.res),
                      false, IRIS_DOMAIN_NONE);

   if (res->aux.bo)
      iris_use_pinned_bo(batch, res->aux.bo, write, IRIS_DOMAIN_NONE);

   enum isl_aux_usage aux_usage =
      iris_image_view_aux_usage(ice, &iv->base, info);

   return iv->surface_state.ref.offset +
          surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
}
#define push_bt_entry(addr) \
   assert(addr >= binder_addr); \
   assert(s < shader->bt.size_bytes / sizeof(uint32_t)); \
   if (!pin_only) bt_map[s++] = (addr) - binder_addr;

#define bt_assert(section) \
   if (!pin_only && shader->bt.used_mask[section] != 0) \
      assert(shader->bt.offsets[section] == s);
/**
 * Populate the binding table for a given shader stage.
 *
 * This fills out the table of pointers to surfaces required by the shader,
 * and also adds those buffers to the validation list so the kernel can make
 * them resident before running our batch.  If pin_only is true, the surfaces
 * are only pinned; the table entries themselves are left untouched.
 */
static void
iris_populate_binding_table(struct iris_context *ice,
                            struct iris_batch *batch,
                            gl_shader_stage stage,
                            bool pin_only)
{
   const struct iris_binder *binder = &ice->state.binder;
   struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
   struct iris_compiled_shader *shader = ice->shaders.prog[stage];
   if (!shader)
      return;

   struct iris_binding_table *bt = &shader->bt;
   UNUSED struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct iris_shader_state *shs = &ice->state.shaders[stage];
   uint32_t binder_addr = binder->bo->gtt_offset;

   uint32_t *bt_map = binder->map + binder->bt_offset[stage];
   int s = 0;

   const struct shader_info *info = iris_get_shader_info(ice, stage);
   if (!info) {
      /* TCS passthrough doesn't need a binding table. */
      assert(stage == MESA_SHADER_TESS_CTRL);
      return;
   }

   if (stage == MESA_SHADER_COMPUTE &&
       shader->bt.used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS]) {
      /* surface for gl_NumWorkGroups */
      struct iris_state_ref *grid_data = &ice->state.grid_size;
      struct iris_state_ref *grid_state = &ice->state.grid_surf_state;
      iris_use_pinned_bo(batch, iris_resource_bo(grid_data->res), false,
                         IRIS_DOMAIN_OTHER_READ);
      iris_use_pinned_bo(batch, iris_resource_bo(grid_state->res), false,
                         IRIS_DOMAIN_NONE);
      push_bt_entry(grid_state->offset);
   }

   if (stage == MESA_SHADER_FRAGMENT) {
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      /* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
      if (cso_fb->nr_cbufs) {
         for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
            uint32_t addr;
            if (cso_fb->cbufs[i]) {
               addr = use_surface(ice, batch, cso_fb->cbufs[i], true,
                                  ice->state.draw_aux_usage[i], false,
                                  IRIS_DOMAIN_RENDER_WRITE);
            } else {
               addr = use_null_fb_surface(batch, ice);
            }
            push_bt_entry(addr);
         }
      } else if (GEN_GEN < 11) {
         uint32_t addr = use_null_fb_surface(batch, ice);
         push_bt_entry(addr);
      }
   }

#define foreach_surface_used(index, group) \
   bt_assert(group); \
   for (int index = 0; index < bt->sizes[group]; index++) \
      if (iris_group_index_to_bti(bt, group, index) != \
          IRIS_SURFACE_NOT_USED)

   foreach_surface_used(i, IRIS_SURFACE_GROUP_RENDER_TARGET_READ) {
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      uint32_t addr;
      if (cso_fb->cbufs[i]) {
         addr = use_surface(ice, batch, cso_fb->cbufs[i],
                            false, ice->state.draw_aux_usage[i], true,
                            IRIS_DOMAIN_OTHER_READ);
         push_bt_entry(addr);
      }
   }

   foreach_surface_used(i, IRIS_SURFACE_GROUP_TEXTURE) {
      struct iris_sampler_view *view = shs->textures[i];
      uint32_t addr = view ? use_sampler_view(ice, batch, view)
                           : use_null_surface(batch, ice);
      push_bt_entry(addr);
   }

   foreach_surface_used(i, IRIS_SURFACE_GROUP_IMAGE) {
      uint32_t addr = use_image(batch, ice, shs, info, i);
      push_bt_entry(addr);
   }

   foreach_surface_used(i, IRIS_SURFACE_GROUP_UBO) {
      uint32_t addr;

      if (i == bt->sizes[IRIS_SURFACE_GROUP_UBO] - 1) {
         if (ish->const_data) {
            iris_use_pinned_bo(batch, iris_resource_bo(ish->const_data), false,
                               IRIS_DOMAIN_OTHER_READ);
            iris_use_pinned_bo(batch, iris_resource_bo(ish->const_data_state.res),
                               false, IRIS_DOMAIN_NONE);
            addr = ish->const_data_state.offset;
         } else {
            /* This can only happen with INTEL_DISABLE_COMPACT_BINDING_TABLE=1. */
            addr = use_null_surface(batch, ice);
         }
      } else {
         addr = use_ubo_ssbo(batch, ice, &shs->constbuf[i],
                             &shs->constbuf_surf_state[i], false,
                             IRIS_DOMAIN_OTHER_READ);
      }

      push_bt_entry(addr);
   }

   foreach_surface_used(i, IRIS_SURFACE_GROUP_SSBO) {
      uint32_t addr =
         use_ubo_ssbo(batch, ice, &shs->ssbo[i], &shs->ssbo_surf_state[i],
                      shs->writable_ssbos & (1u << i), IRIS_DOMAIN_NONE);
      push_bt_entry(addr);
   }

#if 0
      /* XXX: YUV surfaces not implemented yet */
      bt_assert(plane_start[1], ...);
      bt_assert(plane_start[2], ...);
#endif
}
static void
iris_use_optional_res(struct iris_batch *batch,
                      struct pipe_resource *res,
                      bool writeable,
                      enum iris_domain access)
{
   if (res) {
      struct iris_bo *bo = iris_resource_bo(res);
      iris_use_pinned_bo(batch, bo, writeable, access);
   }
}
*batch
,
4962 struct pipe_surface
*zsbuf
,
4963 struct iris_depth_stencil_alpha_state
*cso_zsa
)
4968 struct iris_resource
*zres
, *sres
;
4969 iris_get_depth_stencil_resources(zsbuf
->texture
, &zres
, &sres
);
4972 const enum iris_domain access
= cso_zsa
->depth_writes_enabled
?
4973 IRIS_DOMAIN_DEPTH_WRITE
: IRIS_DOMAIN_OTHER_READ
;
4974 iris_use_pinned_bo(batch
, zres
->bo
, cso_zsa
->depth_writes_enabled
,
4977 iris_use_pinned_bo(batch
, zres
->aux
.bo
,
4978 cso_zsa
->depth_writes_enabled
, access
);
4983 const enum iris_domain access
= cso_zsa
->stencil_writes_enabled
?
4984 IRIS_DOMAIN_DEPTH_WRITE
: IRIS_DOMAIN_OTHER_READ
;
4985 iris_use_pinned_bo(batch
, sres
->bo
, cso_zsa
->stencil_writes_enabled
,
/* ------------------------------------------------------------------- */

/**
 * Pin any BOs which were installed by a previous batch, and restored
 * via the hardware logical context mechanism.
 *
 * We don't need to re-emit all state every batch - the hardware context
 * mechanism will save and restore it for us.  This includes pointers to
 * various BOs...which won't exist unless we ask the kernel to pin them
 * by adding them to the validation list.
 *
 * We can skip buffers if we've re-emitted those packets, as we're
 * overwriting those stale pointers with new ones, and don't actually
 * refer to the old BOs.
 */
static void
iris_restore_render_saved_bos(struct iris_context *ice,
                              struct iris_batch *batch,
                              const struct pipe_draw_info *draw)
{
   struct iris_genx_state *genx = ice->state.genx;

   const uint64_t clean = ~ice->state.dirty;
   const uint64_t stage_clean = ~ice->state.stage_dirty;

   if (clean & IRIS_DIRTY_CC_VIEWPORT) {
      iris_use_optional_res(batch, ice->state.last_res.cc_vp, false,
                            IRIS_DOMAIN_NONE);
   }

   if (clean & IRIS_DIRTY_SF_CL_VIEWPORT) {
      iris_use_optional_res(batch, ice->state.last_res.sf_cl_vp, false,
                            IRIS_DOMAIN_NONE);
   }

   if (clean & IRIS_DIRTY_BLEND_STATE) {
      iris_use_optional_res(batch, ice->state.last_res.blend, false,
                            IRIS_DOMAIN_NONE);
   }

   if (clean & IRIS_DIRTY_COLOR_CALC_STATE) {
      iris_use_optional_res(batch, ice->state.last_res.color_calc, false,
                            IRIS_DOMAIN_NONE);
   }

   if (clean & IRIS_DIRTY_SCISSOR_RECT) {
      iris_use_optional_res(batch, ice->state.last_res.scissor, false,
                            IRIS_DOMAIN_NONE);
   }

   if (ice->state.streamout_active && (clean & IRIS_DIRTY_SO_BUFFERS)) {
      for (int i = 0; i < 4; i++) {
         struct iris_stream_output_target *tgt =
            (void *) ice->state.so_target[i];

         if (tgt) {
            iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
                               true, IRIS_DOMAIN_OTHER_WRITE);
            iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
                               true, IRIS_DOMAIN_OTHER_WRITE);
         }
      }
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(stage_clean & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)))
         continue;

      struct iris_shader_state *shs = &ice->state.shaders[stage];
      struct iris_compiled_shader *shader = ice->shaders.prog[stage];

      if (!shader)
         continue;

      struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;

      for (int i = 0; i < 4; i++) {
         const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];

         if (range->length == 0)
            continue;

         /* Range block is a binding table index, map back to UBO index. */
         unsigned block_index = iris_bti_to_group_index(
            &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
         assert(block_index != IRIS_SURFACE_NOT_USED);

         struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
         struct iris_resource *res = (void *) cbuf->buffer;

         if (res)
            iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_OTHER_READ);
         else
            iris_use_pinned_bo(batch, batch->screen->workaround_bo, false,
                               IRIS_DOMAIN_OTHER_READ);
      }
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (stage_clean & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
         /* Re-pin any buffers referred to by the binding table. */
         iris_populate_binding_table(ice, batch, stage, true);
      }
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      struct iris_shader_state *shs = &ice->state.shaders[stage];
      struct pipe_resource *res = shs->sampler_table.res;
      if (res)
         iris_use_pinned_bo(batch, iris_resource_bo(res), false,
                            IRIS_DOMAIN_NONE);
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (stage_clean & (IRIS_STAGE_DIRTY_VS << stage)) {
         struct iris_compiled_shader *shader = ice->shaders.prog[stage];

         if (shader) {
            struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
            iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

            struct brw_stage_prog_data *prog_data = shader->prog_data;

            if (prog_data->total_scratch > 0) {
               struct iris_bo *bo =
                  iris_get_scratch_space(ice, prog_data->total_scratch, stage);
               iris_use_pinned_bo(batch, bo, true, IRIS_DOMAIN_NONE);
            }
         }
      }
   }

   if ((clean & IRIS_DIRTY_DEPTH_BUFFER) &&
       (clean & IRIS_DIRTY_WM_DEPTH_STENCIL)) {
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
   }

   iris_use_optional_res(batch, ice->state.last_res.index_buffer, false,
                         IRIS_DOMAIN_OTHER_READ);

   if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
      uint64_t bound = ice->state.bound_vertex_buffers;
      while (bound) {
         const int i = u_bit_scan64(&bound);
         struct pipe_resource *res = genx->vertex_buffers[i].resource;
         iris_use_pinned_bo(batch, iris_resource_bo(res), false,
                            IRIS_DOMAIN_OTHER_READ);
      }
   }
}
static void
iris_restore_compute_saved_bos(struct iris_context *ice,
                               struct iris_batch *batch,
                               const struct pipe_grid_info *grid)
{
   const uint64_t stage_clean = ~ice->state.stage_dirty;

   const int stage = MESA_SHADER_COMPUTE;
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   if (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) {
      /* Re-pin any buffers referred to by the binding table. */
      iris_populate_binding_table(ice, batch, stage, true);
   }

   struct pipe_resource *sampler_res = shs->sampler_table.res;
   if (sampler_res)
      iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false,
                         IRIS_DOMAIN_NONE);

   if ((stage_clean & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS) &&
       (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) &&
       (stage_clean & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
       (stage_clean & IRIS_STAGE_DIRTY_CS)) {
      iris_use_optional_res(batch, ice->state.last_res.cs_desc, false,
                            IRIS_DOMAIN_NONE);
   }

   if (stage_clean & IRIS_STAGE_DIRTY_CS) {
      struct iris_compiled_shader *shader = ice->shaders.prog[stage];

      if (shader) {
         struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
         iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

         struct iris_bo *curbe_bo =
            iris_resource_bo(ice->state.last_res.cs_thread_ids);
         iris_use_pinned_bo(batch, curbe_bo, false, IRIS_DOMAIN_NONE);

         struct brw_stage_prog_data *prog_data = shader->prog_data;

         if (prog_data->total_scratch > 0) {
            struct iris_bo *bo =
               iris_get_scratch_space(ice, prog_data->total_scratch, stage);
            iris_use_pinned_bo(batch, bo, true, IRIS_DOMAIN_NONE);
         }
      }
   }
}
/**
 * Possibly emit STATE_BASE_ADDRESS to update Surface State Base Address.
 */
static void
iris_update_surface_base_address(struct iris_batch *batch,
                                 struct iris_binder *binder)
{
   if (batch->last_surface_base_address == binder->bo->gtt_offset)
      return;

   uint32_t mocs = batch->screen->isl_dev.mocs.internal;

   iris_batch_sync_region_start(batch);

   flush_before_state_base_change(batch);

#if GEN_GEN == 12
   /* GEN:BUG:1607854226:
    *
    *  Workaround the non pipelined state not applying in MEDIA/GPGPU pipeline
    *  mode by putting the pipeline temporarily in 3D mode.
    */
   if (batch->name == IRIS_BATCH_COMPUTE)
      emit_pipeline_select(batch, _3D);
#endif

   iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.SurfaceStateBaseAddressModifyEnable = true;
      sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);

      /* The hardware appears to pay attention to the MOCS fields even
       * if you don't set the "Address Modify Enable" bit for the base.
       */
      sba.GeneralStateMOCS            = mocs;
      sba.StatelessDataPortAccessMOCS = mocs;
      sba.DynamicStateMOCS            = mocs;
      sba.IndirectObjectMOCS          = mocs;
      sba.InstructionMOCS             = mocs;
      sba.SurfaceStateMOCS            = mocs;
#if GEN_GEN >= 9
      sba.BindlessSurfaceStateMOCS    = mocs;
#endif
   }

#if GEN_GEN == 12
   /* GEN:BUG:1607854226:
    *
    *  Put the pipeline back into compute mode.
    */
   if (batch->name == IRIS_BATCH_COMPUTE)
      emit_pipeline_select(batch, GPGPU);
#endif

   flush_after_state_base_change(batch);
   iris_batch_sync_region_end(batch);

   batch->last_surface_base_address = binder->bo->gtt_offset;
}
static void
iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
                        bool window_space_position, float *zmin, float *zmax)
{
   if (window_space_position) {
      *zmin = 0.f;
      *zmax = 1.f;
      return;
   }
   util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
}
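
/* Example (illustrative, not from the PRM): for a GL-style depth range with
 * scale[2] = 0.5 and translate[2] = 0.5, util_viewport_zmin_zmax() returns
 * zmin = 0.0 and zmax = 1.0 when halfz is false (NDC z in [-1, 1]).  The
 * window-space case above skips that math and simply uses the full [0, 1]
 * range, since positions are already in window coordinates.
 */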
void
genX(invalidate_aux_map_state)(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
   if (!aux_map_ctx)
      return;

   uint32_t aux_map_state_num = gen_aux_map_get_state_num(aux_map_ctx);
   if (batch->last_aux_map_state != aux_map_state_num) {
      /* HSD 1209978178: docs say that before programming the aux table:
       *
       *    "Driver must ensure that the engine is IDLE but ensure it doesn't
       *    add extra flushes in the case it knows that the engine is already
       *    IDLE."
       *
       * An end of pipe sync is needed here, otherwise we see GPU hangs in
       * dEQP-GLES31.functional.copy_image.* tests.
       */
      iris_emit_end_of_pipe_sync(batch, "Invalidate aux map table",
                                 PIPE_CONTROL_CS_STALL);

      /* If the aux-map state number increased, then we need to rewrite the
       * register. Rewriting the register is used to both set the aux-map
       * translation table address, and also to invalidate any previously
       * cached translations.
       */
      iris_load_register_imm32(batch, GENX(GFX_CCS_AUX_INV_num), 1);
      batch->last_aux_map_state = aux_map_state_num;
   }
}
static void
init_aux_map_state(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
   if (!aux_map_ctx)
      return;

   uint64_t base_addr = gen_aux_map_get_base(aux_map_ctx);
   assert(base_addr != 0 && align64(base_addr, 32 * 1024) == base_addr);
   iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num),
                            base_addr);
}

struct push_bos {
   struct {
      struct iris_address addr;
      uint32_t length;
   } buffers[4];
   int buffer_count;
   uint32_t max_length;
};
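
/* A push_bos lives on the caller's stack, zero-initialized, gets filled out
 * by setup_constant_buffers(), and is then handed to one of the packet
 * emission helpers below, e.g.:
 *
 *    struct push_bos push_bos = {};
 *    setup_constant_buffers(ice, batch, stage, &push_bos);
 *    emit_push_constant_packets(ice, batch, stage, &push_bos);
 *
 * buffer_count says how many of the four slots are valid, and max_length is
 * the largest single range, used to decide whether 3DSTATE_CONSTANT_ALL's
 * 5-bit read length field can encode the ranges.
 */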
static void
setup_constant_buffers(struct iris_context *ice,
                       struct iris_batch *batch,
                       int stage,
                       struct push_bos *push_bos)
{
   struct iris_shader_state *shs = &ice->state.shaders[stage];
   struct iris_compiled_shader *shader = ice->shaders.prog[stage];
   struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;

   uint32_t push_range_sum = 0;

   int n = 0;
   for (int i = 0; i < 4; i++) {
      const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];

      if (range->length == 0)
         continue;

      push_range_sum += range->length;

      if (range->length > push_bos->max_length)
         push_bos->max_length = range->length;

      /* Range block is a binding table index, map back to UBO index. */
      unsigned block_index = iris_bti_to_group_index(
         &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
      assert(block_index != IRIS_SURFACE_NOT_USED);

      struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
      struct iris_resource *res = (void *) cbuf->buffer;

      assert(cbuf->buffer_offset % 32 == 0);

      push_bos->buffers[n].length = range->length;
      push_bos->buffers[n].addr =
         res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
             : batch->screen->workaround_address;
      n++;
   }

   /* From the 3DSTATE_CONSTANT_XS and 3DSTATE_CONSTANT_ALL programming notes:
    *
    *    "The sum of all four read length fields must be less than or
    *    equal to the size of 64."
    */
   assert(push_range_sum <= 64);

   push_bos->buffer_count = n;
}
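
/* Starts and read lengths here are in 256-bit (32-byte) register units,
 * hence the "range->start * 32" byte offset and the requirement that the
 * sum of all four read lengths stay within 64 registers (2KB of push
 * constants).  For example, a range with start = 2 and length = 3 pushes
 * 96 bytes beginning 64 bytes into the bound constant buffer.
 */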
static void
emit_push_constant_packets(struct iris_context *ice,
                           struct iris_batch *batch,
                           int stage,
                           const struct push_bos *push_bos)
{
   UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
   struct iris_compiled_shader *shader = ice->shaders.prog[stage];
   struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;

   iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
      pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
#if GEN_GEN >= 12
      pkt.MOCS = isl_dev->mocs.internal;
#endif

      /* The Skylake PRM contains the following restriction:
       *
       *    "The driver must ensure The following case does not occur
       *     without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
       *     buffer 3 read length equal to zero committed followed by a
       *     3DSTATE_CONSTANT_* with buffer 0 read length not equal to
       *     zero committed."
       *
       * To avoid this, we program the buffers in the highest slots.
       * This way, slot 0 is only used if slot 3 is also used.
       */
      int n = push_bos->buffer_count;

      const unsigned shift = 4 - n;
      for (int i = 0; i < n; i++) {
         pkt.ConstantBody.ReadLength[i + shift] =
            push_bos->buffers[i].length;
         pkt.ConstantBody.Buffer[i + shift] = push_bos->buffers[i].addr;
      }
   }
}
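
/* For example, with buffer_count == 2 the shift above is also 2, so the two
 * ranges land in ConstantBody slots 2 and 3.  Slot 0 only ever gets written
 * when all four slots are in use, which is exactly what the Skylake
 * restriction quoted above requires.
 */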
#if GEN_GEN >= 12
static void
emit_push_constant_packet_all(struct iris_context *ice,
                              struct iris_batch *batch,
                              uint32_t shader_mask,
                              const struct push_bos *push_bos)
{
   struct isl_device *isl_dev = &batch->screen->isl_dev;

   if (!push_bos) {
      iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), pc) {
         pc.ShaderUpdateEnable = shader_mask;
      }
      return;
   }

   const uint32_t n = push_bos->buffer_count;
   const uint32_t max_pointers = 4;
   const uint32_t num_dwords = 2 + 2 * n;
   uint32_t const_all[2 + 2 * max_pointers];
   uint32_t *dw = &const_all[0];

   assert(n <= max_pointers);
   iris_pack_command(GENX(3DSTATE_CONSTANT_ALL), dw, all) {
      all.DWordLength = num_dwords - 2;
      all.MOCS = isl_dev->mocs.internal;
      all.ShaderUpdateEnable = shader_mask;
      all.PointerBufferMask = (1 << n) - 1;
   }
   dw += 2;

   for (int i = 0; i < n; i++) {
      _iris_pack_state(batch, GENX(3DSTATE_CONSTANT_ALL_DATA),
                       dw + i * 2, data) {
         data.PointerToConstantBuffer = push_bos->buffers[i].addr;
         data.ConstantBufferReadLength = push_bos->buffers[i].length;
      }
   }
   iris_batch_emit(batch, const_all, sizeof(uint32_t) * num_dwords);
}
#endif
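
/* The packet above is variable length: 2 header DWords plus a 2-DWord
 * 3DSTATE_CONSTANT_ALL_DATA pair per pointer.  With n == 3 buffers,
 * num_dwords is 8, DWordLength is encoded as 6, and PointerBufferMask
 * is 0b111.
 */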
static void
iris_upload_dirty_render_state(struct iris_context *ice,
                               struct iris_batch *batch,
                               const struct pipe_draw_info *draw)
{
   const uint64_t dirty = ice->state.dirty;
   const uint64_t stage_dirty = ice->state.stage_dirty;

   if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER) &&
       !(stage_dirty & IRIS_ALL_STAGE_DIRTY_FOR_RENDER))
      return;

   struct iris_genx_state *genx = ice->state.genx;
   struct iris_binder *binder = &ice->state.binder;
   struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;

   if (dirty & IRIS_DIRTY_CC_VIEWPORT) {
      const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
      uint32_t cc_vp_address;

      /* XXX: could avoid streaming for depth_clip [0,1] case. */
      uint32_t *cc_vp_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.cc_vp,
                      4 * ice->state.num_viewports *
                      GENX(CC_VIEWPORT_length), 32, &cc_vp_address);
      for (int i = 0; i < ice->state.num_viewports; i++) {
         float zmin, zmax;
         iris_viewport_zmin_zmax(&ice->state.viewports[i], cso_rast->clip_halfz,
                                 ice->state.window_space_position,
                                 &zmin, &zmax);
         if (cso_rast->depth_clip_near)
            zmin = 0.0;
         if (cso_rast->depth_clip_far)
            zmax = 1.0;

         iris_pack_state(GENX(CC_VIEWPORT), cc_vp_map, ccv) {
            ccv.MinimumDepth = zmin;
            ccv.MaximumDepth = zmax;
         }

         cc_vp_map += GENX(CC_VIEWPORT_length);
      }

      iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
         ptr.CCViewportPointer = cc_vp_address;
      }
   }
   if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      uint32_t sf_cl_vp_address;
      uint32_t *vp_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.sf_cl_vp,
                      4 * ice->state.num_viewports *
                      GENX(SF_CLIP_VIEWPORT_length), 64, &sf_cl_vp_address);

      for (unsigned i = 0; i < ice->state.num_viewports; i++) {
         const struct pipe_viewport_state *state = &ice->state.viewports[i];
         float gb_xmin, gb_xmax, gb_ymin, gb_ymax;

         float vp_xmin = viewport_extent(state, 0, -1.0f);
         float vp_xmax = viewport_extent(state, 0,  1.0f);
         float vp_ymin = viewport_extent(state, 1, -1.0f);
         float vp_ymax = viewport_extent(state, 1,  1.0f);

         gen_calculate_guardband_size(cso_fb->width, cso_fb->height,
                                      state->scale[0], state->scale[1],
                                      state->translate[0], state->translate[1],
                                      &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);

         iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
            vp.ViewportMatrixElementm00 = state->scale[0];
            vp.ViewportMatrixElementm11 = state->scale[1];
            vp.ViewportMatrixElementm22 = state->scale[2];
            vp.ViewportMatrixElementm30 = state->translate[0];
            vp.ViewportMatrixElementm31 = state->translate[1];
            vp.ViewportMatrixElementm32 = state->translate[2];
            vp.XMinClipGuardband = gb_xmin;
            vp.XMaxClipGuardband = gb_xmax;
            vp.YMinClipGuardband = gb_ymin;
            vp.YMaxClipGuardband = gb_ymax;
            vp.XMinViewPort = MAX2(vp_xmin, 0);
            vp.XMaxViewPort = MIN2(vp_xmax, cso_fb->width) - 1;
            vp.YMinViewPort = MAX2(vp_ymin, 0);
            vp.YMaxViewPort = MIN2(vp_ymax, cso_fb->height) - 1;
         }

         vp_map += GENX(SF_CLIP_VIEWPORT_length);
      }

      iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
         ptr.SFClipViewportPointer = sf_cl_vp_address;
      }
   }
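
   /* The viewport extents are clamped to the framebuffer above: a viewport
    * whose vp_xmax extends past cso_fb->width ends up with
    * XMaxViewPort = width - 1; the "- 1" reflects the max fields being
    * inclusive pixel coordinates.
    */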
   if (dirty & IRIS_DIRTY_URB) {
      unsigned size[4];

      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
         if (!ice->shaders.prog[i]) {
            size[i] = 1;
         } else {
            struct brw_vue_prog_data *vue_prog_data =
               (void *) ice->shaders.prog[i]->prog_data;
            size[i] = vue_prog_data->urb_entry_size;
         }
         assert(size[i] != 0);
      }

      unsigned entries[4], start[4];
      gen_get_urb_config(&batch->screen->devinfo,
                         batch->screen->l3_config_3d,
                         ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
                         ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,
                         size, entries, start,
                         &ice->state.urb_deref_block_size);

      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
         iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
            urb._3DCommandSubOpcode += i;
            urb.VSURBStartingAddress = start[i];
            urb.VSURBEntryAllocationSize = size[i] - 1;
            urb.VSNumberofURBEntries = entries[i];
         }
      }
   }
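
   /* gen_get_urb_config() divides the URB among the active VUE stages
    * (VS through GS) based on each stage's entry size and the current L3
    * configuration, returning a starting offset and entry count per stage.
    * Unused stages still get a size of 1 above, so their 3DSTATE_URB_XS
    * packets carry valid (if minimal) allocations.
    */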
   if (dirty & IRIS_DIRTY_BLEND_STATE) {
      struct iris_blend_state *cso_blend = ice->state.cso_blend;
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
      const int header_dwords = GENX(BLEND_STATE_length);

      /* Always write at least one BLEND_STATE - the final RT message will
       * reference BLEND_STATE[0] even if there aren't color writes. There
       * may still be alpha testing, computed depth, and so on.
       */
      const int rt_dwords =
         MAX2(cso_fb->nr_cbufs, 1) * GENX(BLEND_STATE_ENTRY_length);

      uint32_t blend_offset;
      uint32_t *blend_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.blend,
                      4 * (header_dwords + rt_dwords), 64, &blend_offset);

      uint32_t blend_state_header;
      iris_pack_state(GENX(BLEND_STATE), &blend_state_header, bs) {
         bs.AlphaTestEnable = cso_zsa->alpha.enabled;
         bs.AlphaTestFunction = translate_compare_func(cso_zsa->alpha.func);
      }

      blend_map[0] = blend_state_header | cso_blend->blend_state[0];
      memcpy(&blend_map[1], &cso_blend->blend_state[1], 4 * rt_dwords);

      iris_emit_cmd(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
         ptr.BlendStatePointer = blend_offset;
         ptr.BlendStatePointerValid = true;
      }
   }
   if (dirty & IRIS_DIRTY_COLOR_CALC_STATE) {
      struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
      uint32_t cc_offset;
      struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
      uint32_t *cc_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.color_calc,
                      sizeof(uint32_t) * GENX(COLOR_CALC_STATE_length),
                      64, &cc_offset);
      iris_pack_state(GENX(COLOR_CALC_STATE), cc_map, cc) {
         cc.AlphaTestFormat = ALPHATEST_FLOAT32;
         cc.AlphaReferenceValueAsFLOAT32 = cso->alpha.ref_value;
         cc.BlendConstantColorRed   = ice->state.blend_color.color[0];
         cc.BlendConstantColorGreen = ice->state.blend_color.color[1];
         cc.BlendConstantColorBlue  = ice->state.blend_color.color[2];
         cc.BlendConstantColorAlpha = ice->state.blend_color.color[3];
#if GEN_GEN == 8
         cc.StencilReferenceValue = p_stencil_refs->ref_value[0];
         cc.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
#endif
      }

      iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
         ptr.ColorCalcStatePointer = cc_offset;
         ptr.ColorCalcStatePointerValid = true;
      }
   }
   /* GEN:BUG:1604061319
    *
    *    3DSTATE_CONSTANT_* needs to be programmed before BTP_*
    *
    * Testing shows that all the 3DSTATE_CONSTANT_XS need to be emitted if
    * any stage has a dirty binding table.
    */
   const bool emit_const_wa = GEN_GEN >= 11 &&
      ((dirty & IRIS_DIRTY_RENDER_BUFFER) ||
       (stage_dirty & IRIS_ALL_STAGE_DIRTY_BINDINGS));

#if GEN_GEN >= 12
   uint32_t nobuffer_stages = 0;
#endif

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)) &&
          !emit_const_wa)
         continue;

      struct iris_shader_state *shs = &ice->state.shaders[stage];
      struct iris_compiled_shader *shader = ice->shaders.prog[stage];

      if (!shader)
         continue;

      if (shs->sysvals_need_upload)
         upload_sysvals(ice, stage, NULL);

      struct push_bos push_bos = {};
      setup_constant_buffers(ice, batch, stage, &push_bos);

#if GEN_GEN >= 12
      /* If this stage doesn't have any push constants, emit it later in a
       * single CONSTANT_ALL packet with all the other stages.
       */
      if (push_bos.buffer_count == 0) {
         nobuffer_stages |= 1 << stage;
         continue;
      }

      /* The Constant Buffer Read Length field from 3DSTATE_CONSTANT_ALL
       * contains only 5 bits, so we can only use it for buffers smaller than
       * 32.
       */
      if (push_bos.max_length < 32) {
         emit_push_constant_packet_all(ice, batch, 1 << stage, &push_bos);
         continue;
      }
#endif
      emit_push_constant_packets(ice, batch, stage, &push_bos);
   }

#if GEN_GEN >= 12
   if (nobuffer_stages)
      emit_push_constant_packet_all(ice, batch, nobuffer_stages, NULL);
#endif
   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      /* Gen9 requires 3DSTATE_BINDING_TABLE_POINTERS_XS to be re-emitted
       * in order to commit constants. TODO: Investigate "Disable Gather
       * at Set Shader" to go back to legacy mode...
       */
      if (stage_dirty & ((IRIS_STAGE_DIRTY_BINDINGS_VS |
                          (GEN_GEN == 9 ? IRIS_STAGE_DIRTY_CONSTANTS_VS : 0))
                         << stage)) {
         iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
            ptr._3DCommandSubOpcode = 38 + stage;
            ptr.PointertoVSBindingTable = binder->bt_offset[stage];
         }
      }
   }
   if (GEN_GEN >= 11 && (dirty & IRIS_DIRTY_RENDER_BUFFER)) {
      // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
      // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6

      /* The PIPE_CONTROL command description says:
       *
       *   "Whenever a Binding Table Index (BTI) used by a Render Target
       *    Message points to a different RENDER_SURFACE_STATE, SW must issue a
       *    Render Target Cache Flush by enabling this bit. When render target
       *    flush is set due to new association of BTI, PS Scoreboard Stall bit
       *    must be set in this packet."
       */
      // XXX: does this need to happen at 3DSTATE_BTP_PS time?
      iris_emit_pipe_control_flush(batch, "workaround: RT BTI change [draw]",
                                   PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                   PIPE_CONTROL_STALL_AT_SCOREBOARD);
   }
   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (stage_dirty & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
         iris_populate_binding_table(ice, batch, stage, false);
      }
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage)) ||
          !ice->shaders.prog[stage])
         continue;

      iris_upload_sampler_states(ice, stage);

      struct iris_shader_state *shs = &ice->state.shaders[stage];
      struct pipe_resource *res = shs->sampler_table.res;
      if (res)
         iris_use_pinned_bo(batch, iris_resource_bo(res), false,
                            IRIS_DOMAIN_NONE);

      iris_emit_cmd(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
         ptr._3DCommandSubOpcode = 43 + stage;
         ptr.PointertoVSSamplerState = shs->sampler_table.offset;
      }
   }

   if (ice->state.need_border_colors)
      iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false,
                         IRIS_DOMAIN_NONE);
   if (dirty & IRIS_DIRTY_MULTISAMPLE) {
      iris_emit_cmd(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
         ms.PixelLocation =
            ice->state.cso_rast->half_pixel_center ? CENTER : UL_CORNER;
         if (ice->state.framebuffer.samples > 0)
            ms.NumberofMultisamples = ffs(ice->state.framebuffer.samples) - 1;
      }
   }

   if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
      iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
         ms.SampleMask = ice->state.sample_mask;
      }
   }
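
   /* NumberofMultisamples is a log2 encoding: a 4-sample framebuffer gives
    * ffs(4) - 1 = 2, and an 8-sample one gives 3.  A samples count of 0 or 1
    * leaves the field at 0 (single-sampled).
    */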
   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(stage_dirty & (IRIS_STAGE_DIRTY_VS << stage)))
         continue;

      struct iris_compiled_shader *shader = ice->shaders.prog[stage];

      if (shader) {
         struct brw_stage_prog_data *prog_data = shader->prog_data;
         struct iris_resource *cache = (void *) shader->assembly.res;
         iris_use_pinned_bo(batch, cache->bo, false, IRIS_DOMAIN_NONE);

         if (prog_data->total_scratch > 0) {
            struct iris_bo *bo =
               iris_get_scratch_space(ice, prog_data->total_scratch, stage);
            iris_use_pinned_bo(batch, bo, true, IRIS_DOMAIN_NONE);
         }

         if (stage == MESA_SHADER_FRAGMENT) {
            UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast;
            struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;

            uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0};
            iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
               ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
               ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
               ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;

               /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say:
                *
                *    "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16,
                *     SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch
                *     mode."
                *
                * 16x MSAA only exists on Gen9+, so we can skip this on Gen8.
                */
               if (GEN_GEN >= 9 && cso_fb->samples == 16 &&
                   !wm_prog_data->persample_dispatch) {
                  assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
                  ps._32PixelDispatchEnable = false;
               }

               ps.DispatchGRFStartRegisterForConstantSetupData0 =
                  brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
               ps.DispatchGRFStartRegisterForConstantSetupData1 =
                  brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
               ps.DispatchGRFStartRegisterForConstantSetupData2 =
                  brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);

               ps.KernelStartPointer0 = KSP(shader) +
                  brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
               ps.KernelStartPointer1 = KSP(shader) +
                  brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
               ps.KernelStartPointer2 = KSP(shader) +
                  brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
            }

            uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
            iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
#if GEN_GEN >= 9
               if (!wm_prog_data->uses_sample_mask)
                  psx.InputCoverageMaskState = ICMS_NONE;
               else if (wm_prog_data->post_depth_coverage)
                  psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
               else if (wm_prog_data->inner_coverage &&
                        cso->conservative_rasterization)
                  psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
               else
                  psx.InputCoverageMaskState = ICMS_NORMAL;
#else
               psx.PixelShaderUsesInputCoverageMask =
                  wm_prog_data->uses_sample_mask;
#endif
            }

            uint32_t *shader_ps = (uint32_t *) shader->derived_data;
            uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length);
            iris_emit_merge(batch, shader_ps, ps_state,
                            GENX(3DSTATE_PS_length));
            iris_emit_merge(batch, shader_psx, psx_state,
                            GENX(3DSTATE_PS_EXTRA_length));
         } else {
            iris_batch_emit(batch, shader->derived_data,
                            iris_derived_program_state_size(stage));
         }
      } else {
         if (stage == MESA_SHADER_TESS_EVAL) {
            iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
            iris_emit_cmd(batch, GENX(3DSTATE_TE), te);
            iris_emit_cmd(batch, GENX(3DSTATE_DS), ds);
         } else if (stage == MESA_SHADER_GEOMETRY) {
            iris_emit_cmd(batch, GENX(3DSTATE_GS), gs);
         }
      }
   }
   if (ice->state.streamout_active) {
      if (dirty & IRIS_DIRTY_SO_BUFFERS) {
         iris_batch_emit(batch, genx->so_buffers,
                         4 * 4 * GENX(3DSTATE_SO_BUFFER_length));
         for (int i = 0; i < 4; i++) {
            struct iris_stream_output_target *tgt =
               (void *) ice->state.so_target[i];
            if (tgt) {
               tgt->zeroed = false;
               iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
                                  true, IRIS_DOMAIN_OTHER_WRITE);
               iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
                                  true, IRIS_DOMAIN_OTHER_WRITE);
            }
         }
      }

      if ((dirty & IRIS_DIRTY_SO_DECL_LIST) && ice->state.streamout) {
         uint32_t *decl_list =
            ice->state.streamout + GENX(3DSTATE_STREAMOUT_length);
         iris_batch_emit(batch, decl_list, 4 * ((decl_list[0] & 0xff) + 2));
      }

      if (dirty & IRIS_DIRTY_STREAMOUT) {
         const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;

         uint32_t dynamic_sol[GENX(3DSTATE_STREAMOUT_length)];
         iris_pack_command(GENX(3DSTATE_STREAMOUT), dynamic_sol, sol) {
            sol.SOFunctionEnable = true;
            sol.SOStatisticsEnable = true;

            sol.RenderingDisable = cso_rast->rasterizer_discard &&
                                   !ice->state.prims_generated_query_active;
            sol.ReorderMode = cso_rast->flatshade_first ? LEADING : TRAILING;
         }

         assert(ice->state.streamout);

         iris_emit_merge(batch, ice->state.streamout, dynamic_sol,
                         GENX(3DSTATE_STREAMOUT_length));
      }
   } else {
      if (dirty & IRIS_DIRTY_STREAMOUT) {
         iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
      }
   }
   if (dirty & IRIS_DIRTY_CLIP) {
      struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;

      bool gs_or_tes = ice->shaders.prog[MESA_SHADER_GEOMETRY] ||
                       ice->shaders.prog[MESA_SHADER_TESS_EVAL];
      bool points_or_lines = cso_rast->fill_mode_point_or_line ||
         (gs_or_tes ? ice->shaders.output_topology_is_points_or_lines
                    : ice->state.prim_is_points_or_lines);

      uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
      iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
         cl.StatisticsEnable = ice->state.statistics_counters_enabled;
         if (cso_rast->rasterizer_discard)
            cl.ClipMode = CLIPMODE_REJECT_ALL;
         else if (ice->state.window_space_position)
            cl.ClipMode = CLIPMODE_ACCEPT_ALL;
         else
            cl.ClipMode = CLIPMODE_NORMAL;

         cl.PerspectiveDivideDisable = ice->state.window_space_position;
         cl.ViewportXYClipTestEnable = !points_or_lines;

         if (wm_prog_data->barycentric_interp_modes &
             BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
            cl.NonPerspectiveBarycentricEnable = true;

         cl.ForceZeroRTAIndexEnable = cso_fb->layers <= 1;
         cl.MaximumVPIndex = ice->state.num_viewports - 1;
      }
      iris_emit_merge(batch, cso_rast->clip, dynamic_clip,
                      ARRAY_SIZE(cso_rast->clip));
   }
   if (dirty & (IRIS_DIRTY_RASTER | IRIS_DIRTY_URB)) {
      struct iris_rasterizer_state *cso = ice->state.cso_rast;
      iris_batch_emit(batch, cso->raster, sizeof(cso->raster));

      uint32_t dynamic_sf[GENX(3DSTATE_SF_length)];
      iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) {
         sf.ViewportTransformEnable = !ice->state.window_space_position;

#if GEN_GEN >= 12
         sf.DerefBlockSize = ice->state.urb_deref_block_size;
#endif
      }
      iris_emit_merge(batch, cso->sf, dynamic_sf,
                      ARRAY_SIZE(dynamic_sf));
   }
   if (dirty & IRIS_DIRTY_WM) {
      struct iris_rasterizer_state *cso = ice->state.cso_rast;
      uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];

      iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
         wm.StatisticsEnable = ice->state.statistics_counters_enabled;

         wm.BarycentricInterpolationMode =
            wm_prog_data->barycentric_interp_modes;

         if (wm_prog_data->early_fragment_tests)
            wm.EarlyDepthStencilControl = EDSC_PREPS;
         else if (wm_prog_data->has_side_effects)
            wm.EarlyDepthStencilControl = EDSC_PSEXEC;

         /* We could skip this bit if color writes are enabled. */
         if (wm_prog_data->has_side_effects || wm_prog_data->uses_kill)
            wm.ForceThreadDispatchEnable = ForceON;
      }
      iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
   }

   if (dirty & IRIS_DIRTY_SBE) {
      iris_emit_sbe(batch, ice);
   }
   if (dirty & IRIS_DIRTY_PS_BLEND) {
      struct iris_blend_state *cso_blend = ice->state.cso_blend;
      struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
      const struct shader_info *fs_info =
         iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);

      uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
      iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
         pb.HasWriteableRT = has_writeable_rt(cso_blend, fs_info);
         pb.AlphaTestEnable = cso_zsa->alpha.enabled;

         /* The dual source blending docs caution against using SRC1 factors
          * when the shader doesn't use a dual source render target write.
          * Empirically, this can lead to GPU hangs, and the results are
          * undefined anyway, so simply disable blending to avoid the hang.
          */
         pb.ColorBufferBlendEnable = (cso_blend->blend_enables & 1) &&
            (!cso_blend->dual_color_blending || wm_prog_data->dual_src_blend);
      }

      iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
                      ARRAY_SIZE(cso_blend->ps_blend));
   }
   if (dirty & IRIS_DIRTY_WM_DEPTH_STENCIL) {
      struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
#if GEN_GEN >= 9 && GEN_GEN < 12
      struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
      uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
      iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
         wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
         wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
      }
      iris_emit_merge(batch, cso->wmds, stencil_refs, ARRAY_SIZE(cso->wmds));
#else
      /* Use modify disable fields which allow us to emit packets
       * directly instead of merging them later.
       */
      iris_batch_emit(batch, cso->wmds, sizeof(cso->wmds));
#endif

#if GEN_GEN >= 12
      iris_batch_emit(batch, cso->depth_bounds, sizeof(cso->depth_bounds));
#endif
   }

   if (dirty & IRIS_DIRTY_STENCIL_REF) {
#if GEN_GEN >= 12
      /* Use modify disable fields which allow us to emit packets
       * directly instead of merging them later.
       */
      struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
      uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
      iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
         wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
         wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
         wmds.StencilTestMaskModifyDisable = true;
         wmds.StencilWriteMaskModifyDisable = true;
         wmds.StencilStateModifyDisable = true;
         wmds.DepthStateModifyDisable = true;
      }
      iris_batch_emit(batch, stencil_refs, sizeof(stencil_refs));
#endif
   }
   if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
      uint32_t scissor_offset =
         emit_state(batch, ice->state.dynamic_uploader,
                    &ice->state.last_res.scissor,
                    ice->state.scissors,
                    sizeof(struct pipe_scissor_state) *
                    ice->state.num_viewports, 32);

      iris_emit_cmd(batch, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
         ptr.ScissorRectPointer = scissor_offset;
      }
   }
   if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
      struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;

      /* Do not emit the clear params yet. We need to update the clear value
       * first.
       */
      uint32_t clear_length = GENX(3DSTATE_CLEAR_PARAMS_length) * 4;
      uint32_t cso_z_size = batch->screen->isl_dev.ds.size - clear_length;

#if GEN_GEN == 12
      /* GEN:BUG:14010455700
       *
       * ISL will change some CHICKEN registers depending on the depth surface
       * format, along with emitting the depth and stencil packets. In that
       * case, we want to do a depth flush and stall, so the pipeline is not
       * using these settings while we change the registers.
       */
      iris_emit_end_of_pipe_sync(batch,
                                 "Workaround: Stop pipeline for 14010455700",
                                 PIPE_CONTROL_DEPTH_STALL |
                                 PIPE_CONTROL_DEPTH_CACHE_FLUSH);
#endif

      iris_batch_emit(batch, cso_z->packets, cso_z_size);
      if (GEN_GEN >= 12) {
         /* GEN:BUG:1408224581
          *
          * Workaround: Gen12LP Astep only An additional pipe control with
          * post-sync = store dword operation would be required.( w/a is to
          * have an additional pipe control after the stencil state whenever
          * the surface state bits of this state is changing).
          */
         iris_emit_pipe_control_write(batch, "WA for stencil state",
                                      PIPE_CONTROL_WRITE_IMMEDIATE,
                                      batch->screen->workaround_address.bo,
                                      batch->screen->workaround_address.offset,
                                      0);
      }

      union isl_color_value clear_value = { .f32 = { 0, } };

      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      if (cso_fb->zsbuf) {
         struct iris_resource *zres, *sres;
         iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
                                          &zres, &sres);
         if (zres && zres->aux.bo)
            clear_value = iris_resource_get_clear_color(zres, NULL, NULL);
      }

      uint32_t clear_params[GENX(3DSTATE_CLEAR_PARAMS_length)];
      iris_pack_command(GENX(3DSTATE_CLEAR_PARAMS), clear_params, clear) {
         clear.DepthClearValueValid = true;
         clear.DepthClearValue = clear_value.f32[0];
      }
      iris_batch_emit(batch, clear_params, clear_length);
   }
   if (dirty & (IRIS_DIRTY_DEPTH_BUFFER | IRIS_DIRTY_WM_DEPTH_STENCIL)) {
      /* Listen for buffer changes, and also write enable changes. */
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
   }
   if (dirty & IRIS_DIRTY_POLYGON_STIPPLE) {
      iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
         for (int i = 0; i < 32; i++) {
            poly.PatternRow[i] = ice->state.poly_stipple.stipple[i];
         }
      }
   }

   if (dirty & IRIS_DIRTY_LINE_STIPPLE) {
      struct iris_rasterizer_state *cso = ice->state.cso_rast;
      iris_batch_emit(batch, cso->line_stipple, sizeof(cso->line_stipple));
   }
   if (dirty & IRIS_DIRTY_VF_TOPOLOGY) {
      iris_emit_cmd(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
         topo.PrimitiveTopologyType =
            translate_prim_type(draw->mode, draw->vertices_per_patch);
      }
   }
   if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
      int count = util_bitcount64(ice->state.bound_vertex_buffers);
      uint64_t dynamic_bound = ice->state.bound_vertex_buffers;

      if (ice->state.vs_uses_draw_params) {
         assert(ice->draw.draw_params.res);

         struct iris_vertex_buffer_state *state =
            &(ice->state.genx->vertex_buffers[count]);
         pipe_resource_reference(&state->resource, ice->draw.draw_params.res);
         struct iris_resource *res = (void *) state->resource;

         iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
            vb.VertexBufferIndex = count;
            vb.AddressModifyEnable = true;
            vb.BufferPitch = 0;
            vb.BufferSize = res->bo->size - ice->draw.draw_params.offset;
            vb.BufferStartingAddress =
               ro_bo(NULL, res->bo->gtt_offset +
                           (int) ice->draw.draw_params.offset);
            vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev);
         }
         dynamic_bound |= 1ull << count;
         count++;
      }

      if (ice->state.vs_uses_derived_draw_params) {
         struct iris_vertex_buffer_state *state =
            &(ice->state.genx->vertex_buffers[count]);
         pipe_resource_reference(&state->resource,
                                 ice->draw.derived_draw_params.res);
         struct iris_resource *res = (void *) ice->draw.derived_draw_params.res;

         iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
            vb.VertexBufferIndex = count;
            vb.AddressModifyEnable = true;
            vb.BufferPitch = 0;
            vb.BufferSize =
               res->bo->size - ice->draw.derived_draw_params.offset;
            vb.BufferStartingAddress =
               ro_bo(NULL, res->bo->gtt_offset +
                           (int) ice->draw.derived_draw_params.offset);
            vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev);
         }
         dynamic_bound |= 1ull << count;
         count++;
      }

      if (count) {
#if GEN_GEN >= 11
         /* Gen11+ doesn't need the cache workaround below */
         uint64_t bound = dynamic_bound;
         while (bound) {
            const int i = u_bit_scan64(&bound);
            iris_use_optional_res(batch, genx->vertex_buffers[i].resource,
                                  false, IRIS_DOMAIN_OTHER_READ);
         }
#else
         /* The VF cache designers cut corners, and made the cache key's
          * <VertexBufferIndex, Memory Address> tuple only consider the bottom
          * 32 bits of the address. If you have two vertex buffers which get
          * placed exactly 4 GiB apart and use them in back-to-back draw calls,
          * you can get collisions (even within a single batch).
          *
          * So, we need to do a VF cache invalidate if the buffer for a VB
          * slot changes [48:32] address bits from the previous time.
          */
         unsigned flush_flags = 0;

         uint64_t bound = dynamic_bound;
         while (bound) {
            const int i = u_bit_scan64(&bound);
            uint16_t high_bits = 0;

            struct iris_resource *res =
               (void *) genx->vertex_buffers[i].resource;
            if (res) {
               iris_use_pinned_bo(batch, res->bo, false,
                                  IRIS_DOMAIN_OTHER_READ);

               high_bits = res->bo->gtt_offset >> 32ull;
               if (high_bits != ice->state.last_vbo_high_bits[i]) {
                  flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                 PIPE_CONTROL_CS_STALL;
                  ice->state.last_vbo_high_bits[i] = high_bits;
               }
            }
         }

         if (flush_flags) {
            iris_emit_pipe_control_flush(batch,
                                         "workaround: VF cache 32-bit key [VB]",
                                         flush_flags);
         }
#endif

         const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);

         uint32_t *map =
            iris_get_command_space(batch, 4 * (1 + vb_dwords * count));
         _iris_pack_command(batch, GENX(3DSTATE_VERTEX_BUFFERS), map, vb) {
            vb.DWordLength = (vb_dwords * count + 1) - 2;
         }
         map += 1;

         bound = dynamic_bound;
         while (bound) {
            const int i = u_bit_scan64(&bound);
            memcpy(map, genx->vertex_buffers[i].state,
                   sizeof(uint32_t) * vb_dwords);
            map += vb_dwords;
         }
      }
   }
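
   /* Concretely: two vertex buffers placed at GPU addresses 0x1'00000000 and
    * 0x2'00000000 both hash to low-32-bits 0 in the VF cache, so without the
    * invalidate above, stale data fetched through the first buffer could be
    * returned for fetches from the second.  Tracking only the high address
    * bits per VB slot (last_vbo_high_bits) keeps the workaround cheap.
    */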
   if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) {
      struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
      const unsigned entries = MAX2(cso->count, 1);
      if (!(ice->state.vs_needs_sgvs_element ||
            ice->state.vs_uses_derived_draw_params ||
            ice->state.vs_needs_edge_flag)) {
         iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) *
                         (1 + entries * GENX(VERTEX_ELEMENT_STATE_length)));
      } else {
         uint32_t dynamic_ves[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
         const unsigned dyn_count = cso->count +
            ice->state.vs_needs_sgvs_element +
            ice->state.vs_uses_derived_draw_params;

         iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS),
                           &dynamic_ves, ve) {
            ve.DWordLength =
               1 + GENX(VERTEX_ELEMENT_STATE_length) * dyn_count - 2;
         }
         memcpy(&dynamic_ves[1], &cso->vertex_elements[1],
                (cso->count - ice->state.vs_needs_edge_flag) *
                GENX(VERTEX_ELEMENT_STATE_length) * sizeof(uint32_t));
         uint32_t *ve_pack_dest =
            &dynamic_ves[1 + (cso->count - ice->state.vs_needs_edge_flag) *
                         GENX(VERTEX_ELEMENT_STATE_length)];

         if (ice->state.vs_needs_sgvs_element) {
            uint32_t base_ctrl = ice->state.vs_uses_draw_params ?
                                 VFCOMP_STORE_SRC : VFCOMP_STORE_0;
            iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
               ve.Valid = true;
               ve.VertexBufferIndex =
                  util_bitcount64(ice->state.bound_vertex_buffers);
               ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
               ve.Component0Control = base_ctrl;
               ve.Component1Control = base_ctrl;
               ve.Component2Control = VFCOMP_STORE_0;
               ve.Component3Control = VFCOMP_STORE_0;
            }
            ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
         }
         if (ice->state.vs_uses_derived_draw_params) {
            iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
               ve.Valid = true;
               ve.VertexBufferIndex =
                  util_bitcount64(ice->state.bound_vertex_buffers) +
                  ice->state.vs_uses_draw_params;
               ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
               ve.Component0Control = VFCOMP_STORE_SRC;
               ve.Component1Control = VFCOMP_STORE_SRC;
               ve.Component2Control = VFCOMP_STORE_0;
               ve.Component3Control = VFCOMP_STORE_0;
            }
            ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
         }
         if (ice->state.vs_needs_edge_flag) {
            for (int i = 0; i < GENX(VERTEX_ELEMENT_STATE_length); i++)
               ve_pack_dest[i] = cso->edgeflag_ve[i];
         }

         iris_batch_emit(batch, &dynamic_ves, sizeof(uint32_t) *
                         (1 + dyn_count * GENX(VERTEX_ELEMENT_STATE_length)));
      }

      if (!ice->state.vs_needs_edge_flag) {
         iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) *
                         entries * GENX(3DSTATE_VF_INSTANCING_length));
      } else {
         assert(cso->count > 0);
         const unsigned edgeflag_index = cso->count - 1;
         uint32_t dynamic_vfi[33 * GENX(3DSTATE_VF_INSTANCING_length)];
         memcpy(&dynamic_vfi[0], cso->vf_instancing, edgeflag_index *
                GENX(3DSTATE_VF_INSTANCING_length) * sizeof(uint32_t));

         uint32_t *vfi_pack_dest = &dynamic_vfi[0] +
            edgeflag_index * GENX(3DSTATE_VF_INSTANCING_length);
         iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
            vi.VertexElementIndex = edgeflag_index +
               ice->state.vs_needs_sgvs_element +
               ice->state.vs_uses_derived_draw_params;
         }
         for (int i = 0; i < GENX(3DSTATE_VF_INSTANCING_length); i++)
            vfi_pack_dest[i] |= cso->edgeflag_vfi[i];

         iris_batch_emit(batch, &dynamic_vfi[0], sizeof(uint32_t) *
                         entries * GENX(3DSTATE_VF_INSTANCING_length));
      }
   }
   if (dirty & IRIS_DIRTY_VF_SGVS) {
      const struct brw_vs_prog_data *vs_prog_data = (void *)
         ice->shaders.prog[MESA_SHADER_VERTEX]->prog_data;
      struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;

      iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgv) {
         if (vs_prog_data->uses_vertexid) {
            sgv.VertexIDEnable = true;
            sgv.VertexIDComponentNumber = 2;
            sgv.VertexIDElementOffset =
               cso->count - ice->state.vs_needs_edge_flag;
         }

         if (vs_prog_data->uses_instanceid) {
            sgv.InstanceIDEnable = true;
            sgv.InstanceIDComponentNumber = 3;
            sgv.InstanceIDElementOffset =
               cso->count - ice->state.vs_needs_edge_flag;
         }
      }
   }
   if (dirty & IRIS_DIRTY_VF) {
      iris_emit_cmd(batch, GENX(3DSTATE_VF), vf) {
         if (draw->primitive_restart) {
            vf.IndexedDrawCutIndexEnable = true;
            vf.CutIndex = draw->restart_index;
         }
      }
   }

   if (dirty & IRIS_DIRTY_VF_STATISTICS) {
      iris_emit_cmd(batch, GENX(3DSTATE_VF_STATISTICS), vf) {
         vf.StatisticsEnable = true;
      }
   }

#if GEN_GEN == 8
   if (dirty & IRIS_DIRTY_PMA_FIX) {
      bool enable = want_pma_fix(ice);
      genX(update_pma_fix)(ice, batch, enable);
   }
#endif

   if (ice->state.current_hash_scale != 1)
      genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1);

#if GEN_GEN == 12
   genX(invalidate_aux_map_state)(batch);
#endif
}
static void
iris_upload_render_state(struct iris_context *ice,
                         struct iris_batch *batch,
                         const struct pipe_draw_info *draw)
{
   bool use_predicate = ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT;

   iris_batch_sync_region_start(batch);

   /* Always pin the binder. If we're emitting new binding table pointers,
    * we need it. If not, we're probably inheriting old tables via the
    * context, and need it anyway. Since true zero-bindings cases are
    * practically non-existent, just pin it and avoid last_res tracking.
    */
   iris_use_pinned_bo(batch, ice->state.binder.bo, false,
                      IRIS_DOMAIN_NONE);

   if (!batch->contains_draw_with_next_seqno) {
      iris_restore_render_saved_bos(ice, batch, draw);
      batch->contains_draw_with_next_seqno = batch->contains_draw = true;
   }

   iris_upload_dirty_render_state(ice, batch, draw);

   if (draw->index_size > 0) {
      unsigned offset;

      if (draw->has_user_indices) {
         u_upload_data(ice->ctx.stream_uploader, 0,
                       draw->count * draw->index_size, 4, draw->index.user,
                       &offset, &ice->state.last_res.index_buffer);
      } else {
         struct iris_resource *res = (void *) draw->index.resource;
         res->bind_history |= PIPE_BIND_INDEX_BUFFER;

         pipe_resource_reference(&ice->state.last_res.index_buffer,
                                 draw->index.resource);
         offset = 0;
      }

      struct iris_genx_state *genx = ice->state.genx;
      struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);

      uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)];
      iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) {
         ib.IndexFormat = draw->index_size >> 1;
         ib.MOCS = iris_mocs(bo, &batch->screen->isl_dev);
         ib.BufferSize = bo->size - offset;
         ib.BufferStartingAddress = ro_bo(NULL, bo->gtt_offset + offset);
      }

      if (memcmp(genx->last_index_buffer, ib_packet, sizeof(ib_packet)) != 0) {
         memcpy(genx->last_index_buffer, ib_packet, sizeof(ib_packet));
         iris_batch_emit(batch, ib_packet, sizeof(ib_packet));
         iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_OTHER_READ);
      }

#if GEN_GEN < 11
      /* The VF cache key only uses 32-bits, see vertex buffer comment above */
      uint16_t high_bits = bo->gtt_offset >> 32ull;
      if (high_bits != ice->state.last_index_bo_high_bits) {
         iris_emit_pipe_control_flush(batch,
                                      "workaround: VF cache 32-bit key [IB]",
                                      PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                      PIPE_CONTROL_CS_STALL);
         ice->state.last_index_bo_high_bits = high_bits;
      }
#endif
   }
#define _3DPRIM_END_OFFSET          0x2420
#define _3DPRIM_START_VERTEX        0x2430
#define _3DPRIM_VERTEX_COUNT        0x2434
#define _3DPRIM_INSTANCE_COUNT      0x2438
#define _3DPRIM_START_INSTANCE      0x243C
#define _3DPRIM_BASE_VERTEX         0x2440
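
   /* These are the MMIO offsets of the 3DPRIM registers the hardware reads
    * when 3DPRIMITIVE executes with Indirect Parameter Enable set.  The
    * MI_LOAD_REGISTER_MEM commands below fill them from the application's
    * indirect buffer: count, instance count, first vertex, and then either
    * base vertex followed by first instance (indexed draws) or just first
    * instance (non-indexed), matching the +0/+4/+8/+12/+16 offsets used
    * below.
    */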
   if (draw->indirect) {
      if (draw->indirect->indirect_draw_count) {
         use_predicate = true;

         struct iris_bo *draw_count_bo =
            iris_resource_bo(draw->indirect->indirect_draw_count);
         unsigned draw_count_offset =
            draw->indirect->indirect_draw_count_offset;

         iris_emit_pipe_control_flush(batch,
                                      "ensure indirect draw buffer is flushed",
                                      PIPE_CONTROL_FLUSH_ENABLE);

         if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
            struct gen_mi_builder b;
            gen_mi_builder_init(&b, batch);

            /* comparison = draw id < draw count */
            struct gen_mi_value comparison =
               gen_mi_ult(&b, gen_mi_imm(draw->drawid),
                              gen_mi_mem32(ro_bo(draw_count_bo,
                                                 draw_count_offset)));

            /* predicate = comparison & conditional rendering predicate */
            gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_RESULT),
                             gen_mi_iand(&b, comparison,
                                             gen_mi_reg32(CS_GPR(15))));
         } else {
            uint32_t mi_predicate;

            /* Upload the id of the current primitive to MI_PREDICATE_SRC1. */
            iris_load_register_imm64(batch, MI_PREDICATE_SRC1, draw->drawid);
            /* Upload the current draw count from the draw parameters buffer
             * to MI_PREDICATE_SRC0.
             */
            iris_load_register_mem32(batch, MI_PREDICATE_SRC0,
                                     draw_count_bo, draw_count_offset);
            /* Zero the top 32-bits of MI_PREDICATE_SRC0 */
            iris_load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0);

            if (draw->drawid == 0) {
               mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
                              MI_PREDICATE_COMBINEOP_SET |
                              MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
            } else {
               /* While draw_index < draw_count the predicate's result will be
                *  (draw_index == draw_count) ^ TRUE = TRUE
                * When draw_index == draw_count the result is
                *  (TRUE) ^ TRUE = FALSE
                * After this all results will be:
                *  (FALSE) ^ FALSE = FALSE
                */
               mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOAD |
                              MI_PREDICATE_COMBINEOP_XOR |
                              MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
            }
            iris_batch_emit(batch, &mi_predicate, sizeof(uint32_t));
         }
      }
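
      /* Sketch of the intent above (not a spec quote): for draw 0,
       * LOADOP_LOADINV with COMBINEOP_SET makes rendering proceed exactly
       * when drawid (0) differs from the stored draw count, while the XOR
       * combine on later draws flips the accumulated result to FALSE once
       * drawid reaches draw_count, as the truth table in the comment above
       * spells out.
       */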
      struct iris_bo *bo = iris_resource_bo(draw->indirect->buffer);
      assert(bo);

      iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
         lrm.RegisterAddress = _3DPRIM_VERTEX_COUNT;
         lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 0);
      }
      iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
         lrm.RegisterAddress = _3DPRIM_INSTANCE_COUNT;
         lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 4);
      }
      iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
         lrm.RegisterAddress = _3DPRIM_START_VERTEX;
         lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 8);
      }
      if (draw->index_size) {
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
            lrm.RegisterAddress = _3DPRIM_BASE_VERTEX;
            lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
         }
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
            lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
            lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 16);
         }
      } else {
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
            lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
            lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
         }
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
            lri.RegisterOffset = _3DPRIM_BASE_VERTEX;
            lri.DataDWord = 0;
         }
      }
   } else if (draw->count_from_stream_output) {
      struct iris_stream_output_target *so =
         (void *) draw->count_from_stream_output;

      /* XXX: Replace with actual cache tracking */
      iris_emit_pipe_control_flush(batch,
                                   "draw count from stream output stall",
                                   PIPE_CONTROL_CS_STALL);

      struct gen_mi_builder b;
      gen_mi_builder_init(&b, batch);

      struct iris_address addr =
         ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
      struct gen_mi_value offset =
         gen_mi_iadd_imm(&b, gen_mi_mem32(addr), -so->base.buffer_offset);

      gen_mi_store(&b, gen_mi_reg32(_3DPRIM_VERTEX_COUNT),
                       gen_mi_udiv32_imm(&b, offset, so->stride));

      _iris_emit_lri(batch, _3DPRIM_START_VERTEX, 0);
      _iris_emit_lri(batch, _3DPRIM_BASE_VERTEX, 0);
      _iris_emit_lri(batch, _3DPRIM_START_INSTANCE, 0);
      _iris_emit_lri(batch, _3DPRIM_INSTANCE_COUNT, draw->instance_count);
   }
   iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;
      prim.PredicateEnable = use_predicate;

      if (draw->indirect || draw->count_from_stream_output) {
         prim.IndirectParameterEnable = true;
      } else {
         prim.StartInstanceLocation = draw->start_instance;
         prim.InstanceCount = draw->instance_count;
         prim.VertexCountPerInstance = draw->count;

         prim.StartVertexLocation = draw->start;

         if (draw->index_size) {
            prim.BaseVertexLocation += draw->index_bias;
         } else {
            prim.StartVertexLocation += draw->index_bias;
         }
      }
   }

   iris_batch_sync_region_end(batch);
}
static void
iris_load_indirect_location(struct iris_context *ice,
                            struct iris_batch *batch,
                            const struct pipe_grid_info *grid)
{
#define GPGPU_DISPATCHDIMX 0x2500
#define GPGPU_DISPATCHDIMY 0x2504
#define GPGPU_DISPATCHDIMZ 0x2508

   assert(grid->indirect);

   struct iris_state_ref *grid_size = &ice->state.grid_size;
   struct iris_bo *bo = iris_resource_bo(grid_size->res);
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = GPGPU_DISPATCHDIMX;
      lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 0);
   }
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = GPGPU_DISPATCHDIMY;
      lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 4);
   }
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = GPGPU_DISPATCHDIMZ;
      lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 8);
   }
}
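
/* The grid size reference is laid out as three consecutive DWords (the x,
 * y, and z group counts), which is why the loads above read offsets +0, +4,
 * and +8 into GPGPU_DISPATCHDIM{X,Y,Z}.
 */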
static void
iris_upload_gpgpu_walker(struct iris_context *ice,
                         struct iris_batch *batch,
                         const struct pipe_grid_info *grid)
{
   const uint64_t stage_dirty = ice->state.stage_dirty;
   struct iris_screen *screen = batch->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_binder *binder = &ice->state.binder;
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
   struct iris_compiled_shader *shader =
      ice->shaders.prog[MESA_SHADER_COMPUTE];
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
   const uint32_t group_size = grid->block[0] * grid->block[1] * grid->block[2];
   const unsigned simd_size =
      brw_cs_simd_size_for_group_size(devinfo, cs_prog_data, group_size);
   const unsigned threads = DIV_ROUND_UP(group_size, simd_size);

   if (stage_dirty & IRIS_STAGE_DIRTY_CS) {
      /* The MEDIA_VFE_STATE documentation for Gen8+ says:
       *
       *   "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
       *    the only bits that are changed are scoreboard related: Scoreboard
       *    Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta. For
       *    these scoreboard related states, a MEDIA_STATE_FLUSH is
       *    sufficient."
       */
      iris_emit_pipe_control_flush(batch,
                                   "workaround: stall before MEDIA_VFE_STATE",
                                   PIPE_CONTROL_CS_STALL);

      iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
         if (prog_data->total_scratch) {
            struct iris_bo *bo =
               iris_get_scratch_space(ice, prog_data->total_scratch,
                                      MESA_SHADER_COMPUTE);
            vfe.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
            vfe.ScratchSpaceBasePointer = rw_bo(bo, 0, IRIS_DOMAIN_NONE);
         }

         vfe.MaximumNumberofThreads =
            devinfo->max_cs_threads * screen->subslice_total - 1;
#if GEN_GEN < 11
         vfe.ResetGatewayTimer =
            Resettingrelativetimerandlatchingtheglobaltimestamp;
#endif
#if GEN_GEN == 8
         vfe.BypassGatewayControl = true;
#endif
         vfe.NumberofURBEntries = 2;
         vfe.URBEntryAllocationSize = 2;

         vfe.CURBEAllocationSize =
            ALIGN(cs_prog_data->push.per_thread.regs * threads +
                  cs_prog_data->push.cross_thread.regs, 2);
      }
   }

   /* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */
   if (stage_dirty & IRIS_STAGE_DIRTY_CS) {
      uint32_t curbe_data_offset = 0;
      assert(cs_prog_data->push.cross_thread.dwords == 0 &&
             cs_prog_data->push.per_thread.dwords == 1 &&
             cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
      const unsigned push_const_size =
         brw_cs_push_const_total_size(cs_prog_data, threads);
      uint32_t *curbe_data_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.cs_thread_ids,
                      ALIGN(push_const_size, 64), 64,
                      &curbe_data_offset);
      assert(curbe_data_map);
      memset(curbe_data_map, 0x5a, ALIGN(push_const_size, 64));
      iris_fill_cs_push_const_buffer(cs_prog_data, threads, curbe_data_map);

      iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
         curbe.CURBETotalDataLength = ALIGN(push_const_size, 64);
         curbe.CURBEDataStartAddress = curbe_data_offset;
      }
   }

   for (unsigned i = 0; i < IRIS_MAX_GLOBAL_BINDINGS; i++) {
      struct pipe_resource *res = ice->state.global_bindings[i];
      if (!res)
         continue;

      iris_use_pinned_bo(batch, iris_resource_bo(res),
                         true, IRIS_DOMAIN_NONE);
   }

   if (stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_CS |
                      IRIS_STAGE_DIRTY_BINDINGS_CS |
                      IRIS_STAGE_DIRTY_CONSTANTS_CS |
                      IRIS_STAGE_DIRTY_CS)) {
      uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];

      iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
         idd.KernelStartPointer =
            KSP(shader) + brw_cs_prog_data_prog_offset(cs_prog_data, simd_size);
         idd.SamplerStatePointer = shs->sampler_table.offset;
         idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
         idd.NumberofThreadsinGPGPUThreadGroup = threads;
      }

      for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
         desc[i] |= ((uint32_t *) shader->derived_data)[i];

      iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
         load.InterfaceDescriptorTotalLength =
            GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
         load.InterfaceDescriptorDataStartAddress =
            emit_state(batch, ice->state.dynamic_uploader,
                       &ice->state.last_res.cs_desc, desc, sizeof(desc), 64);
      }
   }

   if (grid->indirect)
      iris_load_indirect_location(ice, batch, grid);

   const uint32_t right_mask = brw_cs_right_mask(group_size, simd_size);

   iris_emit_cmd(batch, GENX(GPGPU_WALKER), ggw) {
      ggw.IndirectParameterEnable    = grid->indirect != NULL;
      ggw.SIMDSize                   = simd_size / 16;
      ggw.ThreadDepthCounterMaximum  = 0;
      ggw.ThreadHeightCounterMaximum = 0;
      ggw.ThreadWidthCounterMaximum  = threads - 1;
      ggw.ThreadGroupIDXDimension    = grid->grid[0];
      ggw.ThreadGroupIDYDimension    = grid->grid[1];
      ggw.ThreadGroupIDZDimension    = grid->grid[2];
      ggw.RightExecutionMask         = right_mask;
      ggw.BottomExecutionMask        = 0xffffffff;
   }

   iris_emit_cmd(batch, GENX(MEDIA_STATE_FLUSH), msf);
}
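
/* CURBE sizing sketch: with the single per-thread register asserted above
 * (the subgroup ID) and no cross-thread constants, a dispatch of N threads
 * needs ALIGN(N, 2) registers of CURBE space, and
 * brw_cs_push_const_total_size() returns the matching byte size that gets
 * streamed and handed to MEDIA_CURBE_LOAD above.
 */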
static void
iris_upload_compute_state(struct iris_context *ice,
                          struct iris_batch *batch,
                          const struct pipe_grid_info *grid)
{
   const uint64_t stage_dirty = ice->state.stage_dirty;
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
   struct iris_compiled_shader *shader =
      ice->shaders.prog[MESA_SHADER_COMPUTE];

   iris_batch_sync_region_start(batch);

   /* Always pin the binder.  If we're emitting new binding table pointers,
    * we need it.  If not, we're probably inheriting old tables via the
    * context, and need it anyway.  Since true zero-bindings cases are
    * practically non-existent, just pin it and avoid last_res tracking.
    */
   iris_use_pinned_bo(batch, ice->state.binder.bo, false, IRIS_DOMAIN_NONE);

   if ((stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
       shs->sysvals_need_upload)
      upload_sysvals(ice, MESA_SHADER_COMPUTE, grid);

   if (stage_dirty & IRIS_STAGE_DIRTY_BINDINGS_CS)
      iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);

   if (stage_dirty & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS)
      iris_upload_sampler_states(ice, MESA_SHADER_COMPUTE);

   iris_use_optional_res(batch, shs->sampler_table.res, false,
                         IRIS_DOMAIN_NONE);
   iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false,
                      IRIS_DOMAIN_NONE);

   if (ice->state.need_border_colors)
      iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false,
                         IRIS_DOMAIN_NONE);

#if GEN_GEN >= 12
   genX(invalidate_aux_map_state)(batch);
#endif

   iris_upload_gpgpu_walker(ice, batch, grid);

   if (!batch->contains_draw_with_next_seqno) {
      iris_restore_compute_saved_bos(ice, batch, grid);
      batch->contains_draw_with_next_seqno = batch->contains_draw = true;
   }

   iris_batch_sync_region_end(batch);
}
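
/* Note that nearly everything above is gated on per-stage
 * IRIS_STAGE_DIRTY_* bits for MESA_SHADER_COMPUTE rather than the global
 * IRIS_DIRTY_* bits.  For example (illustrative, not an exhaustive
 * description of the dirty-bit flow): binding a new compute shader flags
 * IRIS_STAGE_DIRTY_CS, which makes iris_upload_gpgpu_walker() rebuild the
 * interface descriptor, while a launch that only changes the grid
 * dimensions reuses all of that state and merely re-emits the walker.
 */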
/**
 * State module teardown.
 */
static void
iris_destroy_state(struct iris_context *ice)
{
   struct iris_genx_state *genx = ice->state.genx;

   pipe_resource_reference(&ice->draw.draw_params.res, NULL);
   pipe_resource_reference(&ice->draw.derived_draw_params.res, NULL);

   /* Loop over all VBOs, including ones for draw parameters */
   for (unsigned i = 0; i < ARRAY_SIZE(genx->vertex_buffers); i++) {
      pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL);
   }

   free(ice->state.genx);

   for (int i = 0; i < 4; i++) {
      pipe_so_target_reference(&ice->state.so_target[i], NULL);
   }

   for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
      pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
   }
   pipe_surface_reference(&ice->state.framebuffer.zsbuf, NULL);

   for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
      struct iris_shader_state *shs = &ice->state.shaders[stage];
      pipe_resource_reference(&shs->sampler_table.res, NULL);
      for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
         pipe_resource_reference(&shs->constbuf[i].buffer, NULL);
         pipe_resource_reference(&shs->constbuf_surf_state[i].res, NULL);
      }
      for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
         pipe_resource_reference(&shs->image[i].base.resource, NULL);
         pipe_resource_reference(&shs->image[i].surface_state.ref.res, NULL);
         free(shs->image[i].surface_state.cpu);
      }
      for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
         pipe_resource_reference(&shs->ssbo[i].buffer, NULL);
         pipe_resource_reference(&shs->ssbo_surf_state[i].res, NULL);
      }
      for (int i = 0; i < IRIS_MAX_TEXTURE_SAMPLERS; i++) {
         pipe_sampler_view_reference((struct pipe_sampler_view **)
                                     &shs->textures[i], NULL);
      }
   }

   pipe_resource_reference(&ice->state.grid_size.res, NULL);
   pipe_resource_reference(&ice->state.grid_surf_state.res, NULL);

   pipe_resource_reference(&ice->state.null_fb.res, NULL);
   pipe_resource_reference(&ice->state.unbound_tex.res, NULL);

   pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
   pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
   pipe_resource_reference(&ice->state.last_res.color_calc, NULL);
   pipe_resource_reference(&ice->state.last_res.scissor, NULL);
   pipe_resource_reference(&ice->state.last_res.blend, NULL);
   pipe_resource_reference(&ice->state.last_res.index_buffer, NULL);
   pipe_resource_reference(&ice->state.last_res.cs_thread_ids, NULL);
   pipe_resource_reference(&ice->state.last_res.cs_desc, NULL);
}
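
/* A note on the teardown above: pipe_resource_reference(&ptr, NULL) is
 * Gallium's reference-counting helper.  The pattern is roughly this (a
 * sketch, not the actual util header implementation):
 *
 *    struct pipe_resource *old = *ptr;
 *    if (old && --old->reference.count == 0)
 *       old->screen->resource_destroy(old->screen, old);
 *    *ptr = NULL;
 *
 * Every reference the state module holds must be dropped this way, or the
 * underlying buffer objects would leak when the context is destroyed.
 */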
/* ------------------------------------------------------------------- */

static void
iris_rebind_buffer(struct iris_context *ice,
                   struct iris_resource *res)
{
   struct pipe_context *ctx = &ice->ctx;
   struct iris_genx_state *genx = ice->state.genx;

   assert(res->base.target == PIPE_BUFFER);

   /* Buffers can't be framebuffer attachments, nor display related,
    * and we don't have upstream Clover support.
    */
   assert(!(res->bind_history & (PIPE_BIND_DEPTH_STENCIL |
                                 PIPE_BIND_RENDER_TARGET |
                                 PIPE_BIND_BLENDABLE |
                                 PIPE_BIND_DISPLAY_TARGET |
                                 PIPE_BIND_CURSOR |
                                 PIPE_BIND_COMPUTE_RESOURCE |
                                 PIPE_BIND_GLOBAL)));

   if (res->bind_history & PIPE_BIND_VERTEX_BUFFER) {
      uint64_t bound_vbs = ice->state.bound_vertex_buffers;
      while (bound_vbs) {
         const int i = u_bit_scan64(&bound_vbs);
         struct iris_vertex_buffer_state *state = &genx->vertex_buffers[i];

         /* Update the CPU struct */
         STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32);
         STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64);
         uint64_t *addr = (uint64_t *) &state->state[1];
         struct iris_bo *bo = iris_resource_bo(state->resource);

         if (*addr != bo->gtt_offset + state->offset) {
            *addr = bo->gtt_offset + state->offset;
            ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
         }
      }
   }

   /* We don't need to handle PIPE_BIND_INDEX_BUFFER here: we re-emit
    * the 3DSTATE_INDEX_BUFFER packet whenever the address changes.
    *
    * There is also no need to handle these:
    * - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw)
    * - PIPE_BIND_QUERY_BUFFER (no persistent state references)
    */

   if (res->bind_history & PIPE_BIND_STREAM_OUTPUT) {
      /* XXX: be careful about resetting vs appending... */
      assert(false);
   }

   for (int s = MESA_SHADER_VERTEX; s < MESA_SHADER_STAGES; s++) {
      struct iris_shader_state *shs = &ice->state.shaders[s];
      enum pipe_shader_type p_stage = stage_to_pipe(s);

      if (!(res->bind_stages & (1 << s)))
         continue;

      if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
         /* Skip constant buffer 0, it's for regular uniforms, not UBOs */
         uint32_t bound_cbufs = shs->bound_cbufs & ~1u;
         while (bound_cbufs) {
            const int i = u_bit_scan(&bound_cbufs);
            struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
            struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];

            if (res->bo == iris_resource_bo(cbuf->buffer)) {
               pipe_resource_reference(&surf_state->res, NULL);
               ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << s;
            }
         }
      }

      if (res->bind_history & PIPE_BIND_SHADER_BUFFER) {
         uint32_t bound_ssbos = shs->bound_ssbos;
         while (bound_ssbos) {
            const int i = u_bit_scan(&bound_ssbos);
            struct pipe_shader_buffer *ssbo = &shs->ssbo[i];

            if (res->bo == iris_resource_bo(ssbo->buffer)) {
               struct pipe_shader_buffer buf = {
                  .buffer = &res->base,
                  .buffer_offset = ssbo->buffer_offset,
                  .buffer_size = ssbo->buffer_size,
               };
               iris_set_shader_buffers(ctx, p_stage, i, 1, &buf,
                                       (shs->writable_ssbos >> i) & 1);
            }
         }
      }

      if (res->bind_history & PIPE_BIND_SAMPLER_VIEW) {
         uint32_t bound_sampler_views = shs->bound_sampler_views;
         while (bound_sampler_views) {
            const int i = u_bit_scan(&bound_sampler_views);
            struct iris_sampler_view *isv = shs->textures[i];
            struct iris_bo *bo = isv->res->bo;

            if (update_surface_state_addrs(ice->state.surface_uploader,
                                           &isv->surface_state, bo)) {
               ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
            }
         }
      }

      if (res->bind_history & PIPE_BIND_SHADER_IMAGE) {
         uint32_t bound_image_views = shs->bound_image_views;
         while (bound_image_views) {
            const int i = u_bit_scan(&bound_image_views);
            struct iris_image_view *iv = &shs->image[i];
            struct iris_bo *bo = iris_resource_bo(iv->base.resource);

            if (update_surface_state_addrs(ice->state.surface_uploader,
                                           &iv->surface_state, bo)) {
               ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
            }
         }
      }
   }
}
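
/* The IRIS_STAGE_DIRTY_CONSTANTS_VS << s and IRIS_STAGE_DIRTY_BINDINGS_VS
 * << s shifts above rely on the per-stage dirty bits being laid out
 * contiguously in shader stage order, so shifting the vertex shader's bit
 * by the stage index lands on that stage's bit.  For example, with
 * s == MESA_SHADER_FRAGMENT, IRIS_STAGE_DIRTY_BINDINGS_VS << s is the
 * fragment shader's BINDINGS bit, and only that stage re-uploads its
 * binding table on the next draw.
 */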
/* ------------------------------------------------------------------- */

/**
 * Introduce a batch synchronization boundary, and update its cache coherency
 * status to reflect the execution of a PIPE_CONTROL command with the
 * specified flags.
 */
static void
batch_mark_sync_for_pipe_control(struct iris_batch *batch, uint32_t flags)
{
   iris_batch_sync_boundary(batch);

   if ((flags & PIPE_CONTROL_CS_STALL)) {
      if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
         iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_RENDER_WRITE);

      if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
         iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);

      if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
         iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_WRITE);

      if ((flags & (PIPE_CONTROL_CACHE_FLUSH_BITS |
                    PIPE_CONTROL_STALL_AT_SCOREBOARD)))
         iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_READ);
   }

   if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
      iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_RENDER_WRITE);

   if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
      iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);

   if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
      iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_WRITE);

   if ((flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) &&
       (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE))
      iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_READ);
}
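
/* For example (an illustration of the bookkeeping only; no extra commands
 * are emitted here): a PIPE_CONTROL with CS_STALL | RENDER_TARGET_FLUSH
 * marks IRIS_DOMAIN_RENDER_WRITE both flush-synced and invalidate-synced,
 * so later accesses tracked against that domain know the render cache has
 * already been made coherent and can skip a redundant flush.
 */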
static unsigned
flags_to_post_sync_op(uint32_t flags)
{
   if (flags & PIPE_CONTROL_WRITE_IMMEDIATE)
      return WriteImmediateData;

   if (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT)
      return WritePSDepthCount;

   if (flags & PIPE_CONTROL_WRITE_TIMESTAMP)
      return WriteTimestamp;

   return 0;
}

/**
 * Do the given flags have a Post Sync or LRI Post Sync operation?
 */
static enum pipe_control_flags
get_post_sync_flags(enum pipe_control_flags flags)
{
   flags &= PIPE_CONTROL_WRITE_IMMEDIATE |
            PIPE_CONTROL_WRITE_DEPTH_COUNT |
            PIPE_CONTROL_WRITE_TIMESTAMP |
            PIPE_CONTROL_LRI_POST_SYNC_OP;

   /* Only one "Post Sync Op" is allowed, and it's mutually exclusive with
    * "LRI Post Sync Operation".  So more than one bit set would be illegal.
    */
   assert(util_bitcount(flags) <= 1);

   return flags;
}
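
/* For instance, a caller passing both PIPE_CONTROL_WRITE_IMMEDIATE and
 * PIPE_CONTROL_WRITE_TIMESTAMP would trip the assert above: both requests
 * map to the same Post Sync Operation field and cannot be expressed in a
 * single PIPE_CONTROL.
 */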
#define IS_COMPUTE_PIPELINE(batch) (batch->name == IRIS_BATCH_COMPUTE)

/**
 * Emit a series of PIPE_CONTROL commands, taking into account any
 * workarounds necessary to actually accomplish the caller's request.
 *
 * Unless otherwise noted, spec quotations in this function come from:
 *
 * Synchronization of the 3D Pipeline > PIPE_CONTROL Command > Programming
 * Restrictions for PIPE_CONTROL.
 *
 * You should not use this function directly.  Use the helpers in
 * iris_pipe_control.c instead, which may split the pipe control further.
 */
static void
iris_emit_raw_pipe_control(struct iris_batch *batch,
                           const char *reason,
                           uint32_t flags,
                           struct iris_bo *bo,
                           uint32_t offset,
                           uint64_t imm)
{
   UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
   enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
   enum pipe_control_flags non_lri_post_sync_flags =
      post_sync_flags & ~PIPE_CONTROL_LRI_POST_SYNC_OP;
   /* Recursive PIPE_CONTROL workarounds --------------------------------
    * (http://knowyourmeme.com/memes/xzibit-yo-dawg)
    *
    * We do these first because we want to look at the original operation,
    * rather than any workarounds we set.
    */
   if (GEN_GEN == 9 && (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
      /* The PIPE_CONTROL "VF Cache Invalidation Enable" bit description
       * lists several workarounds:
       *
       *    "Project: SKL, KBL, BXT
       *
       *     If the VF Cache Invalidation Enable is set to a 1 in a
       *     PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields
       *     sets to 0, with the VF Cache Invalidation Enable set to 0
       *     needs to be sent prior to the PIPE_CONTROL with VF Cache
       *     Invalidation Enable set to a 1."
       */
      iris_emit_raw_pipe_control(batch,
                                 "workaround: recursive VF cache invalidate",
                                 0, NULL, 0, 0);
   }

   /* GEN:BUG:1409226450, Wait for EU to be idle before pipe control which
    * invalidates the instruction cache
    */
   if (GEN_GEN == 12 && (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE)) {
      iris_emit_raw_pipe_control(batch,
                                 "workaround: CS stall before instruction "
                                 "cache invalidate",
                                 PIPE_CONTROL_CS_STALL |
                                 PIPE_CONTROL_STALL_AT_SCOREBOARD, bo, offset,
                                 imm);
   }

   if ((GEN_GEN == 9 || (GEN_GEN == 12 && devinfo->revision == 0 /* A0 */)) &&
       IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
      /* Project: SKL / Argument: LRI Post Sync Operation [23]
       *
       * "PIPECONTROL command with “Command Streamer Stall Enable” must be
       *  programmed prior to programming a PIPECONTROL command with "LRI
       *  Post Sync Operation" in GPGPU mode of operation (i.e when
       *  PIPELINE_SELECT command is set to GPGPU mode of operation)."
       *
       * The same text exists a few rows below for Post Sync Op.
       *
       * On Gen12 this is GEN:BUG:1607156449.
       */
      iris_emit_raw_pipe_control(batch,
                                 "workaround: CS stall before gpgpu post-sync",
                                 PIPE_CONTROL_CS_STALL, bo, offset, imm);
   }
7194 /* "Flush Types" workarounds ---------------------------------------------
7195 * We do these now because they may add post-sync operations or CS stalls.
7198 if (GEN_GEN
< 11 && flags
& PIPE_CONTROL_VF_CACHE_INVALIDATE
) {
7199 /* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
7201 * "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
7202 * 'Write PS Depth Count' or 'Write Timestamp'."
7205 flags
|= PIPE_CONTROL_WRITE_IMMEDIATE
;
7206 post_sync_flags
|= PIPE_CONTROL_WRITE_IMMEDIATE
;
7207 non_lri_post_sync_flags
|= PIPE_CONTROL_WRITE_IMMEDIATE
;
7208 bo
= batch
->screen
->workaround_address
.bo
;
7209 offset
= batch
->screen
->workaround_address
.offset
;
7213 if (flags
& PIPE_CONTROL_DEPTH_STALL
) {
7214 /* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
7216 * "This bit must be DISABLED for operations other than writing
7219 * This seems like nonsense. An Ivybridge workaround requires us to
7220 * emit a PIPE_CONTROL with a depth stall and write immediate post-sync
7221 * operation. Gen8+ requires us to emit depth stalls and depth cache
7222 * flushes together. So, it's hard to imagine this means anything other
7223 * than "we originally intended this to be used for PS_DEPTH_COUNT".
7225 * We ignore the supposed restriction and do nothing.
7229 if (flags
& (PIPE_CONTROL_RENDER_TARGET_FLUSH
|
7230 PIPE_CONTROL_STALL_AT_SCOREBOARD
)) {
7231 /* From the PIPE_CONTROL instruction table, bit 12 and bit 1:
7233 * "This bit must be DISABLED for End-of-pipe (Read) fences,
7234 * PS_DEPTH_COUNT or TIMESTAMP queries."
7236 * TODO: Implement end-of-pipe checking.
7238 assert(!(post_sync_flags
& (PIPE_CONTROL_WRITE_DEPTH_COUNT
|
7239 PIPE_CONTROL_WRITE_TIMESTAMP
)));
   if (GEN_GEN < 11 && (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
      /* From the PIPE_CONTROL instruction table, bit 1:
       *
       *    "This bit is ignored if Depth Stall Enable is set.
       *     Further, the render cache is not flushed even if Write Cache
       *     Flush Enable bit is set."
       *
       * We assert that the caller doesn't do this combination, to try and
       * prevent mistakes.  It shouldn't hurt the GPU, though.
       *
       * We skip this check on Gen11+ as the "Stall at Pixel Scoreboard"
       * and "Render Target Flush" combo is explicitly required for BTI
       * update workarounds.
       */
      assert(!(flags & (PIPE_CONTROL_DEPTH_STALL |
                        PIPE_CONTROL_RENDER_TARGET_FLUSH)));
   }

   /* PIPE_CONTROL page workarounds ------------------------------------- */

   if (GEN_GEN <= 8 && (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE)) {
      /* From the PIPE_CONTROL page itself:
       *
       *    "IVB, HSW, BDW
       *     Restriction: Pipe_control with CS-stall bit set must be issued
       *     before a pipe-control command that has the State Cache
       *     Invalidate bit set."
       */
      flags |= PIPE_CONTROL_CS_STALL;
   }

   if (flags & PIPE_CONTROL_FLUSH_LLC) {
      /* From the PIPE_CONTROL instruction table, bit 26 (Flush LLC):
       *
       *    "Project: ALL
       *     SW must always program Post-Sync Operation to "Write Immediate
       *     Data" when Flush LLC is set."
       *
       * For now, we just require the caller to do it.
       */
      assert(flags & PIPE_CONTROL_WRITE_IMMEDIATE);
   }
7285 /* "Post-Sync Operation" workarounds -------------------------------- */
7287 /* Project: All / Argument: Global Snapshot Count Reset [19]
7289 * "This bit must not be exercised on any product.
7290 * Requires stall bit ([20] of DW1) set."
7292 * We don't use this, so we just assert that it isn't used. The
7293 * PIPE_CONTROL instruction page indicates that they intended this
7294 * as a debug feature and don't think it is useful in production,
7295 * but it may actually be usable, should we ever want to.
7297 assert((flags
& PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET
) == 0);
7299 if (flags
& (PIPE_CONTROL_MEDIA_STATE_CLEAR
|
7300 PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE
)) {
7301 /* Project: All / Arguments:
7303 * - Generic Media State Clear [16]
7304 * - Indirect State Pointers Disable [16]
7306 * "Requires stall bit ([20] of DW1) set."
7308 * Also, the PIPE_CONTROL instruction table, bit 16 (Generic Media
7309 * State Clear) says:
7311 * "PIPECONTROL command with “Command Streamer Stall Enable” must be
7312 * programmed prior to programming a PIPECONTROL command with "Media
7313 * State Clear" set in GPGPU mode of operation"
7315 * This is a subset of the earlier rule, so there's nothing to do.
7317 flags
|= PIPE_CONTROL_CS_STALL
;
7320 if (flags
& PIPE_CONTROL_STORE_DATA_INDEX
) {
7321 /* Project: All / Argument: Store Data Index
7323 * "Post-Sync Operation ([15:14] of DW1) must be set to something other
7326 * For now, we just assert that the caller does this. We might want to
7327 * automatically add a write to the workaround BO...
7329 assert(non_lri_post_sync_flags
!= 0);
7332 if (flags
& PIPE_CONTROL_SYNC_GFDT
) {
7333 /* Project: All / Argument: Sync GFDT
7335 * "Post-Sync Operation ([15:14] of DW1) must be set to something other
7336 * than '0' or 0x2520[13] must be set."
7338 * For now, we just assert that the caller does this.
7340 assert(non_lri_post_sync_flags
!= 0);
7343 if (flags
& PIPE_CONTROL_TLB_INVALIDATE
) {
7344 /* Project: IVB+ / Argument: TLB inv
7346 * "Requires stall bit ([20] of DW1) set."
7348 * Also, from the PIPE_CONTROL instruction table:
7351 * Post Sync Operation or CS stall must be set to ensure a TLB
7352 * invalidation occurs. Otherwise no cycle will occur to the TLB
7353 * cache to invalidate."
7355 * This is not a subset of the earlier rule, so there's nothing to do.
7357 flags
|= PIPE_CONTROL_CS_STALL
;
   if (GEN_GEN >= 12 && ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ||
                         (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))) {
      /* From the PIPE_CONTROL instruction table, bit 28 (Tile Cache Flush
       * Enable):
       *
       *    Unified Cache (Tile Cache Disabled):
       *
       *    When the Color and Depth (Z) streams are enabled to be cached in
       *    the DC space of L2, Software must use "Render Target Cache Flush
       *    Enable" and "Depth Cache Flush Enable" along with "Tile Cache
       *    Flush" for getting the color and depth (Z) write data to be
       *    globally observable.  In this mode of operation it is not required
       *    to set "CS Stall" upon setting "Tile Cache Flush" bit.
       */
      flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
   }

   if (GEN_GEN == 9 && devinfo->gt == 4) {
      /* TODO: The big Skylake GT4 post sync op workaround */
   }

   /* "GPGPU specific workarounds" (both post-sync and flush) ------------ */

   if (IS_COMPUTE_PIPELINE(batch)) {
      if (GEN_GEN >= 9 && (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE)) {
         /* Project: SKL+ / Argument: Tex Invalidate
          *
          *    "Requires stall bit ([20] of DW) set for all GPGPU Workloads."
          */
         flags |= PIPE_CONTROL_CS_STALL;
      }

      if (GEN_GEN == 8 && (post_sync_flags ||
                           (flags & (PIPE_CONTROL_NOTIFY_ENABLE |
                                     PIPE_CONTROL_DEPTH_STALL |
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                     PIPE_CONTROL_DATA_CACHE_FLUSH)))) {
         /* Project: BDW / Arguments:
          *
          * - LRI Post Sync Operation   [23]
          * - Post Sync Op              [15:14]
          * - Notify En                 [8]
          * - Depth Stall               [13]
          * - Render Target Cache Flush [12]
          * - Depth Cache Flush         [0]
          * - DC Flush Enable           [5]
          *
          *    "Requires stall bit ([20] of DW) set for all GPGPU and Media
          *     Workloads."
          */
         flags |= PIPE_CONTROL_CS_STALL;

         /* Also, from the PIPE_CONTROL instruction table, bit 20:
          *
          *    "Project: BDW
          *     This bit must be always set when PIPE_CONTROL command is
          *     programmed by GPGPU and MEDIA workloads, except for the cases
          *     when only Read Only Cache Invalidation bits are set (State
          *     Cache Invalidation Enable, Instruction cache Invalidation
          *     Enable, Texture Cache Invalidation Enable, Constant Cache
          *     Invalidation Enable).  This is to WA FFDOP CG issue, this WA
          *     need not implemented when FF_DOP_CG is disable via "Fixed
          *     Function DOP Clock Gate Disable" bit in RC_PSMI_CTRL register."
          *
          * It sounds like we could avoid CS stalls in some cases, but we
          * don't currently bother.  This list isn't exactly the list above,
          * either...
          */
      }
   }
7431 /* "Stall" workarounds ----------------------------------------------
7432 * These have to come after the earlier ones because we may have added
7433 * some additional CS stalls above.
7436 if (GEN_GEN
< 9 && (flags
& PIPE_CONTROL_CS_STALL
)) {
7437 /* Project: PRE-SKL, VLV, CHV
7439 * "[All Stepping][All SKUs]:
7441 * One of the following must also be set:
7443 * - Render Target Cache Flush Enable ([12] of DW1)
7444 * - Depth Cache Flush Enable ([0] of DW1)
7445 * - Stall at Pixel Scoreboard ([1] of DW1)
7446 * - Depth Stall ([13] of DW1)
7447 * - Post-Sync Operation ([13] of DW1)
7448 * - DC Flush Enable ([5] of DW1)"
7450 * If we don't already have one of those bits set, we choose to add
7451 * "Stall at Pixel Scoreboard". Some of the other bits require a
7452 * CS stall as a workaround (see above), which would send us into
7453 * an infinite recursion of PIPE_CONTROLs. "Stall at Pixel Scoreboard"
7454 * appears to be safe, so we choose that.
7456 const uint32_t wa_bits
= PIPE_CONTROL_RENDER_TARGET_FLUSH
|
7457 PIPE_CONTROL_DEPTH_CACHE_FLUSH
|
7458 PIPE_CONTROL_WRITE_IMMEDIATE
|
7459 PIPE_CONTROL_WRITE_DEPTH_COUNT
|
7460 PIPE_CONTROL_WRITE_TIMESTAMP
|
7461 PIPE_CONTROL_STALL_AT_SCOREBOARD
|
7462 PIPE_CONTROL_DEPTH_STALL
|
7463 PIPE_CONTROL_DATA_CACHE_FLUSH
;
7464 if (!(flags
& wa_bits
))
7465 flags
|= PIPE_CONTROL_STALL_AT_SCOREBOARD
;
7468 if (GEN_GEN
>= 12 && (flags
& PIPE_CONTROL_DEPTH_CACHE_FLUSH
)) {
7469 /* GEN:BUG:1409600907:
7471 * "PIPE_CONTROL with Depth Stall Enable bit must be set
7472 * with any PIPE_CONTROL with Depth Flush Enable bit set.
7474 flags
|= PIPE_CONTROL_DEPTH_STALL
;
   /* Emit --------------------------------------------------------------- */

   if (INTEL_DEBUG & DEBUG_PIPE_CONTROL) {
      fprintf(stderr,
              " PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
              (flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
              (flags & PIPE_CONTROL_CS_STALL) ? "CS " : "",
              (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD) ? "Scoreboard " : "",
              (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) ? "VF " : "",
              (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ? "RT " : "",
              (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE) ? "Const " : "",
              (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) ? "TC " : "",
              (flags & PIPE_CONTROL_DATA_CACHE_FLUSH) ? "DC " : "",
              (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH) ? "ZFlush " : "",
              (flags & PIPE_CONTROL_DEPTH_STALL) ? "ZStall " : "",
              (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE) ? "State " : "",
              (flags & PIPE_CONTROL_TLB_INVALIDATE) ? "TLB " : "",
              (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE) ? "Inst " : "",
              (flags & PIPE_CONTROL_MEDIA_STATE_CLEAR) ? "MediaClear " : "",
              (flags & PIPE_CONTROL_NOTIFY_ENABLE) ? "Notify " : "",
              (flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) ?
                 "SnapRes" : "",
              (flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE) ?
                 "ISPDis" : "",
              (flags & PIPE_CONTROL_WRITE_IMMEDIATE) ? "WriteImm " : "",
              (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT) ? "WriteZCount " : "",
              (flags & PIPE_CONTROL_WRITE_TIMESTAMP) ? "WriteTimestamp " : "",
              (flags & PIPE_CONTROL_FLUSH_HDC) ? "HDC " : "",
              imm, reason);
   }
   batch_mark_sync_for_pipe_control(batch, flags);
   iris_batch_sync_region_start(batch);

   iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
#if GEN_GEN >= 12
      pc.TileCacheFlushEnable = flags & PIPE_CONTROL_TILE_CACHE_FLUSH;
#endif
#if GEN_GEN >= 11
      pc.HDCPipelineFlushEnable = flags & PIPE_CONTROL_FLUSH_HDC;
#endif
      pc.LRIPostSyncOperation = NoLRIOperation;
      pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
      pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
      pc.StoreDataIndex = 0;
      pc.CommandStreamerStallEnable = flags & PIPE_CONTROL_CS_STALL;
      pc.GlobalSnapshotCountReset =
         flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET;
      pc.TLBInvalidate = flags & PIPE_CONTROL_TLB_INVALIDATE;
      pc.GenericMediaStateClear = flags & PIPE_CONTROL_MEDIA_STATE_CLEAR;
      pc.StallAtPixelScoreboard = flags & PIPE_CONTROL_STALL_AT_SCOREBOARD;
      pc.RenderTargetCacheFlushEnable =
         flags & PIPE_CONTROL_RENDER_TARGET_FLUSH;
      pc.DepthCacheFlushEnable = flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH;
      pc.StateCacheInvalidationEnable =
         flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE;
      pc.VFCacheInvalidationEnable = flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
      pc.ConstantCacheInvalidationEnable =
         flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE;
      pc.PostSyncOperation = flags_to_post_sync_op(flags);
      pc.DepthStallEnable = flags & PIPE_CONTROL_DEPTH_STALL;
      pc.InstructionCacheInvalidateEnable =
         flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE;
      pc.NotifyEnable = flags & PIPE_CONTROL_NOTIFY_ENABLE;
      pc.IndirectStatePointersDisable =
         flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE;
      pc.TextureCacheInvalidationEnable =
         flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
      pc.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      pc.ImmediateData = imm;
   }

   iris_batch_sync_region_end(batch);
}
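
/* A typical caller in iris_pipe_control.c looks roughly like this (a sketch
 * of the calling convention, not a verbatim copy of that file):
 *
 *    void
 *    iris_emit_pipe_control_flush(struct iris_batch *batch,
 *                                 const char *reason, uint32_t flags)
 *    {
 *       ...possibly split flushes from invalidations...
 *       batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags,
 *                                                 NULL, 0, 0);
 *    }
 *
 * Going through the helpers keeps the split/ordering logic in one place.
 */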
/**
 * Preemption on Gen9 has to be enabled or disabled in various cases.
 *
 * See these workarounds for preemption:
 *  - WaDisableMidObjectPreemptionForGSLineStripAdj
 *  - WaDisableMidObjectPreemptionForTrifanOrPolygon
 *  - WaDisableMidObjectPreemptionForLineLoop
 *  - WA#0798
 *
 * We don't put this in the vtable because it's only used on Gen9.
 */
void
gen9_toggle_preemption(struct iris_context *ice,
                       struct iris_batch *batch,
                       const struct pipe_draw_info *draw)
{
   struct iris_genx_state *genx = ice->state.genx;
   bool object_preemption = true;

   /* WaDisableMidObjectPreemptionForGSLineStripAdj
    *
    *    "WA: Disable mid-draw preemption when draw-call is a linestrip_adj
    *     and GS is enabled."
    */
   if (draw->mode == PIPE_PRIM_LINE_STRIP_ADJACENCY &&
       ice->shaders.prog[MESA_SHADER_GEOMETRY])
      object_preemption = false;

   /* WaDisableMidObjectPreemptionForTrifanOrPolygon
    *
    *    "TriFan miscompare in Execlist Preemption test.  Cut index that is
    *     on a previous context.  End the previous, the resume another context
    *     with a tri-fan or polygon, and the vertex count is corrupted.  If we
    *     prempt again we will cause corruption.
    *
    *     WA: Disable mid-draw preemption when draw-call has a tri-fan."
    */
   if (draw->mode == PIPE_PRIM_TRIANGLE_FAN)
      object_preemption = false;

   /* WaDisableMidObjectPreemptionForLineLoop
    *
    *    "VF Stats Counters Missing a vertex when preemption enabled.
    *
    *     WA: Disable mid-draw preemption when the draw uses a lineloop
    *     topology."
    */
   if (draw->mode == PIPE_PRIM_LINE_LOOP)
      object_preemption = false;

   /* WA#0798
    *
    *    "VF is corrupting GAFS data when preempted on an instance boundary
    *     and replayed with instancing enabled.
    *
    *     WA: Disable preemption when using instanceing."
    */
   if (draw->instance_count > 1)
      object_preemption = false;

   if (genx->object_preemption != object_preemption) {
      iris_enable_obj_preemption(batch, object_preemption);
      genx->object_preemption = object_preemption;
   }
}
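
/* Because the current mode is cached in genx->object_preemption above, the
 * register write in iris_enable_obj_preemption() only happens when a draw
 * actually changes the required preemption mode, so invoking this before
 * every Gen9 draw is cheap.  (This is a behavioral note about the caching,
 * not a reference to a specific call site.)
 */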
static void
iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch)
{
   struct iris_genx_state *genx = ice->state.genx;

   memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer));
}
static void
iris_emit_mi_report_perf_count(struct iris_batch *batch,
                               struct iris_bo *bo,
                               uint32_t offset_in_bytes,
                               uint32_t report_id)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
      mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes,
                                   IRIS_DOMAIN_OTHER_WRITE);
      mi_rpc.ReportID = report_id;
   }
   iris_batch_sync_region_end(batch);
}
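
/* MI_REPORT_PERF_COUNT snapshots the OA (observation architecture) counters
 * into the given buffer.  The performance query code typically brackets a
 * workload with two reports and diffs them; illustrative usage, with
 * hypothetical offsets and report IDs:
 *
 *    iris_emit_mi_report_perf_count(batch, query_bo, 0, begin_id);
 *    ...queued GPU work...
 *    iris_emit_mi_report_perf_count(batch, query_bo, 256, end_id);
 */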
/**
 * Update the pixel hashing modes that determine the balancing of PS threads
 * across subslices and slices.
 *
 * \param width Width bound of the rendering area (already scaled down if \p
 *              scale is greater than 1).
 * \param height Height bound of the rendering area (already scaled down if \p
 *               scale is greater than 1).
 * \param scale The number of framebuffer samples that could potentially be
 *              affected by an individual channel of the PS thread.  This is
 *              typically one for single-sampled rendering, but for operations
 *              like CCS resolves and fast clears a single PS invocation may
 *              update a huge number of pixels, in which case a finer
 *              balancing is desirable in order to maximally utilize the
 *              bandwidth available.  UINT_MAX can be used as shorthand for
 *              "finest hashing mode available".
 */
void
genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
                        unsigned width, unsigned height, unsigned scale)
{
#if GEN_GEN == 9
   const struct gen_device_info *devinfo = &batch->screen->devinfo;
   const unsigned slice_hashing[] = {
      /* Because all Gen9 platforms with more than one slice require
       * three-way subslice hashing, a single "normal" 16x16 slice hashing
       * block is guaranteed to suffer from substantial imbalance, with one
       * subslice receiving twice as much work as the other two in the
       * slice.
       *
       * The performance impact of that would be particularly severe when
       * three-way hashing is also in use for slice balancing (which is the
       * case for all Gen9 GT4 platforms), because one of the slices
       * receives one every three 16x16 blocks in either direction, which
       * is roughly the periodicity of the underlying subslice imbalance
       * pattern ("roughly" because in reality the hardware's
       * implementation of three-way hashing doesn't do exact modulo 3
       * arithmetic, which somewhat decreases the magnitude of this effect
       * in practice).  This leads to a systematic subslice imbalance
       * within that slice regardless of the size of the primitive.  The
       * 32x32 hashing mode guarantees that the subslice imbalance within a
       * single slice hashing block is minimal, largely eliminating this
       * effect.
       */
      _32x32,
      /* Finest slice hashing mode available. */
      NORMAL
   };
   const unsigned subslice_hashing[] = {
      /* 16x16 would provide a slight cache locality benefit especially
       * visible in the sampler L1 cache efficiency of low-bandwidth
       * non-LLC platforms, but it comes at the cost of greater subslice
       * imbalance for primitives of dimensions approximately intermediate
       * between 16x4 and 16x16.
       */
      _16x4,
      /* Finest subslice hashing mode available. */
      _8x4
   };
   /* Dimensions of the smallest hashing block of a given hashing mode.  If
    * the rendering area is smaller than this there can't possibly be any
    * benefit from switching to this mode, so we optimize out the
    * transition.
    */
   const unsigned min_size[][2] = {
      { 16, 4 },
      {  8, 4 }
   };
   const unsigned idx = scale > 1;

   if (width > min_size[idx][0] || height > min_size[idx][1]) {
      uint32_t gt_mode;

      iris_pack_state(GENX(GT_MODE), &gt_mode, reg) {
         reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
         reg.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0);
         reg.SubsliceHashing = subslice_hashing[idx];
         reg.SubsliceHashingMask = -1;
      }

      iris_emit_raw_pipe_control(batch,
                                 "workaround: CS stall before GT_MODE LRI",
                                 PIPE_CONTROL_STALL_AT_SCOREBOARD |
                                 PIPE_CONTROL_CS_STALL,
                                 NULL, 0, 0);

      iris_emit_lri(batch, GT_MODE, gt_mode);

      ice->state.current_hash_scale = scale;
   }
#endif
}
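
/* Worked example (illustrative): a fast clear passes scale == UINT_MAX, so
 * idx == 1 and the finest modes are programmed (NORMAL slice hashing, 8x4
 * subslice hashing) provided the area exceeds 8x4.  Ordinary single-sampled
 * rendering passes scale == 1, so idx == 0 selects 32x32 slice hashing and
 * 16x4 subslice hashing for better cache locality.
 */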
static void
iris_set_frontend_noop(struct pipe_context *ctx, bool enable)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER], enable)) {
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER;
      ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
   }

   if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE], enable)) {
      ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
      ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE;
   }
}
void
genX(init_state)(struct iris_context *ice)
{
   struct pipe_context *ctx = &ice->ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;

   ctx->create_blend_state = iris_create_blend_state;
   ctx->create_depth_stencil_alpha_state = iris_create_zsa_state;
   ctx->create_rasterizer_state = iris_create_rasterizer_state;
   ctx->create_sampler_state = iris_create_sampler_state;
   ctx->create_sampler_view = iris_create_sampler_view;
   ctx->create_surface = iris_create_surface;
   ctx->create_vertex_elements_state = iris_create_vertex_elements;
   ctx->bind_blend_state = iris_bind_blend_state;
   ctx->bind_depth_stencil_alpha_state = iris_bind_zsa_state;
   ctx->bind_sampler_states = iris_bind_sampler_states;
   ctx->bind_rasterizer_state = iris_bind_rasterizer_state;
   ctx->bind_vertex_elements_state = iris_bind_vertex_elements_state;
   ctx->delete_blend_state = iris_delete_state;
   ctx->delete_depth_stencil_alpha_state = iris_delete_state;
   ctx->delete_rasterizer_state = iris_delete_state;
   ctx->delete_sampler_state = iris_delete_state;
   ctx->delete_vertex_elements_state = iris_delete_state;
   ctx->set_blend_color = iris_set_blend_color;
   ctx->set_clip_state = iris_set_clip_state;
   ctx->set_constant_buffer = iris_set_constant_buffer;
   ctx->set_shader_buffers = iris_set_shader_buffers;
   ctx->set_shader_images = iris_set_shader_images;
   ctx->set_sampler_views = iris_set_sampler_views;
   ctx->set_compute_resources = iris_set_compute_resources;
   ctx->set_global_binding = iris_set_global_binding;
   ctx->set_tess_state = iris_set_tess_state;
   ctx->set_framebuffer_state = iris_set_framebuffer_state;
   ctx->set_polygon_stipple = iris_set_polygon_stipple;
   ctx->set_sample_mask = iris_set_sample_mask;
   ctx->set_scissor_states = iris_set_scissor_states;
   ctx->set_stencil_ref = iris_set_stencil_ref;
   ctx->set_vertex_buffers = iris_set_vertex_buffers;
   ctx->set_viewport_states = iris_set_viewport_states;
   ctx->sampler_view_destroy = iris_sampler_view_destroy;
   ctx->surface_destroy = iris_surface_destroy;
   ctx->draw_vbo = iris_draw_vbo;
   ctx->launch_grid = iris_launch_grid;
   ctx->create_stream_output_target = iris_create_stream_output_target;
   ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
   ctx->set_stream_output_targets = iris_set_stream_output_targets;
   ctx->set_frontend_noop = iris_set_frontend_noop;

   screen->vtbl.destroy_state = iris_destroy_state;
   screen->vtbl.init_render_context = iris_init_render_context;
   screen->vtbl.init_compute_context = iris_init_compute_context;
   screen->vtbl.upload_render_state = iris_upload_render_state;
   screen->vtbl.update_surface_base_address = iris_update_surface_base_address;
   screen->vtbl.upload_compute_state = iris_upload_compute_state;
   screen->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
   screen->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
   screen->vtbl.rebind_buffer = iris_rebind_buffer;
   screen->vtbl.load_register_reg32 = iris_load_register_reg32;
   screen->vtbl.load_register_reg64 = iris_load_register_reg64;
   screen->vtbl.load_register_imm32 = iris_load_register_imm32;
   screen->vtbl.load_register_imm64 = iris_load_register_imm64;
   screen->vtbl.load_register_mem32 = iris_load_register_mem32;
   screen->vtbl.load_register_mem64 = iris_load_register_mem64;
   screen->vtbl.store_register_mem32 = iris_store_register_mem32;
   screen->vtbl.store_register_mem64 = iris_store_register_mem64;
   screen->vtbl.store_data_imm32 = iris_store_data_imm32;
   screen->vtbl.store_data_imm64 = iris_store_data_imm64;
   screen->vtbl.copy_mem_mem = iris_copy_mem_mem;
   screen->vtbl.derived_program_state_size = iris_derived_program_state_size;
   screen->vtbl.store_derived_program_state = iris_store_derived_program_state;
   screen->vtbl.create_so_decl_list = iris_create_so_decl_list;
   screen->vtbl.populate_vs_key = iris_populate_vs_key;
   screen->vtbl.populate_tcs_key = iris_populate_tcs_key;
   screen->vtbl.populate_tes_key = iris_populate_tes_key;
   screen->vtbl.populate_gs_key = iris_populate_gs_key;
   screen->vtbl.populate_fs_key = iris_populate_fs_key;
   screen->vtbl.populate_cs_key = iris_populate_cs_key;
   screen->vtbl.lost_genx_state = iris_lost_genx_state;

   ice->state.dirty = ~0ull;
   ice->state.stage_dirty = ~0ull;

   ice->state.statistics_counters_enabled = true;

   ice->state.sample_mask = 0xffff;
   ice->state.num_viewports = 1;
   ice->state.prim_mode = PIPE_PRIM_MAX;
   ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
   ice->draw.derived_params.drawid = -1;

   /* Make a 1x1x1 null surface for unbound textures */
   void *null_surf_map =
      upload_state(ice->state.surface_uploader, &ice->state.unbound_tex,
                   4 * GENX(RENDER_SURFACE_STATE_length), 64);
   isl_null_fill_state(&screen->isl_dev, null_surf_map, isl_extent3d(1, 1, 1));
   ice->state.unbound_tex.offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(ice->state.unbound_tex.res));

   /* Default all scissor rectangles to be empty regions. */
   for (int i = 0; i < IRIS_MAX_VIEWPORTS; i++) {
      ice->state.scissors[i] = (struct pipe_scissor_state) {
         .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,