/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * ============================= GENXML CODE =============================
 *              [This file is compiled once per generation.]
 * =======================================================================
 *
 * This is the main state upload code.
 *
 * Gallium uses Constant State Objects, or CSOs, for most state.  Large,
 * complex, or highly reusable state can be created once, and bound and
 * rebound multiple times.  This is modeled with the pipe->create_*_state()
 * and pipe->bind_*_state() hooks.  Highly dynamic or inexpensive state is
 * streamed out on the fly, via pipe->set_*_state() hooks.
 *
 * OpenGL involves frequently mutating context state, which is mirrored in
 * core Mesa by highly mutable data structures.  However, most applications
 * typically draw the same things over and over - from frame to frame, most
 * of the same objects are still visible and need to be redrawn.  So, rather
 * than inventing new state all the time, applications usually mutate to swap
 * between known states that we've seen before.
 *
 * Gallium isolates us from this mutation by tracking API state, and
 * distilling it into a set of Constant State Objects, or CSOs.  Large,
 * complex, or typically reusable state can be created once, then reused
 * multiple times.  Drivers can create and store their own associated data.
 * This create/bind model corresponds to the pipe->create_*_state() and
 * pipe->bind_*_state() driver hooks.
 *
 * Some state is cheap to create, or expected to be highly dynamic.  Rather
 * than creating and caching piles of CSOs for these, Gallium simply streams
 * them out, via the pipe->set_*_state() driver hooks.
 *
 * To reduce draw time overhead, we try to compute as much state at create
 * time as possible.  Wherever possible, we translate the Gallium pipe state
 * to 3DSTATE commands, and store those commands in the CSO.  At draw time,
 * we can simply memcpy them into a batch buffer.
 *
 * No hardware matches the abstraction perfectly, so some commands require
 * information from multiple CSOs.  In this case, we can store two copies
 * of the packet (one in each CSO), and simply | together their DWords at
 * draw time.  Sometimes the second set is trivial (one or two fields), so
 * we simply pack it at draw time.
 *
 * There are two main components in the file below.  First, the CSO hooks
 * create/bind/track state.  The second are the draw-time upload functions,
 * iris_upload_render_state() and iris_upload_compute_state(), which read
 * the context state and emit the commands into the actual batch.
 */
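/* As an illustrative sketch of the DWord-OR merging described above (the CSO
 * and packet names here are hypothetical, not actual fields in this file):
 * if 3DSTATE_FOO needs fields from both a rasterizer CSO and a blend CSO,
 * each stores a partially-packed copy, and draw time just ORs them into the
 * batch:
 *
 *    iris_emit_merge(batch, cso_rast->foo, cso_blend->foo,
 *                    GENX(3DSTATE_FOO_length));
 */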
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "util/u_viewport.h"

#include "intel/compiler/brw_compiler.h"
#include "intel/common/gen_l3_config.h"
#include "intel/common/gen_sample_positions.h"
#include "iris_batch.h"
#include "iris_context.h"
#include "iris_pipe.h"
#include "iris_resource.h"

#define __gen_address_type struct iris_address
#define __gen_user_data struct iris_batch

#define ARRAY_BYTES(x) (sizeof(uint32_t) * ARRAY_SIZE(x))
static uint64_t
__gen_combine_address(struct iris_batch *batch, void *location,
                      struct iris_address addr, uint32_t delta)
{
   uint64_t result = addr.offset + delta;

   if (addr.bo) {
      iris_use_pinned_bo(batch, addr.bo, addr.write);
      /* Assume this is a general address, not relative to a base. */
      result += addr.bo->gtt_offset;
   }

   return result;
}
#define __genxml_cmd_length(cmd) cmd ## _length
#define __genxml_cmd_length_bias(cmd) cmd ## _length_bias
#define __genxml_cmd_header(cmd) cmd ## _header
#define __genxml_cmd_pack(cmd) cmd ## _pack

#define _iris_pack_command(batch, cmd, dst, name)                 \
   for (struct cmd name = { __genxml_cmd_header(cmd) },           \
        *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
        ({ __genxml_cmd_pack(cmd)(batch, (void *)_dst, &name);    \
           _dst = NULL;                                           \
        }))

#define iris_pack_command(cmd, dst, name) \
   _iris_pack_command(NULL, cmd, dst, name)

#define iris_pack_state(cmd, dst, name)                           \
   for (struct cmd name = {},                                     \
        *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
        __genxml_cmd_pack(cmd)(NULL, (void *)_dst, &name),        \
        _dst = NULL)

#define iris_emit_cmd(batch, cmd, name) \
   _iris_pack_command(batch, cmd, \
                      iris_get_command_space(batch, 4 * __genxml_cmd_length(cmd)), \
                      name)

#define iris_emit_merge(batch, dwords0, dwords1, num_dwords) \
   do { \
      uint32_t *dw = iris_get_command_space(batch, 4 * num_dwords); \
      for (uint32_t i = 0; i < num_dwords; i++) \
         dw[i] = (dwords0)[i] | (dwords1)[i]; \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, num_dwords)); \
   } while (0)

#include "genxml/genX_pack.h"
#include "genxml/gen_macros.h"
#include "genxml/genX_bits.h"
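/* A minimal usage sketch of the helpers above (3DSTATE_FOO and SomeField are
 * placeholder genxml names):
 *
 *    iris_emit_cmd(batch, GENX(3DSTATE_FOO), foo) {
 *       foo.SomeField = value;
 *    }
 *
 * The for-loop macros run their body exactly once: the braced block fills
 * out the template struct, and the pack call in the loop's final clause
 * writes the packed DWords to the destination afterwards.
 */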
#define MOCS_WB (2 << 1)
/**
 * Statically assert that PIPE_* enums match the hardware packets.
 * (As long as they match, we don't need to translate them.)
 */
UNUSED static void pipe_asserts()
{
#define PIPE_ASSERT(x) STATIC_ASSERT((int)x)

   /* pipe_logicop happens to match the hardware. */
   PIPE_ASSERT(PIPE_LOGICOP_CLEAR == LOGICOP_CLEAR);
   PIPE_ASSERT(PIPE_LOGICOP_NOR == LOGICOP_NOR);
   PIPE_ASSERT(PIPE_LOGICOP_AND_INVERTED == LOGICOP_AND_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_COPY_INVERTED == LOGICOP_COPY_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_AND_REVERSE == LOGICOP_AND_REVERSE);
   PIPE_ASSERT(PIPE_LOGICOP_INVERT == LOGICOP_INVERT);
   PIPE_ASSERT(PIPE_LOGICOP_XOR == LOGICOP_XOR);
   PIPE_ASSERT(PIPE_LOGICOP_NAND == LOGICOP_NAND);
   PIPE_ASSERT(PIPE_LOGICOP_AND == LOGICOP_AND);
   PIPE_ASSERT(PIPE_LOGICOP_EQUIV == LOGICOP_EQUIV);
   PIPE_ASSERT(PIPE_LOGICOP_NOOP == LOGICOP_NOOP);
   PIPE_ASSERT(PIPE_LOGICOP_OR_INVERTED == LOGICOP_OR_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_COPY == LOGICOP_COPY);
   PIPE_ASSERT(PIPE_LOGICOP_OR_REVERSE == LOGICOP_OR_REVERSE);
   PIPE_ASSERT(PIPE_LOGICOP_OR == LOGICOP_OR);
   PIPE_ASSERT(PIPE_LOGICOP_SET == LOGICOP_SET);
   /* pipe_blend_factor happens to match the hardware. */
   PIPE_ASSERT(PIPE_BLENDFACTOR_ONE == BLENDFACTOR_ONE);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_COLOR == BLENDFACTOR_SRC_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA == BLENDFACTOR_SRC_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_ALPHA == BLENDFACTOR_DST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_COLOR == BLENDFACTOR_DST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE == BLENDFACTOR_SRC_ALPHA_SATURATE);
   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_COLOR == BLENDFACTOR_CONST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_ALPHA == BLENDFACTOR_CONST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_COLOR == BLENDFACTOR_SRC1_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_ALPHA == BLENDFACTOR_SRC1_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_ZERO == BLENDFACTOR_ZERO);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_COLOR == BLENDFACTOR_INV_SRC_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_ALPHA == BLENDFACTOR_INV_SRC_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_ALPHA == BLENDFACTOR_INV_DST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_COLOR == BLENDFACTOR_INV_DST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_COLOR == BLENDFACTOR_INV_CONST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_ALPHA == BLENDFACTOR_INV_CONST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_COLOR == BLENDFACTOR_INV_SRC1_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_ALPHA == BLENDFACTOR_INV_SRC1_ALPHA);
   /* pipe_blend_func happens to match the hardware. */
   PIPE_ASSERT(PIPE_BLEND_ADD == BLENDFUNCTION_ADD);
   PIPE_ASSERT(PIPE_BLEND_SUBTRACT == BLENDFUNCTION_SUBTRACT);
   PIPE_ASSERT(PIPE_BLEND_REVERSE_SUBTRACT == BLENDFUNCTION_REVERSE_SUBTRACT);
   PIPE_ASSERT(PIPE_BLEND_MIN == BLENDFUNCTION_MIN);
   PIPE_ASSERT(PIPE_BLEND_MAX == BLENDFUNCTION_MAX);

   /* pipe_stencil_op happens to match the hardware. */
   PIPE_ASSERT(PIPE_STENCIL_OP_KEEP == STENCILOP_KEEP);
   PIPE_ASSERT(PIPE_STENCIL_OP_ZERO == STENCILOP_ZERO);
   PIPE_ASSERT(PIPE_STENCIL_OP_REPLACE == STENCILOP_REPLACE);
   PIPE_ASSERT(PIPE_STENCIL_OP_INCR == STENCILOP_INCRSAT);
   PIPE_ASSERT(PIPE_STENCIL_OP_DECR == STENCILOP_DECRSAT);
   PIPE_ASSERT(PIPE_STENCIL_OP_INCR_WRAP == STENCILOP_INCR);
   PIPE_ASSERT(PIPE_STENCIL_OP_DECR_WRAP == STENCILOP_DECR);
   PIPE_ASSERT(PIPE_STENCIL_OP_INVERT == STENCILOP_INVERT);

   /* pipe_sprite_coord_mode happens to match 3DSTATE_SBE */
   PIPE_ASSERT(PIPE_SPRITE_COORD_UPPER_LEFT == UPPERLEFT);
   PIPE_ASSERT(PIPE_SPRITE_COORD_LOWER_LEFT == LOWERLEFT);
#undef PIPE_ASSERT
}
static unsigned
translate_prim_type(enum pipe_prim_type prim, uint8_t verts_per_patch)
{
   static const unsigned map[] = {
      [PIPE_PRIM_POINTS] = _3DPRIM_POINTLIST,
      [PIPE_PRIM_LINES] = _3DPRIM_LINELIST,
      [PIPE_PRIM_LINE_LOOP] = _3DPRIM_LINELOOP,
      [PIPE_PRIM_LINE_STRIP] = _3DPRIM_LINESTRIP,
      [PIPE_PRIM_TRIANGLES] = _3DPRIM_TRILIST,
      [PIPE_PRIM_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
      [PIPE_PRIM_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
      [PIPE_PRIM_QUADS] = _3DPRIM_QUADLIST,
      [PIPE_PRIM_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
      [PIPE_PRIM_POLYGON] = _3DPRIM_POLYGON,
      [PIPE_PRIM_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
      [PIPE_PRIM_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
      [PIPE_PRIM_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
      [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
      [PIPE_PRIM_PATCHES] = _3DPRIM_PATCHLIST_1 - 1,
   };

   return map[prim] + (prim == PIPE_PRIM_PATCHES ? verts_per_patch : 0);
}
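/* For example, a draw with PIPE_PRIM_PATCHES and verts_per_patch = 3 maps to
 * _3DPRIM_PATCHLIST_1 - 1 + 3, i.e. _3DPRIM_PATCHLIST_3, since the PATCHLIST
 * topologies are numbered consecutively by vertex count.
 */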
static unsigned
translate_compare_func(enum pipe_compare_func pipe_func)
{
   static const unsigned map[] = {
      [PIPE_FUNC_NEVER] = COMPAREFUNCTION_NEVER,
      [PIPE_FUNC_LESS] = COMPAREFUNCTION_LESS,
      [PIPE_FUNC_EQUAL] = COMPAREFUNCTION_EQUAL,
      [PIPE_FUNC_LEQUAL] = COMPAREFUNCTION_LEQUAL,
      [PIPE_FUNC_GREATER] = COMPAREFUNCTION_GREATER,
      [PIPE_FUNC_NOTEQUAL] = COMPAREFUNCTION_NOTEQUAL,
      [PIPE_FUNC_GEQUAL] = COMPAREFUNCTION_GEQUAL,
      [PIPE_FUNC_ALWAYS] = COMPAREFUNCTION_ALWAYS,
   };
   return map[pipe_func];
}

static unsigned
translate_shadow_func(enum pipe_compare_func pipe_func)
{
   /* Gallium specifies the result of shadow comparisons as:
    *
    *    1 if ref <op> texel,
    *    0 otherwise.
    *
    * The hardware does:
    *
    *    0 if texel <op> ref,
    *    1 otherwise.
    *
    * So we need to flip the operator and also negate.
    */
   static const unsigned map[] = {
      [PIPE_FUNC_NEVER] = PREFILTEROPALWAYS,
      [PIPE_FUNC_LESS] = PREFILTEROPLEQUAL,
      [PIPE_FUNC_EQUAL] = PREFILTEROPNOTEQUAL,
      [PIPE_FUNC_LEQUAL] = PREFILTEROPLESS,
      [PIPE_FUNC_GREATER] = PREFILTEROPGEQUAL,
      [PIPE_FUNC_NOTEQUAL] = PREFILTEROPEQUAL,
      [PIPE_FUNC_GEQUAL] = PREFILTEROPGREATER,
      [PIPE_FUNC_ALWAYS] = PREFILTEROPNEVER,
   };
   return map[pipe_func];
}
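/* For example, Gallium's LEQUAL shadow mode ("1 if ref <= texel") maps to
 * PREFILTEROPLESS: the hardware computes "0 if texel < ref", which yields 1
 * exactly when ref <= texel, i.e. the same predicate, flipped and negated.
 */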
static unsigned
translate_cull_mode(unsigned pipe_face)
{
   static const unsigned map[4] = {
      [PIPE_FACE_NONE] = CULLMODE_NONE,
      [PIPE_FACE_FRONT] = CULLMODE_FRONT,
      [PIPE_FACE_BACK] = CULLMODE_BACK,
      [PIPE_FACE_FRONT_AND_BACK] = CULLMODE_BOTH,
   };
   return map[pipe_face];
}

static unsigned
translate_fill_mode(unsigned pipe_polymode)
{
   static const unsigned map[4] = {
      [PIPE_POLYGON_MODE_FILL] = FILL_MODE_SOLID,
      [PIPE_POLYGON_MODE_LINE] = FILL_MODE_WIREFRAME,
      [PIPE_POLYGON_MODE_POINT] = FILL_MODE_POINT,
      [PIPE_POLYGON_MODE_FILL_RECTANGLE] = FILL_MODE_SOLID,
   };
   return map[pipe_polymode];
}

static unsigned
translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
{
   static const unsigned map[] = {
      [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
      [PIPE_TEX_MIPFILTER_LINEAR] = MIPFILTER_LINEAR,
      [PIPE_TEX_MIPFILTER_NONE] = MIPFILTER_NONE,
   };
   return map[pipe_mip];
}

static unsigned
translate_wrap(unsigned pipe_wrap)
{
   static const unsigned map[] = {
      [PIPE_TEX_WRAP_REPEAT] = TCM_WRAP,
      [PIPE_TEX_WRAP_CLAMP] = TCM_HALF_BORDER,
      [PIPE_TEX_WRAP_CLAMP_TO_EDGE] = TCM_CLAMP,
      [PIPE_TEX_WRAP_CLAMP_TO_BORDER] = TCM_CLAMP_BORDER,
      [PIPE_TEX_WRAP_MIRROR_REPEAT] = TCM_MIRROR,
      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,

      /* These are unsupported. */
      [PIPE_TEX_WRAP_MIRROR_CLAMP] = -1,
      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
   };
   return map[pipe_wrap];
}
static struct iris_address
ro_bo(struct iris_bo *bo, uint64_t offset)
{
   /* CSOs must pass NULL for bo!  Otherwise it will add the BO to the
    * validation list at CSO creation time, instead of draw time.
    */
   return (struct iris_address) { .bo = bo, .offset = offset };
}

static struct iris_address
rw_bo(struct iris_bo *bo, uint64_t offset)
{
   /* CSOs must pass NULL for bo!  Otherwise it will add the BO to the
    * validation list at CSO creation time, instead of draw time.
    */
   return (struct iris_address) { .bo = bo, .offset = offset, .write = true };
}
/**
 * Allocate space for some indirect state.
 *
 * Return a pointer to the map (to fill it out) and a state ref (for
 * referring to the state in GPU commands).
 */
static void *
upload_state(struct u_upload_mgr *uploader,
             struct iris_state_ref *ref,
             unsigned size,
             unsigned alignment)
{
   void *p = NULL;
   u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
   return p;
}
/**
 * Stream out temporary/short-lived state.
 *
 * This allocates space, pins the BO, and includes the BO address in the
 * returned offset (which works because all state lives in 32-bit memory
 * zones).
 */
static uint32_t *
stream_state(struct iris_batch *batch,
             struct u_upload_mgr *uploader,
             struct pipe_resource **out_res,
             unsigned size,
             unsigned alignment,
             uint32_t *out_offset)
{
   void *ptr = NULL;

   u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);

   struct iris_bo *bo = iris_resource_bo(*out_res);
   iris_use_pinned_bo(batch, bo, false);

   *out_offset += iris_bo_offset_from_base_address(bo);

   return ptr;
}
/**
 * stream_state() + memcpy.
 */
static uint32_t
emit_state(struct iris_batch *batch,
           struct u_upload_mgr *uploader,
           struct pipe_resource **out_res,
           const void *data,
           unsigned size,
           unsigned alignment)
{
   unsigned offset = 0;
   uint32_t *map =
      stream_state(batch, uploader, out_res, size, alignment, &offset);

   if (map)
      memcpy(map, data, size);

   return offset;
}
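/* Illustrative use of emit_state() (the resource pointer and data here are
 * placeholders): stream a packed state structure into a state memory zone
 * and get back an offset suitable for a *_STATE_POINTERS command:
 *
 *    uint32_t cc_offset = emit_state(batch, uploader, &res,
 *                                    cc_packet, sizeof(cc_packet), 64);
 */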
/**
 * Did field 'x' change between 'old_cso' and 'new_cso'?
 *
 * (If so, we may want to set some dirty flags.)
 */
#define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
#define cso_changed_memcmp(x) \
   (!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
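/* For example, a bind hook with old_cso/new_cso in scope can write:
 *
 *    if (cso_changed(half_pixel_center))
 *       ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
 *
 * which flags the dirty bit on first bind (old_cso == NULL) or whenever the
 * field actually differs.
 */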
static void
flush_for_state_base_change(struct iris_batch *batch)
{
   /* Flush before emitting STATE_BASE_ADDRESS.
    *
    * This isn't documented anywhere in the PRM.  However, it seems to be
    * necessary prior to changing the surface state base address.  We've
    * seen issues in Vulkan where we get GPU hangs when using multi-level
    * command buffers which clear depth, reset state base address, and then
    * go render stuff.
    *
    * Normally, in GL, we would trust the kernel to do sufficient stalls
    * and flushes prior to executing our batch.  However, it doesn't seem
    * as if the kernel's flushing is always sufficient and we don't want to
    * rely on it.
    *
    * We make this an end-of-pipe sync instead of a normal flush because we
    * do not know the current status of the GPU.  On Haswell at least,
    * having a fast-clear operation in flight at the same time as a normal
    * rendering operation can cause hangs.  Since the kernel's flushing is
    * insufficient, we need to ensure that any rendering operations from
    * other processes are definitely complete before we try to do our own
    * rendering.  It's a bit of a big hammer but it appears to work.
    */
   iris_emit_end_of_pipe_sync(batch,
                              PIPE_CONTROL_RENDER_TARGET_FLUSH |
                              PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                              PIPE_CONTROL_DATA_CACHE_FLUSH);
}
static void
_iris_emit_lri(struct iris_batch *batch, uint32_t reg, uint32_t val)
{
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset = reg;
      lri.DataDWord = val;
   }
}
#define iris_emit_lri(b, r, v) _iris_emit_lri(b, GENX(r##_num), v)
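/* The wrapper macro pastes "_num" onto the register name, so
 * iris_emit_lri(batch, CACHE_MODE_1, val) expands to
 * _iris_emit_lri(batch, GENX(CACHE_MODE_1_num), val), picking up the MMIO
 * offset that genxml defines for the current generation.
 */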
/**
 * Upload the initial GPU state for a render context.
 *
 * This sets some invariant state that needs to be programmed a particular
 * way, but we never actually change.
 */
static void
iris_init_render_context(struct iris_screen *screen,
                         struct iris_batch *batch,
                         struct iris_vtable *vtbl,
                         struct pipe_debug_callback *dbg)
{
   uint32_t reg_val;

   iris_init_batch(batch, screen, vtbl, dbg, I915_EXEC_RENDER);

   flush_for_state_base_change(batch);

   /* We program most base addresses once at context initialization time.
    * Each base address points at a 4GB memory zone, and never needs to
    * change.  See iris_bufmgr.h for a description of the memory zones.
    *
    * The one exception is Surface State Base Address, which needs to be
    * updated occasionally.  See iris_binder.c for the details there.
    */
   iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
      // XXX: MOCS is stupid for this.
      sba.GeneralStateMemoryObjectControlState            = MOCS_WB;
      sba.StatelessDataPortAccessMemoryObjectControlState = MOCS_WB;
      sba.DynamicStateMemoryObjectControlState            = MOCS_WB;
      sba.IndirectObjectMemoryObjectControlState          = MOCS_WB;
      sba.InstructionMemoryObjectControlState             = MOCS_WB;
      sba.BindlessSurfaceStateMemoryObjectControlState    = MOCS_WB;

      sba.GeneralStateBaseAddressModifyEnable         = true;
      sba.DynamicStateBaseAddressModifyEnable         = true;
      sba.IndirectObjectBaseAddressModifyEnable       = true;
      sba.InstructionBaseAddressModifyEnable          = true;
      sba.GeneralStateBufferSizeModifyEnable          = true;
      sba.DynamicStateBufferSizeModifyEnable          = true;
      sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
      sba.IndirectObjectBufferSizeModifyEnable        = true;
      sba.InstructionBuffersizeModifyEnable           = true;

      sba.InstructionBaseAddress  = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
      sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);

      sba.GeneralStateBufferSize   = 0xfffff;
      sba.IndirectObjectBufferSize = 0xfffff;
      sba.InstructionBufferSize    = 0xfffff;
      sba.DynamicStateBufferSize   = 0xfffff;
   }

   // XXX: INSTPM on Gen8
   iris_pack_state(GENX(CS_DEBUG_MODE2), &reg_val, reg) {
      reg.CONSTANT_BUFFERAddressOffsetDisable = true;
      reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
   }
   iris_emit_lri(batch, CS_DEBUG_MODE2, reg_val);

   iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
      reg.FloatBlendOptimizationEnable = true;
      reg.FloatBlendOptimizationEnableMask = true;
      reg.PartialResolveDisableInVC = true;
      reg.PartialResolveDisableInVCMask = true;
   }
   iris_emit_lri(batch, CACHE_MODE_1, reg_val);

   iris_pack_state(GENX(SAMPLER_MODE), &reg_val, reg) {
      reg.HeaderlessMessageforPreemptableContexts = 1;
      reg.HeaderlessMessageforPreemptableContextsMask = 1;
   }
   iris_emit_lri(batch, SAMPLER_MODE, reg_val);

   /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
    * changing it dynamically.  We set it to the maximum size here, and
    * instead include the render target dimensions in the viewport, so
    * viewport extents clipping takes care of pruning stray geometry.
    */
   iris_emit_cmd(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = UINT16_MAX;
      rect.ClippedDrawingRectangleYMax = UINT16_MAX;
   }

   /* Set the initial MSAA sample positions. */
   iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_PATTERN), pat) {
      GEN_SAMPLE_POS_1X(pat._1xSample);
      GEN_SAMPLE_POS_2X(pat._2xSample);
      GEN_SAMPLE_POS_4X(pat._4xSample);
      GEN_SAMPLE_POS_8X(pat._8xSample);
      GEN_SAMPLE_POS_16X(pat._16xSample);
   }

   /* Use the legacy AA line coverage computation. */
   iris_emit_cmd(batch, GENX(3DSTATE_AA_LINE_PARAMETERS), foo);

   /* Disable chromakeying (it's for media) */
   iris_emit_cmd(batch, GENX(3DSTATE_WM_CHROMAKEY), foo);

   /* We want regular rendering, not special HiZ operations. */
   iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);

   /* No polygon stippling offsets are necessary. */
   // XXX: may need to set an offset for origin-UL framebuffers
   iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);

   /* Set a static partitioning of the push constant area. */
   // XXX: this may be a bad idea...could starve the push ringbuffers...
   for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
      iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
         alloc._3DCommandSubOpcode = 18 + i;
         alloc.ConstantBufferOffset = 6 * i;
         alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
      }
   }
}
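/* A worked example of the static partitioning above, in the packet's
 * allocation units: VS, HS, DS, and GS each get a 6-unit window at offsets
 * 0, 6, 12, and 18, and FS gets an 8-unit window at offset 24, consuming
 * 32 units of push constant space in total.
 */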
static void
iris_init_compute_context(struct iris_screen *screen,
                          struct iris_batch *batch,
                          struct iris_vtable *vtbl,
                          struct pipe_debug_callback *dbg)
{
   iris_init_batch(batch, screen, vtbl, dbg, I915_EXEC_RENDER);

   /* XXX: PIPE_CONTROLs */

   iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
#if GEN_GEN >= 9
      sel.MaskBits = 3;
#endif
      sel.PipelineSelection = GPGPU;
   }

   iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
      // XXX: MOCS is stupid for this.
      sba.GeneralStateMemoryObjectControlState            = MOCS_WB;
      sba.StatelessDataPortAccessMemoryObjectControlState = MOCS_WB;
      sba.SurfaceStateMemoryObjectControlState            = MOCS_WB;
      sba.DynamicStateMemoryObjectControlState            = MOCS_WB;
      sba.IndirectObjectMemoryObjectControlState          = MOCS_WB;
      sba.InstructionMemoryObjectControlState             = MOCS_WB;
      sba.BindlessSurfaceStateMemoryObjectControlState    = MOCS_WB;

      sba.GeneralStateBaseAddressModifyEnable         = true;
      sba.SurfaceStateBaseAddressModifyEnable         = true;
      sba.DynamicStateBaseAddressModifyEnable         = true;
      sba.IndirectObjectBaseAddressModifyEnable       = true;
      sba.InstructionBaseAddressModifyEnable          = true;
      sba.GeneralStateBufferSizeModifyEnable          = true;
      sba.DynamicStateBufferSizeModifyEnable          = true;
      sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
      sba.IndirectObjectBufferSizeModifyEnable        = true;
      sba.InstructionBuffersizeModifyEnable           = true;

      sba.InstructionBaseAddress  = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
      sba.SurfaceStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_SURFACE_START);
      sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);

      sba.GeneralStateBufferSize   = 0xfffff;
      sba.IndirectObjectBufferSize = 0xfffff;
      sba.InstructionBufferSize    = 0xfffff;
      sba.DynamicStateBufferSize   = 0xfffff;
   }
}
struct iris_vertex_buffer_state {
   /** The 3DSTATE_VERTEX_BUFFERS hardware packet. */
   uint32_t vertex_buffers[1 + 33 * GENX(VERTEX_BUFFER_STATE_length)];

   /** The resource to source vertex data from. */
   struct pipe_resource *resources[33];

   /** The number of bound vertex buffers. */
   unsigned num_buffers;
};

struct iris_depth_buffer_state {
   /* Depth/HiZ/Stencil related hardware packets. */
   uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
                    GENX(3DSTATE_STENCIL_BUFFER_length) +
                    GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
                    GENX(3DSTATE_CLEAR_PARAMS_length)];
};

/**
 * Generation-specific context state (ice->state.genx->...).
 *
 * Most state can go in iris_context directly, but these encode hardware
 * packets which vary by generation.
 */
struct iris_genx_state {
   /** SF_CLIP_VIEWPORT */
   uint32_t sf_cl_vp[GENX(SF_CLIP_VIEWPORT_length) * IRIS_MAX_VIEWPORTS];

   struct iris_vertex_buffer_state vertex_buffers;
   struct iris_depth_buffer_state depth_buffer;

   uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];
   uint32_t streamout[4 * GENX(3DSTATE_STREAMOUT_length)];
};
/**
 * The pipe->set_blend_color() driver hook.
 *
 * This corresponds to our COLOR_CALC_STATE.
 */
static void
iris_set_blend_color(struct pipe_context *ctx,
                     const struct pipe_blend_color *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   /* Our COLOR_CALC_STATE is exactly pipe_blend_color, so just memcpy */
   memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
   ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
}
/**
 * Gallium CSO for blend state (see pipe_blend_state).
 */
struct iris_blend_state {
   /** Partial 3DSTATE_PS_BLEND */
   uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];

   /** Partial BLEND_STATE */
   uint32_t blend_state[GENX(BLEND_STATE_length) +
                        BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)];

   bool alpha_to_coverage; /* for shader key */
};
/**
 * The pipe->create_blend_state() driver hook.
 *
 * Translates a pipe_blend_state into iris_blend_state.
 */
static void *
iris_create_blend_state(struct pipe_context *ctx,
                        const struct pipe_blend_state *state)
{
   struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
   uint32_t *blend_state = cso->blend_state;

   cso->alpha_to_coverage = state->alpha_to_coverage;

   iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
      /* pb.HasWriteableRT is filled in at draw time. */
      /* pb.AlphaTestEnable is filled in at draw time. */
      pb.AlphaToCoverageEnable = state->alpha_to_coverage;
      pb.IndependentAlphaBlendEnable = state->independent_blend_enable;

      pb.ColorBufferBlendEnable = state->rt[0].blend_enable;

      pb.SourceBlendFactor           = state->rt[0].rgb_src_factor;
      pb.SourceAlphaBlendFactor      = state->rt[0].alpha_src_factor;
      pb.DestinationBlendFactor      = state->rt[0].rgb_dst_factor;
      pb.DestinationAlphaBlendFactor = state->rt[0].alpha_dst_factor;
   }

   iris_pack_state(GENX(BLEND_STATE), blend_state, bs) {
      bs.AlphaToCoverageEnable = state->alpha_to_coverage;
      bs.IndependentAlphaBlendEnable = state->independent_blend_enable;
      bs.AlphaToOneEnable = state->alpha_to_one;
      bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
      bs.ColorDitherEnable = state->dither;
      /* bs.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
   }

   blend_state += GENX(BLEND_STATE_length);

   for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
      iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_state, be) {
         be.LogicOpEnable = state->logicop_enable;
         be.LogicOpFunction = state->logicop_func;

         be.PreBlendSourceOnlyClampEnable = false;
         be.ColorClampRange = COLORCLAMP_RTFORMAT;
         be.PreBlendColorClampEnable = true;
         be.PostBlendColorClampEnable = true;

         be.ColorBufferBlendEnable = state->rt[i].blend_enable;

         be.ColorBlendFunction          = state->rt[i].rgb_func;
         be.AlphaBlendFunction          = state->rt[i].alpha_func;
         be.SourceBlendFactor           = state->rt[i].rgb_src_factor;
         be.SourceAlphaBlendFactor      = state->rt[i].alpha_src_factor;
         be.DestinationBlendFactor      = state->rt[i].rgb_dst_factor;
         be.DestinationAlphaBlendFactor = state->rt[i].alpha_dst_factor;

         be.WriteDisableRed   = !(state->rt[i].colormask & PIPE_MASK_R);
         be.WriteDisableGreen = !(state->rt[i].colormask & PIPE_MASK_G);
         be.WriteDisableBlue  = !(state->rt[i].colormask & PIPE_MASK_B);
         be.WriteDisableAlpha = !(state->rt[i].colormask & PIPE_MASK_A);
      }
      blend_state += GENX(BLEND_STATE_ENTRY_length);
   }

   return cso;
}
/**
 * The pipe->bind_blend_state() driver hook.
 *
 * Bind a blending CSO and flag related dirty bits.
 */
static void
iris_bind_blend_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   ice->state.cso_blend = state;
   ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
   ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
   ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_BLEND];
}
/**
 * Gallium CSO for depth, stencil, and alpha testing state.
 */
struct iris_depth_stencil_alpha_state {
   /** Partial 3DSTATE_WM_DEPTH_STENCIL. */
   uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];

   /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
   struct pipe_alpha_state alpha;

   /** Outbound to resolve and cache set tracking. */
   bool depth_writes_enabled;
   bool stencil_writes_enabled;
};
/**
 * The pipe->create_depth_stencil_alpha_state() driver hook.
 *
 * We encode most of 3DSTATE_WM_DEPTH_STENCIL, and just save off the alpha
 * testing state since we need pieces of it in a variety of places.
 */
static void *
iris_create_zsa_state(struct pipe_context *ctx,
                      const struct pipe_depth_stencil_alpha_state *state)
{
   struct iris_depth_stencil_alpha_state *cso =
      malloc(sizeof(struct iris_depth_stencil_alpha_state));

   bool two_sided_stencil = state->stencil[1].enabled;

   cso->alpha = state->alpha;
   cso->depth_writes_enabled = state->depth.writemask;
   cso->stencil_writes_enabled =
      state->stencil[0].writemask != 0 ||
      (two_sided_stencil && state->stencil[1].writemask != 0);

   /* The state tracker needs to optimize away EQUAL writes for us. */
   assert(!(state->depth.func == PIPE_FUNC_EQUAL && state->depth.writemask));

   iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), cso->wmds, wmds) {
      wmds.StencilFailOp = state->stencil[0].fail_op;
      wmds.StencilPassDepthFailOp = state->stencil[0].zfail_op;
      wmds.StencilPassDepthPassOp = state->stencil[0].zpass_op;
      wmds.StencilTestFunction =
         translate_compare_func(state->stencil[0].func);
      wmds.BackfaceStencilFailOp = state->stencil[1].fail_op;
      wmds.BackfaceStencilPassDepthFailOp = state->stencil[1].zfail_op;
      wmds.BackfaceStencilPassDepthPassOp = state->stencil[1].zpass_op;
      wmds.BackfaceStencilTestFunction =
         translate_compare_func(state->stencil[1].func);
      wmds.DepthTestFunction = translate_compare_func(state->depth.func);
      wmds.DoubleSidedStencilEnable = two_sided_stencil;
      wmds.StencilTestEnable = state->stencil[0].enabled;
      wmds.StencilBufferWriteEnable =
         state->stencil[0].writemask != 0 ||
         (two_sided_stencil && state->stencil[1].writemask != 0);
      wmds.DepthTestEnable = state->depth.enabled;
      wmds.DepthBufferWriteEnable = state->depth.writemask;
      wmds.StencilTestMask = state->stencil[0].valuemask;
      wmds.StencilWriteMask = state->stencil[0].writemask;
      wmds.BackfaceStencilTestMask = state->stencil[1].valuemask;
      wmds.BackfaceStencilWriteMask = state->stencil[1].writemask;
      /* wmds.[Backface]StencilReferenceValue are merged later */
   }

   return cso;
}
/**
 * The pipe->bind_depth_stencil_alpha_state() driver hook.
 *
 * Bind a depth/stencil/alpha CSO and flag related dirty bits.
 */
static void
iris_bind_zsa_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_depth_stencil_alpha_state *old_cso = ice->state.cso_zsa;
   struct iris_depth_stencil_alpha_state *new_cso = state;

   if (new_cso) {
      if (cso_changed(alpha.ref_value))
         ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;

      if (cso_changed(alpha.enabled))
         ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;

      if (cso_changed(alpha.func))
         ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;

      ice->state.depth_writes_enabled = new_cso->depth_writes_enabled;
      ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled;
   }

   ice->state.cso_zsa = new_cso;
   ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
   ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
   ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];
}
/**
 * Gallium CSO for rasterizer state.
 */
struct iris_rasterizer_state {
   uint32_t sf[GENX(3DSTATE_SF_length)];
   uint32_t clip[GENX(3DSTATE_CLIP_length)];
   uint32_t raster[GENX(3DSTATE_RASTER_length)];
   uint32_t wm[GENX(3DSTATE_WM_length)];
   uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];

   bool clip_halfz; /* for CC_VIEWPORT */
   bool depth_clip_near; /* for CC_VIEWPORT */
   bool depth_clip_far; /* for CC_VIEWPORT */
   bool flatshade; /* for shader state */
   bool flatshade_first; /* for stream output */
   bool clamp_fragment_color; /* for shader state */
   bool light_twoside; /* for shader state */
   bool rasterizer_discard; /* for 3DSTATE_STREAMOUT */
   bool half_pixel_center; /* for 3DSTATE_MULTISAMPLE */
   bool line_stipple_enable;
   bool poly_stipple_enable;
   bool multisample;
   bool force_persample_interp;
   enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
   uint16_t sprite_coord_enable;
};
static float
get_line_width(const struct pipe_rasterizer_state *state)
{
   float line_width = state->line_width;

   /* From the OpenGL 4.4 spec:
    *
    * "The actual width of non-antialiased lines is determined by rounding
    * the supplied width to the nearest integer, then clamping it to the
    * implementation-dependent maximum non-antialiased line width."
    */
   if (!state->multisample && !state->line_smooth)
      line_width = roundf(state->line_width);

   if (!state->multisample && state->line_smooth && line_width < 1.5f) {
      /* For 1 pixel line thickness or less, the general anti-aliasing
       * algorithm gives up, and a garbage line is generated.  Setting a
       * Line Width of 0.0 specifies the rasterization of the "thinnest"
       * (one-pixel-wide), non-antialiased lines.
       *
       * Lines rendered with zero Line Width are rasterized using the
       * "Grid Intersection Quantization" rules as specified by the
       * "Zero-Width (Cosmetic) Line Rasterization" section of the docs.
       */
      line_width = 0.0f;
   }

   return line_width;
}
/**
 * The pipe->create_rasterizer_state() driver hook.
 */
static void *
iris_create_rasterizer_state(struct pipe_context *ctx,
                             const struct pipe_rasterizer_state *state)
{
   struct iris_rasterizer_state *cso =
      malloc(sizeof(struct iris_rasterizer_state));

   // XXX: not yet handled:
   //    point_quad_rasterization -> SBE?
   //    force_persample_interp - ?
   //    offset_units_unscaled - cap not exposed

   // XXX: it may make more sense just to store the pipe_rasterizer_state,
   // we're copying a lot of booleans here.  But we don't need all of them...

   cso->multisample = state->multisample;
   cso->force_persample_interp = state->force_persample_interp;
   cso->clip_halfz = state->clip_halfz;
   cso->depth_clip_near = state->depth_clip_near;
   cso->depth_clip_far = state->depth_clip_far;
   cso->flatshade = state->flatshade;
   cso->flatshade_first = state->flatshade_first;
   cso->clamp_fragment_color = state->clamp_fragment_color;
   cso->light_twoside = state->light_twoside;
   cso->rasterizer_discard = state->rasterizer_discard;
   cso->half_pixel_center = state->half_pixel_center;
   cso->sprite_coord_mode = state->sprite_coord_mode;
   cso->sprite_coord_enable = state->sprite_coord_enable;
   cso->line_stipple_enable = state->line_stipple_enable;
   cso->poly_stipple_enable = state->poly_stipple_enable;

   float line_width = get_line_width(state);

   iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
      sf.StatisticsEnable = true;
      sf.ViewportTransformEnable = true;
      sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
      sf.LineEndCapAntialiasingRegionWidth =
         state->line_smooth ? _10pixels : _05pixels;
      sf.LastPixelEnable = state->line_last_pixel;
      sf.LineWidth = line_width;
      sf.SmoothPointEnable = state->point_smooth;
      sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
      sf.PointWidth = state->point_size;

      if (state->flatshade_first) {
         sf.TriangleFanProvokingVertexSelect = 1;
      } else {
         sf.TriangleStripListProvokingVertexSelect = 2;
         sf.TriangleFanProvokingVertexSelect = 2;
         sf.LineStripListProvokingVertexSelect = 1;
      }
   }

   iris_pack_command(GENX(3DSTATE_RASTER), cso->raster, rr) {
      rr.FrontWinding = state->front_ccw ? CounterClockwise : Clockwise;
      rr.CullMode = translate_cull_mode(state->cull_face);
      rr.FrontFaceFillMode = translate_fill_mode(state->fill_front);
      rr.BackFaceFillMode = translate_fill_mode(state->fill_back);
      rr.DXMultisampleRasterizationEnable = state->multisample;
      rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
      rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
      rr.GlobalDepthOffsetEnablePoint = state->offset_point;
      rr.GlobalDepthOffsetConstant = state->offset_units * 2;
      rr.GlobalDepthOffsetScale = state->offset_scale;
      rr.GlobalDepthOffsetClamp = state->offset_clamp;
      rr.SmoothPointEnable = state->point_smooth;
      rr.AntialiasingEnable = state->line_smooth;
      rr.ScissorRectangleEnable = state->scissor;
      rr.ViewportZNearClipTestEnable = state->depth_clip_near;
      rr.ViewportZFarClipTestEnable = state->depth_clip_far;
      //rr.ConservativeRasterizationEnable = not yet supported by Gallium...
   }

   iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
      /* cl.NonPerspectiveBarycentricEnable is filled in at draw time from
       * the FS program; cl.ForceZeroRTAIndexEnable is filled in from the FB.
       */
      cl.StatisticsEnable = true;
      cl.EarlyCullEnable = true;
      cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
      cl.ForceUserClipDistanceClipTestEnableBitmask = true;
      cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
      cl.GuardbandClipTestEnable = true;
      cl.ClipMode = CLIPMODE_NORMAL;
      cl.ClipEnable = true;
      cl.ViewportXYClipTestEnable = state->point_tri_clip;
      cl.MinimumPointWidth = 0.125;
      cl.MaximumPointWidth = 255.875;

      if (state->flatshade_first) {
         cl.TriangleFanProvokingVertexSelect = 1;
      } else {
         cl.TriangleStripListProvokingVertexSelect = 2;
         cl.TriangleFanProvokingVertexSelect = 2;
         cl.LineStripListProvokingVertexSelect = 1;
      }
   }

   iris_pack_command(GENX(3DSTATE_WM), cso->wm, wm) {
      /* wm.BarycentricInterpolationMode and wm.EarlyDepthStencilControl are
       * filled in at draw time from the FS program.
       */
      wm.LineAntialiasingRegionWidth = _10pixels;
      wm.LineEndCapAntialiasingRegionWidth = _05pixels;
      wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
      wm.StatisticsEnable = true;
      wm.LineStippleEnable = state->line_stipple_enable;
      wm.PolygonStippleEnable = state->poly_stipple_enable;
   }

   /* Remap from 0..255 back to 1..256 */
   const unsigned line_stipple_factor = state->line_stipple_factor + 1;

   iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
      line.LineStipplePattern = state->line_stipple_pattern;
      line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
      line.LineStippleRepeatCount = line_stipple_factor;
   }

   return cso;
}
/**
 * The pipe->bind_rasterizer_state() driver hook.
 *
 * Bind a rasterizer CSO and flag related dirty bits.
 */
static void
iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_rasterizer_state *old_cso = ice->state.cso_rast;
   struct iris_rasterizer_state *new_cso = state;

   if (new_cso) {
      /* Try to avoid re-emitting 3DSTATE_LINE_STIPPLE, it's non-pipelined */
      if (cso_changed_memcmp(line_stipple))
         ice->state.dirty |= IRIS_DIRTY_LINE_STIPPLE;

      if (cso_changed(half_pixel_center))
         ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;

      if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
         ice->state.dirty |= IRIS_DIRTY_WM;

      if (cso_changed(rasterizer_discard) || cso_changed(flatshade_first))
         ice->state.dirty |= IRIS_DIRTY_STREAMOUT;

      if (cso_changed(depth_clip_near) || cso_changed(depth_clip_far) ||
          cso_changed(clip_halfz))
         ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;

      if (cso_changed(sprite_coord_enable) || cso_changed(light_twoside))
         ice->state.dirty |= IRIS_DIRTY_SBE;
   }

   ice->state.cso_rast = new_cso;
   ice->state.dirty |= IRIS_DIRTY_RASTER;
   ice->state.dirty |= IRIS_DIRTY_CLIP;
   ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_RASTERIZER];
}
/**
 * Return true if the given wrap mode requires the border color to exist.
 *
 * (We can skip uploading it if the sampler isn't going to use it.)
 */
static bool
wrap_mode_needs_border_color(unsigned wrap_mode)
{
   return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
}
/**
 * Gallium CSO for sampler state.
 */
struct iris_sampler_state {
   union pipe_color_union border_color;
   bool needs_border_color;

   uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
};
/**
 * The pipe->create_sampler_state() driver hook.
 *
 * We fill out SAMPLER_STATE (except for the border color pointer), and
 * store that on the CPU.  It doesn't make sense to upload it to a GPU
 * buffer object yet, because 3DSTATE_SAMPLER_STATE_POINTERS requires
 * all bound sampler states to be in contiguous memory.
 */
static void *
iris_create_sampler_state(struct pipe_context *ctx,
                          const struct pipe_sampler_state *state)
{
   struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);

   STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
   STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);

   unsigned wrap_s = translate_wrap(state->wrap_s);
   unsigned wrap_t = translate_wrap(state->wrap_t);
   unsigned wrap_r = translate_wrap(state->wrap_r);

   memcpy(&cso->border_color, &state->border_color, sizeof(cso->border_color));

   cso->needs_border_color = wrap_mode_needs_border_color(wrap_s) ||
                             wrap_mode_needs_border_color(wrap_t) ||
                             wrap_mode_needs_border_color(wrap_r);

   float min_lod = state->min_lod;
   unsigned mag_img_filter = state->mag_img_filter;

   // XXX: explain this code ported from ilo...I don't get it at all...
   if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE &&
       state->min_lod > 0.0f) {
      min_lod = 0.0f;
      mag_img_filter = state->min_img_filter;
   }

   iris_pack_state(GENX(SAMPLER_STATE), cso->sampler_state, samp) {
      samp.TCXAddressControlMode = wrap_s;
      samp.TCYAddressControlMode = wrap_t;
      samp.TCZAddressControlMode = wrap_r;
      samp.CubeSurfaceControlMode = state->seamless_cube_map;
      samp.NonnormalizedCoordinateEnable = !state->normalized_coords;
      samp.MinModeFilter = state->min_img_filter;
      samp.MagModeFilter = mag_img_filter;
      samp.MipModeFilter = translate_mip_filter(state->min_mip_filter);
      samp.MaximumAnisotropy = RATIO21;

      if (state->max_anisotropy >= 2) {
         if (state->min_img_filter == PIPE_TEX_FILTER_LINEAR) {
            samp.MinModeFilter = MAPFILTER_ANISOTROPIC;
            samp.AnisotropicAlgorithm = EWAApproximation;
         }

         if (state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)
            samp.MagModeFilter = MAPFILTER_ANISOTROPIC;

         samp.MaximumAnisotropy =
            MIN2((state->max_anisotropy - 2) / 2, RATIO161);
      }

      /* Set address rounding bits if not using nearest filtering. */
      if (state->min_img_filter != PIPE_TEX_FILTER_NEAREST) {
         samp.UAddressMinFilterRoundingEnable = true;
         samp.VAddressMinFilterRoundingEnable = true;
         samp.RAddressMinFilterRoundingEnable = true;
      }

      if (state->mag_img_filter != PIPE_TEX_FILTER_NEAREST) {
         samp.UAddressMagFilterRoundingEnable = true;
         samp.VAddressMagFilterRoundingEnable = true;
         samp.RAddressMagFilterRoundingEnable = true;
      }

      if (state->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
         samp.ShadowFunction = translate_shadow_func(state->compare_func);

      const float hw_max_lod = GEN_GEN >= 7 ? 14 : 13;

      samp.LODPreClampMode = CLAMP_MODE_OGL;
      samp.MinLOD = CLAMP(min_lod, 0, hw_max_lod);
      samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
      samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);

      /* .BorderColorPointer is filled in by iris_bind_sampler_states. */
   }

   return cso;
}
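/* A worked example of the anisotropy mapping above: pipe's max_anisotropy of
 * 16 becomes MIN2((16 - 2) / 2, RATIO161) = 7, i.e. RATIO161 (16:1),
 * assuming (as the formula implies) that the genxml RATIO* enums count 2:1
 * steps starting from RATIO21 = 0.
 */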
/**
 * The pipe->bind_sampler_states() driver hook.
 *
 * Now that we know all the sampler states, we upload them all into a
 * contiguous area of GPU memory, for 3DSTATE_SAMPLER_STATE_POINTERS_*.
 * We also fill out the border color state pointers at this point.
 *
 * We could defer this work to draw time, but we assume that binding
 * will be less frequent than drawing.
 */
// XXX: this may be a bad idea, need to make sure that st/mesa calls us
// XXX: with the complete set of shaders.  If it makes multiple calls to
// XXX: things one at a time, we could waste a lot of time assembling things.
// XXX: it doesn't even BUY us anything to do it here, because we only flag
// XXX: IRIS_DIRTY_SAMPLER_STATE when this is called...
static void
iris_bind_sampler_states(struct pipe_context *ctx,
                         enum pipe_shader_type p_stage,
                         unsigned start, unsigned count,
                         void **states)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
   shs->num_samplers = MAX2(shs->num_samplers, start + count);

   for (int i = 0; i < count; i++) {
      shs->samplers[start + i] = states[i];
   }

   /* Assemble the SAMPLER_STATEs into a contiguous table that lives
    * in the dynamic state memory zone, so we can point to it via the
    * 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
    */
   uint32_t *map =
      upload_state(ice->state.dynamic_uploader, &shs->sampler_table,
                   count * 4 * GENX(SAMPLER_STATE_length), 32);
   if (unlikely(!map))
      return;

   struct pipe_resource *res = shs->sampler_table.res;
   shs->sampler_table.offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(res));

   /* Make sure all land in the same BO */
   iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);

   for (int i = 0; i < count; i++) {
      struct iris_sampler_state *state = shs->samplers[i];

      if (!state) {
         memset(map, 0, 4 * GENX(SAMPLER_STATE_length));
      } else if (!state->needs_border_color) {
         memcpy(map, state->sampler_state, 4 * GENX(SAMPLER_STATE_length));
      } else {
         ice->state.need_border_colors = true;

         /* Stream out the border color and merge the pointer. */
         uint32_t offset =
            iris_upload_border_color(ice, &state->border_color);

         uint32_t dynamic[GENX(SAMPLER_STATE_length)];
         iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) {
            dyns.BorderColorPointer = offset;
         }

         for (uint32_t j = 0; j < GENX(SAMPLER_STATE_length); j++)
            map[j] = state->sampler_state[j] | dynamic[j];
      }

      map += GENX(SAMPLER_STATE_length);
   }

   ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
}
static enum isl_channel_select
fmt_swizzle(const struct iris_format_info *fmt, enum pipe_swizzle swz)
{
   switch (swz) {
   case PIPE_SWIZZLE_X: return fmt->swizzle.r;
   case PIPE_SWIZZLE_Y: return fmt->swizzle.g;
   case PIPE_SWIZZLE_Z: return fmt->swizzle.b;
   case PIPE_SWIZZLE_W: return fmt->swizzle.a;
   case PIPE_SWIZZLE_1: return SCS_ONE;
   case PIPE_SWIZZLE_0: return SCS_ZERO;
   default: unreachable("invalid swizzle");
   }
}
/**
 * The pipe->create_sampler_view() driver hook.
 */
static struct pipe_sampler_view *
iris_create_sampler_view(struct pipe_context *ctx,
                         struct pipe_resource *tex,
                         const struct pipe_sampler_view *tmpl)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_sampler_view *isv = calloc(1, sizeof(struct iris_sampler_view));

   if (!isv)
      return NULL;

   /* initialize base object */
   isv->base = *tmpl;
   isv->base.context = ctx;
   isv->base.texture = NULL;
   pipe_reference_init(&isv->base.reference, 1);
   pipe_resource_reference(&isv->base.texture, tex);

   void *map = upload_state(ice->state.surface_uploader, &isv->surface_state,
                            4 * GENX(RENDER_SURFACE_STATE_length), 64);
   if (unlikely(!map))
      return NULL;

   struct iris_bo *state_bo = iris_resource_bo(isv->surface_state.res);
   isv->surface_state.offset += iris_bo_offset_from_base_address(state_bo);

   if (util_format_is_depth_or_stencil(tmpl->format)) {
      struct iris_resource *zres, *sres;
      const struct util_format_description *desc =
         util_format_description(tmpl->format);

      iris_get_depth_stencil_resources(tex, &zres, &sres);

      tex = util_format_has_depth(desc) ? &zres->base : &sres->base;
   }

   isv->res = (struct iris_resource *) tex;

   isl_surf_usage_flags_t usage =
      ISL_SURF_USAGE_TEXTURE_BIT |
      (isv->res->surf.usage & ISL_SURF_USAGE_CUBE_BIT);

   const struct iris_format_info fmt =
      iris_format_for_usage(devinfo, tmpl->format, usage);

   isv->view = (struct isl_view) {
      .format = fmt.fmt,
      .swizzle = (struct isl_swizzle) {
         .r = fmt_swizzle(&fmt, tmpl->swizzle_r),
         .g = fmt_swizzle(&fmt, tmpl->swizzle_g),
         .b = fmt_swizzle(&fmt, tmpl->swizzle_b),
         .a = fmt_swizzle(&fmt, tmpl->swizzle_a),
      },
      .usage = usage,
   };

   /* Fill out SURFACE_STATE for this view. */
   if (tmpl->target != PIPE_BUFFER) {
      isv->view.base_level = tmpl->u.tex.first_level;
      isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;
      isv->view.base_array_layer = tmpl->u.tex.first_layer;
      isv->view.array_len =
         tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;

      isl_surf_fill_state(&screen->isl_dev, map,
                          .surf = &isv->res->surf, .view = &isv->view,
                          .mocs = MOCS_WB,
                          .address = isv->res->bo->gtt_offset);
                          // .clear_color = clear_color,
   } else {
      // XXX: what to do about isv->view?  other drivers don't use it for bufs
      const struct isl_format_layout *fmtl =
         isl_format_get_layout(isv->view.format);
      const unsigned cpp = fmtl->bpb / 8;

      isl_buffer_fill_state(&screen->isl_dev, map,
                            .address = isv->res->bo->gtt_offset +
                                       tmpl->u.buf.offset,
                            // XXX: buffer_texture_range_size from i965?
                            .size_B = tmpl->u.buf.size,
                            .format = isv->view.format,
                            .stride_B = cpp,
                            .mocs = MOCS_WB);
   }

   return &isv->base;
}
static void
iris_sampler_view_destroy(struct pipe_context *ctx,
                          struct pipe_sampler_view *state)
{
   struct iris_sampler_view *isv = (void *) state;
   pipe_resource_reference(&state->texture, NULL);
   pipe_resource_reference(&isv->surface_state.res, NULL);
   free(isv);
}
/**
 * The pipe->create_surface() driver hook.
 *
 * In Gallium nomenclature, "surfaces" are a view of a resource that
 * can be bound as a render target or depth/stencil buffer.
 */
static struct pipe_surface *
iris_create_surface(struct pipe_context *ctx,
                    struct pipe_resource *tex,
                    const struct pipe_surface *tmpl)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
   struct pipe_surface *psurf = &surf->base;
   struct iris_resource *res = (struct iris_resource *) tex;

   if (!surf)
      return NULL;

   pipe_reference_init(&psurf->reference, 1);
   pipe_resource_reference(&psurf->texture, tex);
   psurf->context = ctx;
   psurf->format = tmpl->format;
   psurf->width = tex->width0;
   psurf->height = tex->height0;
   psurf->texture = tex;
   psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
   psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
   psurf->u.tex.level = tmpl->u.tex.level;

   isl_surf_usage_flags_t usage = 0;
   if (tmpl->writable)
      usage = ISL_SURF_USAGE_STORAGE_BIT;
   else if (util_format_is_depth_or_stencil(tmpl->format))
      usage = ISL_SURF_USAGE_DEPTH_BIT;
   else
      usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;

   const struct iris_format_info fmt =
      iris_format_for_usage(devinfo, psurf->format, usage);

   if ((usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
       !isl_format_supports_rendering(devinfo, fmt.fmt)) {
      /* Framebuffer validation will reject this invalid case, but it
       * hasn't had the opportunity yet.  In the meantime, we need to
       * avoid hitting ISL asserts about unsupported formats below.
       */
      free(surf);
      return NULL;
   }

   surf->view = (struct isl_view) {
      .format = fmt.fmt,
      .base_level = tmpl->u.tex.level,
      .levels = 1,
      .base_array_layer = tmpl->u.tex.first_layer,
      .array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1,
      .swizzle = ISL_SWIZZLE_IDENTITY,
      .usage = usage,
   };

   /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */
   if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
                          ISL_SURF_USAGE_STENCIL_BIT))
      return psurf;

   void *map = upload_state(ice->state.surface_uploader, &surf->surface_state,
                            4 * GENX(RENDER_SURFACE_STATE_length), 64);
   if (unlikely(!map))
      return NULL;

   struct iris_bo *state_bo = iris_resource_bo(surf->surface_state.res);
   surf->surface_state.offset += iris_bo_offset_from_base_address(state_bo);

   isl_surf_fill_state(&screen->isl_dev, map,
                       .surf = &res->surf, .view = &surf->view,
                       .mocs = MOCS_WB,
                       .address = res->bo->gtt_offset);
                       // .clear_color = clear_color,

   return psurf;
}
/**
 * The pipe->set_shader_images() driver hook.
 */
static void
iris_set_shader_images(struct pipe_context *ctx,
                       enum pipe_shader_type p_stage,
                       unsigned start_slot, unsigned count,
                       const struct pipe_image_view *p_images)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   for (unsigned i = 0; i < count; i++) {
      if (p_images && p_images[i].resource) {
         const struct pipe_image_view *img = &p_images[i];
         struct iris_resource *res = (void *) img->resource;
         pipe_resource_reference(&shs->image[start_slot + i].res, &res->base);

         // XXX: these are not retained forever, use a separate uploader?
         void *map =
            upload_state(ice->state.surface_uploader,
                         &shs->image[start_slot + i].surface_state,
                         4 * GENX(RENDER_SURFACE_STATE_length), 64);
         if (unlikely(!map)) {
            pipe_resource_reference(&shs->image[start_slot + i].res, NULL);
            return;
         }

         struct iris_bo *surf_state_bo =
            iris_resource_bo(shs->image[start_slot + i].surface_state.res);
         shs->image[start_slot + i].surface_state.offset +=
            iris_bo_offset_from_base_address(surf_state_bo);

         isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
         enum isl_format isl_format =
            iris_format_for_usage(devinfo, img->format, usage).fmt;

         if (img->shader_access & PIPE_IMAGE_ACCESS_READ)
            isl_format = isl_lower_storage_image_format(devinfo, isl_format);

         shs->image[start_slot + i].access = img->shader_access;

         if (res->base.target != PIPE_BUFFER) {
            struct isl_view view = {
               .format = isl_format,
               .base_level = img->u.tex.level,
               .levels = 1,
               .base_array_layer = img->u.tex.first_layer,
               .array_len = img->u.tex.last_layer - img->u.tex.first_layer + 1,
               .swizzle = ISL_SWIZZLE_IDENTITY,
               .usage = usage,
            };

            isl_surf_fill_state(&screen->isl_dev, map,
                                .surf = &res->surf, .view = &view,
                                .mocs = MOCS_WB,
                                .address = res->bo->gtt_offset);
                                // .clear_color = clear_color,
         } else {
            // XXX: what to do about view?  other drivers don't use it for bufs
            const struct isl_format_layout *fmtl =
               isl_format_get_layout(isl_format);
            const unsigned cpp = fmtl->bpb / 8;

            isl_buffer_fill_state(&screen->isl_dev, map,
                                  .address = res->bo->gtt_offset,
                                  // XXX: buffer_texture_range_size from i965?
                                  .size_B = res->base.width0,
                                  .format = isl_format,
                                  .stride_B = cpp,
                                  .mocs = MOCS_WB);
         }
      } else {
         pipe_resource_reference(&shs->image[start_slot + i].res, NULL);
         pipe_resource_reference(&shs->image[start_slot + i].surface_state.res,
                                 NULL);
      }
   }

   ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
}
/**
 * The pipe->set_sampler_views() driver hook.
 */
static void
iris_set_sampler_views(struct pipe_context *ctx,
                       enum pipe_shader_type p_stage,
                       unsigned start, unsigned count,
                       struct pipe_sampler_view **views)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   unsigned i;
   for (i = 0; i < count; i++) {
      pipe_sampler_view_reference((struct pipe_sampler_view **)
                                  &shs->textures[i], views[i]);
   }
   for (; i < shs->num_textures; i++) {
      pipe_sampler_view_reference((struct pipe_sampler_view **)
                                  &shs->textures[i], NULL);
   }

   shs->num_textures = count;

   ice->state.dirty |= (IRIS_DIRTY_BINDINGS_VS << stage);
}
/**
 * The pipe->set_tess_state() driver hook.
 */
static void
iris_set_tess_state(struct pipe_context *ctx,
                    const float default_outer_level[4],
                    const float default_inner_level[2])
{
   struct iris_context *ice = (struct iris_context *) ctx;

   memcpy(&ice->state.default_outer_level[0],
          &default_outer_level[0], 4 * sizeof(float));
   memcpy(&ice->state.default_inner_level[0],
          &default_inner_level[0], 2 * sizeof(float));

   ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TCS;
}
static void
iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
{
   struct iris_surface *surf = (void *) p_surf;
   pipe_resource_reference(&p_surf->texture, NULL);
   pipe_resource_reference(&surf->surface_state.res, NULL);
   free(surf);
}
// XXX: actually implement user clip planes
static void
iris_set_clip_state(struct pipe_context *ctx,
                    const struct pipe_clip_state *state)
{
}
/**
 * The pipe->set_polygon_stipple() driver hook.
 */
static void
iris_set_polygon_stipple(struct pipe_context *ctx,
                         const struct pipe_poly_stipple *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   memcpy(&ice->state.poly_stipple, state, sizeof(*state));
   ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
}
/**
 * The pipe->set_sample_mask() driver hook.
 */
static void
iris_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   /* We only support 16x MSAA, so we have 16 bits of sample mask.
    * st/mesa may pass us 0xffffffff though, meaning "enable all samples".
    */
   ice->state.sample_mask = sample_mask & 0xffff;
   ice->state.dirty |= IRIS_DIRTY_SAMPLE_MASK;
}
/**
 * The pipe->set_scissor_states() driver hook.
 *
 * This corresponds to our SCISSOR_RECT state structures.  It's an
 * exact match, so we just store them, and memcpy them out later.
 */
static void
iris_set_scissor_states(struct pipe_context *ctx,
                        unsigned start_slot,
                        unsigned num_scissors,
                        const struct pipe_scissor_state *states)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   for (unsigned i = 0; i < num_scissors; i++) {
      ice->state.scissors[start_slot + i] = states[i];
   }

   ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
}
/**
 * The pipe->set_stencil_ref() driver hook.
 *
 * This is added to 3DSTATE_WM_DEPTH_STENCIL dynamically at draw time.
 */
static void
iris_set_stencil_ref(struct pipe_context *ctx,
                     const struct pipe_stencil_ref *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   memcpy(&ice->state.stencil_ref, state, sizeof(*state));
   ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
}
static float
viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
{
   return copysignf(state->scale[axis], sign) + state->translate[axis];
}
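
/* Illustrative example (not part of the driver): for a 1920-pixel-wide
 * viewport filling the framebuffer, state->scale[0] = 960 and
 * state->translate[0] = 960, so viewport_extent(state, 0, -1.0f) returns
 * copysignf(960, -1) + 960 = 0 and viewport_extent(state, 0, 1.0f) returns
 * 960 + 960 = 1920 - the left and right screen-space edges.
 */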
static void
calculate_guardband_size(uint32_t fb_width, uint32_t fb_height,
                         float m00, float m11, float m30, float m31,
                         float *xmin, float *xmax,
                         float *ymin, float *ymax)
{
   /* According to the "Vertex X,Y Clamping and Quantization" section of the
    * Strips and Fans documentation:
    *
    * "The vertex X and Y screen-space coordinates are also /clamped/ to the
    *  fixed-point "guardband" range supported by the rasterization hardware"
    *
    * and
    *
    * "In almost all circumstances, if an object's vertices are actually
    *  modified by this clamping (i.e., had X or Y coordinates outside of
    *  the guardband extent), the rendered object will not match the intended
    *  result.  Therefore software should take steps to ensure that this does
    *  not happen - e.g., by clipping objects such that they do not exceed
    *  these limits after the Drawing Rectangle is applied."
    *
    * I believe the fundamental restriction is that the rasterizer (in
    * the SF/WM stages) has a limit on the number of pixels that can be
    * rasterized.  We need to ensure any coordinates beyond the rasterizer
    * limit are handled by the clipper.  So effectively that limit becomes
    * the clipper's guardband size.
    *
    * It goes on to say:
    *
    * "In addition, in order to be correctly rendered, objects must have a
    *  screenspace bounding box not exceeding 8K in the X or Y direction.
    *  This additional restriction must also be comprehended by software,
    *  i.e., enforced by use of clipping."
    *
    * This makes no sense.  Gen7+ hardware supports 16K render targets,
    * and you definitely need to be able to draw polygons that fill the
    * surface.  Our assumption is that the rasterizer was limited to 8K
    * on Sandybridge, which only supports 8K surfaces, and it was actually
    * increased to 16K on Ivybridge and later.
    *
    * So, limit the guardband to 16K on Gen7+ and 8K on Sandybridge.
    */
   const float gb_size = GEN_GEN >= 7 ? 16384.0f : 8192.0f;

   if (m00 != 0 && m11 != 0) {
      /* First, we compute the screen-space render area */
      const float ss_ra_xmin = MIN3(        0, m30 + m00, m30 - m00);
      const float ss_ra_xmax = MAX3( fb_width, m30 + m00, m30 - m00);
      const float ss_ra_ymin = MIN3(        0, m31 + m11, m31 - m11);
      const float ss_ra_ymax = MAX3(fb_height, m31 + m11, m31 - m11);

      /* We want the guardband to be centered on that */
      const float ss_gb_xmin = (ss_ra_xmin + ss_ra_xmax) / 2 - gb_size;
      const float ss_gb_xmax = (ss_ra_xmin + ss_ra_xmax) / 2 + gb_size;
      const float ss_gb_ymin = (ss_ra_ymin + ss_ra_ymax) / 2 - gb_size;
      const float ss_gb_ymax = (ss_ra_ymin + ss_ra_ymax) / 2 + gb_size;

      /* Now we need it in native device coordinates */
      const float ndc_gb_xmin = (ss_gb_xmin - m30) / m00;
      const float ndc_gb_xmax = (ss_gb_xmax - m30) / m00;
      const float ndc_gb_ymin = (ss_gb_ymin - m31) / m11;
      const float ndc_gb_ymax = (ss_gb_ymax - m31) / m11;

      /* Thanks to Y-flipping and ORIGIN_UPPER_LEFT, the Y coordinates may be
       * flipped upside-down.  X should be fine though.
       */
      assert(ndc_gb_xmin <= ndc_gb_xmax);
      *xmin = ndc_gb_xmin;
      *xmax = ndc_gb_xmax;
      *ymin = MIN2(ndc_gb_ymin, ndc_gb_ymax);
      *ymax = MAX2(ndc_gb_ymin, ndc_gb_ymax);
   } else {
      /* The viewport scales to 0, so nothing will be rendered. */
      *xmin = 0.0f;
      *xmax = 0.0f;
      *ymin = 0.0f;
      *ymax = 0.0f;
   }
}
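
/* Worked example (illustrative only): with a 1920x1080 framebuffer and a
 * full-framebuffer viewport, m00 = 960 and m30 = 960, so the screen-space
 * render area spans [0, 1920] in X.  On Gen7+, gb_size = 16384, giving a
 * guardband of [960 - 16384, 960 + 16384] in screen space, which maps back
 * to [(-15424 - 960) / 960, (17344 - 960) / 960] ~= [-17.07, +17.07] in
 * NDC.  Geometry within that range is clamped rather than clipped.
 */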
/**
 * The pipe->set_viewport_states() driver hook.
 *
 * This corresponds to our SF_CLIP_VIEWPORT states.  We can't calculate
 * the guardband yet, as we need the framebuffer dimensions, but we can
 * at least fill out the rest.
 */
static void
iris_set_viewport_states(struct pipe_context *ctx,
                         unsigned start_slot,
                         unsigned count,
                         const struct pipe_viewport_state *states)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_genx_state *genx = ice->state.genx;
   uint32_t *vp_map = &genx->sf_cl_vp[start_slot];

   for (unsigned i = 0; i < count; i++) {
      const struct pipe_viewport_state *state = &states[i];

      memcpy(&ice->state.viewports[start_slot + i], state, sizeof(*state));

      iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
         vp.ViewportMatrixElementm00 = state->scale[0];
         vp.ViewportMatrixElementm11 = state->scale[1];
         vp.ViewportMatrixElementm22 = state->scale[2];
         vp.ViewportMatrixElementm30 = state->translate[0];
         vp.ViewportMatrixElementm31 = state->translate[1];
         vp.ViewportMatrixElementm32 = state->translate[2];
         /* XXX: in i965 this is computed based on the drawbuffer size,
          * but we don't have that here...
          */
         vp.XMinClipGuardband = -1.0;
         vp.XMaxClipGuardband = 1.0;
         vp.YMinClipGuardband = -1.0;
         vp.YMaxClipGuardband = 1.0;
         vp.XMinViewPort = viewport_extent(state, 0, -1.0f);
         vp.XMaxViewPort = viewport_extent(state, 0,  1.0f) - 1;
         vp.YMinViewPort = viewport_extent(state, 1, -1.0f);
         vp.YMaxViewPort = viewport_extent(state, 1,  1.0f) - 1;
      }

      vp_map += GENX(SF_CLIP_VIEWPORT_length);
   }

   ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;

   if (ice->state.cso_rast && (!ice->state.cso_rast->depth_clip_near ||
                               !ice->state.cso_rast->depth_clip_far))
      ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
}
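
/* Illustrative example (not driver code): glViewport(0, 0, 1920, 1080) may
 * reach us as scale = { 960, -540, ... }, translate = { 960, 540, ... },
 * with the Y scale negated for ORIGIN_UPPER_LEFT flipping.  Either sign
 * works with viewport_extent(), since copysignf() only takes the magnitude;
 * the packed state ends up with XMinViewPort = 0 and XMaxViewPort = 1919 -
 * the hardware treats the max as inclusive, hence the "- 1" above.
 */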
/**
 * The pipe->set_framebuffer_state() driver hook.
 *
 * Sets the current draw FBO, including color render targets, depth,
 * and stencil buffers.
 */
static void
iris_set_framebuffer_state(struct pipe_context *ctx,
                           const struct pipe_framebuffer_state *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct isl_device *isl_dev = &screen->isl_dev;
   struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
   struct iris_resource *zres;
   struct iris_resource *stencil_res;

   unsigned samples = util_framebuffer_get_num_samples(state);

   if (cso->samples != samples) {
      ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
   }

   if (cso->nr_cbufs != state->nr_cbufs) {
      ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
   }

   if ((cso->layers == 0) != (state->layers == 0)) {
      ice->state.dirty |= IRIS_DIRTY_CLIP;
   }

   util_copy_framebuffer_state(cso, state);
   cso->samples = samples;

   struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;

   struct isl_view view = {
      .base_level = 0,
      .levels = 1,
      .base_array_layer = 0,
      .array_len = 1,
      .swizzle = ISL_SWIZZLE_IDENTITY,
   };

   struct isl_depth_stencil_hiz_emit_info info = {
      .view = &view,
   };

   if (cso->zsbuf) {
      iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres,
                                       &stencil_res);

      view.base_level = cso->zsbuf->u.tex.level;
      view.base_array_layer = cso->zsbuf->u.tex.first_layer;
      view.array_len =
         cso->zsbuf->u.tex.last_layer - cso->zsbuf->u.tex.first_layer + 1;

      if (zres) {
         view.usage |= ISL_SURF_USAGE_DEPTH_BIT;

         info.depth_surf = &zres->surf;
         info.depth_address = zres->bo->gtt_offset;
         info.hiz_usage = ISL_AUX_USAGE_NONE;

         view.format = zres->surf.format;
      }

      if (stencil_res) {
         view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
         info.stencil_surf = &stencil_res->surf;
         info.stencil_address = stencil_res->bo->gtt_offset;
         if (!zres)
            view.format = stencil_res->surf.format;
      }
   }

   isl_emit_depth_stencil_hiz_s(isl_dev, cso_z->packets, &info);

   /* Make a null surface for unbound buffers */
   void *null_surf_map =
      upload_state(ice->state.surface_uploader, &ice->state.null_fb,
                   4 * GENX(RENDER_SURFACE_STATE_length), 64);
   isl_null_fill_state(&screen->isl_dev, null_surf_map,
                       isl_extent3d(cso->width, cso->height,
                                    cso->layers ? cso->layers : 1));
   ice->state.null_fb.offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));

   ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;

   /* Render target change */
   ice->state.dirty |= IRIS_DIRTY_BINDINGS_FS;

   ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_FRAMEBUFFER];

   // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
   // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6

   /* The PIPE_CONTROL command description says:
    *
    * "Whenever a Binding Table Index (BTI) used by a Render Target Message
    *  points to a different RENDER_SURFACE_STATE, SW must issue a Render
    *  Target Cache Flush by enabling this bit.  When render target flush
    *  is set due to new association of BTI, PS Scoreboard Stall bit must
    *  be set in this packet."
    */
   // XXX: does this need to happen at 3DSTATE_BTP_PS time?
   iris_emit_pipe_control_flush(&ice->render_batch,
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_STALL_AT_SCOREBOARD);
}
/**
 * The pipe->set_constant_buffer() driver hook.
 *
 * This uploads any constant data in user buffers, and references
 * any UBO resources containing constant data.
 */
static void
iris_set_constant_buffer(struct pipe_context *ctx,
                         enum pipe_shader_type p_stage, unsigned index,
                         const struct pipe_constant_buffer *input)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];
   struct iris_const_buffer *cbuf = &shs->constbuf[index];

   if (input && (input->buffer || input->user_buffer)) {
      if (input->user_buffer) {
         u_upload_data(ctx->const_uploader, 0, input->buffer_size, 32,
                       input->user_buffer, &cbuf->data.offset,
                       &cbuf->data.res);
      } else {
         pipe_resource_reference(&cbuf->data.res, input->buffer);
      }

      // XXX: these are not retained forever, use a separate uploader?
      void *map =
         upload_state(ice->state.surface_uploader, &cbuf->surface_state,
                      4 * GENX(RENDER_SURFACE_STATE_length), 64);
      if (!unlikely(map)) {
         pipe_resource_reference(&cbuf->data.res, NULL);
         return;
      }

      struct iris_resource *res = (void *) cbuf->data.res;
      struct iris_bo *surf_bo = iris_resource_bo(cbuf->surface_state.res);
      cbuf->surface_state.offset += iris_bo_offset_from_base_address(surf_bo);

      isl_buffer_fill_state(&screen->isl_dev, map,
                            .address = res->bo->gtt_offset + cbuf->data.offset,
                            .size_B = input->buffer_size,
                            .format = ISL_FORMAT_R32G32B32A32_FLOAT);
   } else {
      pipe_resource_reference(&cbuf->data.res, NULL);
      pipe_resource_reference(&cbuf->surface_state.res, NULL);
   }

   ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
   // XXX: maybe not necessary all the time...?
   // XXX: we need 3DS_BTP to commit these changes, and if we fell back to
   // XXX: pull model we may need actual new bindings...
   ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
}
/**
 * The pipe->set_shader_buffers() driver hook.
 *
 * This binds SSBOs and ABOs.  Unfortunately, we need to stream out
 * SURFACE_STATE here, as the buffer offset may change each time.
 */
static void
iris_set_shader_buffers(struct pipe_context *ctx,
                        enum pipe_shader_type p_stage,
                        unsigned start_slot, unsigned count,
                        const struct pipe_shader_buffer *buffers)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   gl_shader_stage stage = stage_from_pipe(p_stage);
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   for (unsigned i = 0; i < count; i++) {
      if (buffers && buffers[i].buffer) {
         const struct pipe_shader_buffer *buffer = &buffers[i];
         struct iris_resource *res = (void *) buffer->buffer;
         pipe_resource_reference(&shs->ssbo[start_slot + i], &res->base);

         // XXX: these are not retained forever, use a separate uploader?
         void *map =
            upload_state(ice->state.surface_uploader,
                         &shs->ssbo_surface_state[start_slot + i],
                         4 * GENX(RENDER_SURFACE_STATE_length), 64);
         if (!unlikely(map)) {
            pipe_resource_reference(&shs->ssbo[start_slot + i], NULL);
            return;
         }

         struct iris_bo *surf_state_bo =
            iris_resource_bo(shs->ssbo_surface_state[start_slot + i].res);
         shs->ssbo_surface_state[start_slot + i].offset +=
            iris_bo_offset_from_base_address(surf_state_bo);

         isl_buffer_fill_state(&screen->isl_dev, map,
                               .address =
                                  res->bo->gtt_offset + buffer->buffer_offset,
                               .size_B = buffer->buffer_size,
                               .format = ISL_FORMAT_RAW);
      } else {
         pipe_resource_reference(&shs->ssbo[start_slot + i], NULL);
         pipe_resource_reference(&shs->ssbo_surface_state[start_slot + i].res,
                                 NULL);
      }
   }

   ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
}
static void
iris_delete_state(struct pipe_context *ctx, void *state)
{
   free(state);
}
static void
iris_free_vertex_buffers(struct iris_vertex_buffer_state *cso)
{
   for (unsigned i = 0; i < cso->num_buffers; i++)
      pipe_resource_reference(&cso->resources[i], NULL);
}
/**
 * The pipe->set_vertex_buffers() driver hook.
 *
 * This translates pipe_vertex_buffer to our 3DSTATE_VERTEX_BUFFERS packet.
 */
static void
iris_set_vertex_buffers(struct pipe_context *ctx,
                        unsigned start_slot, unsigned count,
                        const struct pipe_vertex_buffer *buffers)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;

   iris_free_vertex_buffers(&ice->state.genx->vertex_buffers);

   cso->num_buffers = count;

   iris_pack_command(GENX(3DSTATE_VERTEX_BUFFERS), cso->vertex_buffers, vb) {
      vb.DWordLength = 4 * MAX2(cso->num_buffers, 1) - 1;
   }

   uint32_t *vb_pack_dest = &cso->vertex_buffers[1];

   if (count == 0) {
      iris_pack_state(GENX(VERTEX_BUFFER_STATE), vb_pack_dest, vb) {
         vb.VertexBufferIndex = start_slot;
         vb.NullVertexBuffer = true;
         vb.AddressModifyEnable = true;
      }
   }

   for (unsigned i = 0; i < count; i++) {
      assert(!buffers[i].is_user_buffer);

      pipe_resource_reference(&cso->resources[i], buffers[i].buffer.resource);
      struct iris_resource *res = (void *) cso->resources[i];

      iris_pack_state(GENX(VERTEX_BUFFER_STATE), vb_pack_dest, vb) {
         vb.VertexBufferIndex = start_slot + i;
         vb.AddressModifyEnable = true;
         vb.BufferPitch = buffers[i].stride;
         vb.BufferSize = res->bo->size;
         vb.BufferStartingAddress =
            ro_bo(NULL, res->bo->gtt_offset + buffers[i].buffer_offset);
      }

      vb_pack_dest += GENX(VERTEX_BUFFER_STATE_length);
   }

   ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
}
/**
 * Gallium CSO for vertex elements.
 */
struct iris_vertex_element_state {
   uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
   uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
   unsigned count;
};
/**
 * The pipe->create_vertex_elements() driver hook.
 *
 * This translates pipe_vertex_element to our 3DSTATE_VERTEX_ELEMENTS
 * and 3DSTATE_VF_INSTANCING commands.  SGVs are handled at draw time.
 */
static void *
iris_create_vertex_elements(struct pipe_context *ctx,
                            unsigned count,
                            const struct pipe_vertex_element *state)
{
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_vertex_element_state *cso =
      malloc(sizeof(struct iris_vertex_element_state));

   cso->count = count;

   /* TODO:
    * - create edge flag one
    * - create SGV ones
    * - if those are necessary, use count + 1/2/3... OR in the length
    */
   iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) {
      ve.DWordLength =
         1 + GENX(VERTEX_ELEMENT_STATE_length) * MAX2(count, 1) - 2;
   }

   uint32_t *ve_pack_dest = &cso->vertex_elements[1];
   uint32_t *vfi_pack_dest = cso->vf_instancing;

   if (count == 0) {
      iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
         ve.Valid = true;
         ve.SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT;
         ve.Component0Control = VFCOMP_STORE_0;
         ve.Component1Control = VFCOMP_STORE_0;
         ve.Component2Control = VFCOMP_STORE_0;
         ve.Component3Control = VFCOMP_STORE_1_FP;
      }

      iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
      }
   }

   for (int i = 0; i < count; i++) {
      const struct iris_format_info fmt =
         iris_format_for_usage(devinfo, state[i].src_format, 0);
      unsigned comp[4] = { VFCOMP_STORE_SRC, VFCOMP_STORE_SRC,
                           VFCOMP_STORE_SRC, VFCOMP_STORE_SRC };

      switch (isl_format_get_num_channels(fmt.fmt)) {
      case 0: comp[0] = VFCOMP_STORE_0;
      case 1: comp[1] = VFCOMP_STORE_0;
      case 2: comp[2] = VFCOMP_STORE_0;
      case 3:
         comp[3] = isl_format_has_int_channel(fmt.fmt) ? VFCOMP_STORE_1_INT
                                                       : VFCOMP_STORE_1_FP;
         break;
      }
      iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
         ve.VertexBufferIndex = state[i].vertex_buffer_index;
         ve.Valid = true;
         ve.SourceElementOffset = state[i].src_offset;
         ve.SourceElementFormat = fmt.fmt;
         ve.Component0Control = comp[0];
         ve.Component1Control = comp[1];
         ve.Component2Control = comp[2];
         ve.Component3Control = comp[3];
      }

      iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
         vi.VertexElementIndex = i;
         vi.InstancingEnable = state[i].instance_divisor > 0;
         vi.InstanceDataStepRate = state[i].instance_divisor;
      }

      ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
      vfi_pack_dest += GENX(3DSTATE_VF_INSTANCING_length);
   }

   return cso;
}
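
/* Example of the fallthrough above (illustrative): a two-channel format
 * such as ISL_FORMAT_R32G32_FLOAT has isl_format_get_num_channels() == 2,
 * so we enter at case 2.  comp[2] becomes VFCOMP_STORE_0 and execution
 * falls through to case 3, where comp[3] becomes VFCOMP_STORE_1_FP -
 * giving the shader the GL default of vec4(x, y, 0.0, 1.0) for the
 * missing Z and W components.
 */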
/**
 * The pipe->bind_vertex_elements_state() driver hook.
 */
static void
iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_vertex_element_state *old_cso = ice->state.cso_vertex_elements;
   struct iris_vertex_element_state *new_cso = state;

   /* 3DSTATE_VF_SGVs overrides the last VE, so if the count is changing,
    * we need to re-emit it to ensure we're overriding the right one.
    */
   if (new_cso && cso_changed(count))
      ice->state.dirty |= IRIS_DIRTY_VF_SGVS;

   ice->state.cso_vertex_elements = state;
   ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
}
/**
 * Gallium CSO for stream output (transform feedback) targets.
 */
struct iris_stream_output_target {
   struct pipe_stream_output_target base;

   uint32_t so_buffer[GENX(3DSTATE_SO_BUFFER_length)];

   /** Storage holding the offset where we're writing in the buffer */
   struct iris_state_ref offset;
};
/**
 * The pipe->create_stream_output_target() driver hook.
 *
 * "Target" here refers to a destination buffer.  We translate this into
 * a 3DSTATE_SO_BUFFER packet.  We can handle most fields, but don't yet
 * know which buffer this represents, or whether we ought to zero the
 * write-offsets, or append.  Those are handled in the set() hook.
 */
static struct pipe_stream_output_target *
iris_create_stream_output_target(struct pipe_context *ctx,
                                 struct pipe_resource *res,
                                 unsigned buffer_offset,
                                 unsigned buffer_size)
{
   struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
   if (!cso)
      return NULL;

   pipe_reference_init(&cso->base.reference, 1);
   pipe_resource_reference(&cso->base.buffer, res);
   cso->base.buffer_offset = buffer_offset;
   cso->base.buffer_size = buffer_size;
   cso->base.context = ctx;

   upload_state(ctx->stream_uploader, &cso->offset, 4 * sizeof(uint32_t), 4);

   iris_pack_command(GENX(3DSTATE_SO_BUFFER), cso->so_buffer, sob) {
      sob.SurfaceBaseAddress =
         rw_bo(NULL, iris_resource_bo(res)->gtt_offset + buffer_offset);
      sob.SOBufferEnable = true;
      sob.StreamOffsetWriteEnable = true;
      sob.StreamOutputBufferOffsetAddressEnable = true;
      sob.MOCS = MOCS_WB; // XXX: MOCS

      sob.SurfaceSize = MAX2(buffer_size / 4, 1) - 1;

      /* .SOBufferIndex, .StreamOffset, and .StreamOutputBufferOffsetAddress
       * are filled in later when we have stream IDs.
       */
   }

   return &cso->base;
}
static void
iris_stream_output_target_destroy(struct pipe_context *ctx,
                                  struct pipe_stream_output_target *state)
{
   struct iris_stream_output_target *cso = (void *) state;

   pipe_resource_reference(&cso->base.buffer, NULL);
   pipe_resource_reference(&cso->offset.res, NULL);

   free(cso);
}
/**
 * The pipe->set_stream_output_targets() driver hook.
 *
 * At this point, we know which targets are bound to a particular index,
 * and also whether we want to append or start over.  We can finish the
 * 3DSTATE_SO_BUFFER packets we started earlier.
 */
static void
iris_set_stream_output_targets(struct pipe_context *ctx,
                               unsigned num_targets,
                               struct pipe_stream_output_target **targets,
                               const unsigned *offsets)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_genx_state *genx = ice->state.genx;
   uint32_t *so_buffers = genx->so_buffers;

   const bool active = num_targets > 0;
   if (ice->state.streamout_active != active) {
      ice->state.streamout_active = active;
      ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
   }

   for (int i = 0; i < 4; i++) {
      pipe_so_target_reference(&ice->state.so_target[i],
                               i < num_targets ? targets[i] : NULL);
   }

   /* No need to update 3DSTATE_SO_BUFFER unless SOL is active. */
   if (!active)
      return;

   for (unsigned i = 0; i < 4; i++,
        so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {

      if (i >= num_targets || !targets[i]) {
         iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob)
            sob.SOBufferIndex = i;
         continue;
      }

      struct iris_stream_output_target *tgt = (void *) targets[i];

      /* Note that offsets[i] will either be 0, causing us to zero
       * the value in the buffer, or 0xFFFFFFFF, which happens to mean
       * "continue appending at the existing offset."
       */
      assert(offsets[i] == 0 || offsets[i] == 0xFFFFFFFF);

      uint32_t dynamic[GENX(3DSTATE_SO_BUFFER_length)];
      iris_pack_state(GENX(3DSTATE_SO_BUFFER), dynamic, dyns) {
         dyns.SOBufferIndex = i;
         dyns.StreamOffset = offsets[i];
         dyns.StreamOutputBufferOffsetAddress =
            rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset +
                        tgt->offset.offset + i * sizeof(uint32_t));
      }

      for (uint32_t j = 0; j < GENX(3DSTATE_SO_BUFFER_length); j++) {
         so_buffers[j] = tgt->so_buffer[j] | dynamic[j];
      }
   }

   ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
}
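
/* A sketch of the merge above (illustrative): the CSO-baked packet carries
 * SurfaceBaseAddress, SurfaceSize, and the enable bits, while the dynamic
 * packet carries only SOBufferIndex, StreamOffset, and the offset address.
 * ORing them DWord-by-DWord yields a complete 3DSTATE_SO_BUFFER, because
 * each field is zero in exactly one of the two copies.
 */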
/**
 * An iris-vtable helper for encoding the 3DSTATE_SO_DECL_LIST and
 * 3DSTATE_STREAMOUT packets.
 *
 * 3DSTATE_SO_DECL_LIST is a list of shader outputs we want the streamout
 * hardware to record.  We can create it entirely based on the shader, with
 * no dynamic state dependencies.
 *
 * 3DSTATE_STREAMOUT is an annoying mix of shader-based information and
 * state-based settings.  We capture the shader-related ones here, and merge
 * the rest in at draw time.
 */
static uint32_t *
iris_create_so_decl_list(const struct pipe_stream_output_info *info,
                         const struct brw_vue_map *vue_map)
{
   struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
   int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
   int max_decls = 0;
   STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);

   memset(so_decl, 0, sizeof(so_decl));

   /* Construct the list of SO_DECLs to be emitted.  The formatting of the
    * command feels strange -- each dword pair contains a SO_DECL per stream.
    */
   for (unsigned i = 0; i < info->num_outputs; i++) {
      const struct pipe_stream_output *output = &info->output[i];
      const int buffer = output->output_buffer;
      const int varying = output->register_index;
      const unsigned stream_id = output->stream;
      assert(stream_id < MAX_VERTEX_STREAMS);

      buffer_mask[stream_id] |= 1 << buffer;

      assert(vue_map->varying_to_slot[varying] >= 0);

      /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
       * array.  Instead, it simply increments DstOffset for the following
       * input by the number of components that should be skipped.
       *
       * Our hardware is unusual in that it requires us to program SO_DECLs
       * for fake "hole" components, rather than simply taking the offset
       * for each real varying.  Each hole can have size 1, 2, 3, or 4; we
       * program as many size = 4 holes as we can, then a final hole to
       * accommodate the final 1, 2, or 3 remaining.
       */
      int skip_components = output->dst_offset - next_offset[buffer];

      while (skip_components > 0) {
         so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
            .HoleFlag = 1,
            .OutputBufferSlot = output->output_buffer,
            .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
         };
         skip_components -= 4;
      }

      next_offset[buffer] = output->dst_offset + output->num_components;

      so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
         .OutputBufferSlot = output->output_buffer,
         .RegisterIndex = vue_map->varying_to_slot[varying],
         .ComponentMask =
            ((1 << output->num_components) - 1) << output->start_component,
      };

      if (decls[stream_id] > max_decls)
         max_decls = decls[stream_id];
   }

   unsigned dwords = GENX(3DSTATE_STREAMOUT_length) + (3 + 2 * max_decls);
   uint32_t *map = ralloc_size(NULL, sizeof(uint32_t) * dwords);
   uint32_t *so_decl_map = map + GENX(3DSTATE_STREAMOUT_length);

   iris_pack_command(GENX(3DSTATE_STREAMOUT), map, sol) {
      int urb_entry_read_offset = 0;
      int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
         urb_entry_read_offset;

      /* We always read the whole vertex.  This could be reduced at some
       * point by reading less and offsetting the register index in the
       * SO_DECLs.
       */
      sol.Stream0VertexReadOffset = urb_entry_read_offset;
      sol.Stream0VertexReadLength = urb_entry_read_length - 1;
      sol.Stream1VertexReadOffset = urb_entry_read_offset;
      sol.Stream1VertexReadLength = urb_entry_read_length - 1;
      sol.Stream2VertexReadOffset = urb_entry_read_offset;
      sol.Stream2VertexReadLength = urb_entry_read_length - 1;
      sol.Stream3VertexReadOffset = urb_entry_read_offset;
      sol.Stream3VertexReadLength = urb_entry_read_length - 1;

      /* Set buffer pitches; 0 means unbound. */
      sol.Buffer0SurfacePitch = 4 * info->stride[0];
      sol.Buffer1SurfacePitch = 4 * info->stride[1];
      sol.Buffer2SurfacePitch = 4 * info->stride[2];
      sol.Buffer3SurfacePitch = 4 * info->stride[3];
   }

   iris_pack_command(GENX(3DSTATE_SO_DECL_LIST), so_decl_map, list) {
      list.DWordLength = 3 + 2 * max_decls - 2;
      list.StreamtoBufferSelects0 = buffer_mask[0];
      list.StreamtoBufferSelects1 = buffer_mask[1];
      list.StreamtoBufferSelects2 = buffer_mask[2];
      list.StreamtoBufferSelects3 = buffer_mask[3];
      list.NumEntries0 = decls[0];
      list.NumEntries1 = decls[1];
      list.NumEntries2 = decls[2];
      list.NumEntries3 = decls[3];
   }

   for (int i = 0; i < max_decls; i++) {
      iris_pack_state(GENX(SO_DECL_ENTRY), so_decl_map + 3 + i * 2, entry) {
         entry.Stream0Decl = so_decl[0][i];
         entry.Stream1Decl = so_decl[1][i];
         entry.Stream2Decl = so_decl[2][i];
         entry.Stream3Decl = so_decl[3][i];
      }
   }

   return map;
}
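
/* Worked example (illustrative): a shader writing a vec2 at DstOffset 6
 * into a buffer whose next_offset is 0 needs skip_components = 6.  The
 * loop above emits one hole with ComponentMask = 0xf (4 components) and
 * one with ComponentMask = 0x3 (the remaining 2), then the real SO_DECL
 * for the vec2 itself, leaving next_offset[buffer] = 8.
 */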
static void
iris_compute_sbe_urb_read_interval(uint64_t fs_input_slots,
                                   const struct brw_vue_map *last_vue_map,
                                   bool two_sided_color,
                                   unsigned *out_offset,
                                   unsigned *out_length)
{
   /* The compiler computes the first URB slot without considering COL/BFC
    * swizzling (because it doesn't know whether it's enabled), so we need
    * to do that here too.  This may result in a smaller offset, which
    * should be safe.
    */
   const unsigned first_slot =
      brw_compute_first_urb_slot_required(fs_input_slots, last_vue_map);

   /* This becomes the URB read offset (counted in pairs of slots). */
   assert(first_slot % 2 == 0);
   *out_offset = first_slot / 2;

   /* We need to adjust the inputs read to account for front/back color
    * swizzling, as it can make the URB length longer.
    */
   for (int c = 0; c <= 1; c++) {
      if (fs_input_slots & (VARYING_BIT_COL0 << c)) {
         /* If two sided color is enabled, the fragment shader's gl_Color
          * (COL0) input comes from either the gl_FrontColor (COL0) or
          * gl_BackColor (BFC0) input varyings.  Mark BFC as used, too.
          */
         if (two_sided_color)
            fs_input_slots |= (VARYING_BIT_BFC0 << c);

         /* If front color isn't written, we opt to give them back color
          * instead of an undefined value.  Switch from COL to BFC.
          */
         if (last_vue_map->varying_to_slot[VARYING_SLOT_COL0 + c] == -1) {
            fs_input_slots &= ~(VARYING_BIT_COL0 << c);
            fs_input_slots |= (VARYING_BIT_BFC0 << c);
         }
      }
   }

   /* Compute the minimum URB Read Length necessary for the FS inputs.
    *
    * From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
    * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
    *
    * "This field should be set to the minimum length required to read the
    *  maximum source attribute.  The maximum source attribute is indicated
    *  by the maximum value of the enabled Attribute # Source Attribute if
    *  Attribute Swizzle Enable is set, Number of Output Attributes-1 if
    *  enable is not set.
    *  read_length = ceiling((max_source_attr + 1) / 2)
    *
    *  [errata] Corruption/Hang possible if length programmed larger than
    *  recommended"
    *
    * Similar text exists for Ivy Bridge.
    *
    * We find the last URB slot that's actually read by the FS.
    */
   unsigned last_read_slot = last_vue_map->num_slots - 1;
   while (last_read_slot > first_slot && !(fs_input_slots &
          (1ull << last_vue_map->slot_to_varying[last_read_slot])))
      --last_read_slot;

   /* The URB read length is the difference of the two, counted in pairs. */
   *out_length = DIV_ROUND_UP(last_read_slot - first_slot + 1, 2);
}
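
/* Worked example (illustrative): if the first FS input lives in URB slot 4
 * and the last read slot is 9, then *out_offset = 4 / 2 = 2 (in pairs) and
 * *out_length = DIV_ROUND_UP(9 - 4 + 1, 2) = 3, i.e. the SBE reads three
 * 256-bit slot pairs starting at the third pair.
 */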
static void
iris_emit_sbe_swiz(struct iris_batch *batch,
                   const struct iris_context *ice,
                   unsigned urb_read_offset,
                   unsigned sprite_coord_enables)
{
   struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = {};
   const struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
   const struct brw_vue_map *vue_map = ice->shaders.last_vue_map;
   const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;

   /* XXX: this should be generated when putting programs in place */

   // XXX: raster->sprite_coord_enable

   for (int fs_attr = 0; fs_attr < VARYING_SLOT_MAX; fs_attr++) {
      const int input_index = wm_prog_data->urb_setup[fs_attr];
      if (input_index < 0 || input_index >= 16)
         continue;

      struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr =
         &attr_overrides[input_index];
      int slot = vue_map->varying_to_slot[fs_attr];

      /* Viewport and Layer are stored in the VUE header.  We need to override
       * them to zero if earlier stages didn't write them, as GL requires that
       * they read back as zero when not explicitly set.
       */
      switch (fs_attr) {
      case VARYING_SLOT_VIEWPORT:
      case VARYING_SLOT_LAYER:
         attr->ComponentOverrideX = true;
         attr->ComponentOverrideW = true;
         attr->ConstantSource = CONST_0000;

         if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
            attr->ComponentOverrideY = true;
         if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
            attr->ComponentOverrideZ = true;
         continue;

      case VARYING_SLOT_PRIMITIVE_ID:
         /* Override if the previous shader stage didn't write gl_PrimitiveID. */
         if (slot == -1) {
            attr->ComponentOverrideX = true;
            attr->ComponentOverrideY = true;
            attr->ComponentOverrideZ = true;
            attr->ComponentOverrideW = true;
            attr->ConstantSource = PRIM_ID;
            continue;
         }

      default:
         break;
      }

      if (sprite_coord_enables & (1 << input_index))
         continue;

      /* If there was only a back color written but not front, use back
       * as the color instead of undefined.
       */
      if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
         slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
      if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
         slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];

      /* Not written by the previous stage - undefined. */
      if (slot == -1) {
         attr->ComponentOverrideX = true;
         attr->ComponentOverrideY = true;
         attr->ComponentOverrideZ = true;
         attr->ComponentOverrideW = true;
         attr->ConstantSource = CONST_0001_FLOAT;
         continue;
      }

      /* Compute the location of the attribute relative to the read offset,
       * which is counted in 256-bit increments (two 128-bit VUE slots).
       */
      const int source_attr = slot - 2 * urb_read_offset;
      assert(source_attr >= 0 && source_attr <= 32);
      attr->SourceAttribute = source_attr;

      /* If we are doing two-sided color, and the VUE slot following this one
       * represents a back-facing color, then we need to instruct the SF unit
       * to do back-facing swizzling.
       */
      if (cso_rast->light_twoside &&
          ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
            vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
           (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
            vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1)))
         attr->SwizzleSelect = INPUTATTR_FACING;
   }

   iris_emit_cmd(batch, GENX(3DSTATE_SBE_SWIZ), sbes) {
      for (int i = 0; i < 16; i++)
         sbes.Attribute[i] = attr_overrides[i];
   }
}
static unsigned
iris_calculate_point_sprite_overrides(const struct brw_wm_prog_data *prog_data,
                                      const struct iris_rasterizer_state *cso)
{
   unsigned overrides = 0;

   if (prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
      overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_PNTC];

   for (int i = 0; i < 8; i++) {
      if ((cso->sprite_coord_enable & (1 << i)) &&
          prog_data->urb_setup[VARYING_SLOT_TEX0 + i] != -1)
         overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_TEX0 + i];
   }

   return overrides;
}
static void
iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
{
   const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
   const struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
   const struct shader_info *fs_info =
      iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);

   unsigned urb_read_offset, urb_read_length;
   iris_compute_sbe_urb_read_interval(fs_info->inputs_read,
                                      ice->shaders.last_vue_map,
                                      cso_rast->light_twoside,
                                      &urb_read_offset, &urb_read_length);

   unsigned sprite_coord_overrides =
      iris_calculate_point_sprite_overrides(wm_prog_data, cso_rast);

   iris_emit_cmd(batch, GENX(3DSTATE_SBE), sbe) {
      sbe.AttributeSwizzleEnable = true;
      sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
      sbe.PointSpriteTextureCoordinateOrigin = cso_rast->sprite_coord_mode;
      sbe.VertexURBEntryReadOffset = urb_read_offset;
      sbe.VertexURBEntryReadLength = urb_read_length;
      sbe.ForceVertexURBEntryReadOffset = true;
      sbe.ForceVertexURBEntryReadLength = true;
      sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
      sbe.PointSpriteTextureCoordinateEnable = sprite_coord_overrides;

      for (int i = 0; i < 32; i++) {
         sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
      }
   }

   iris_emit_sbe_swiz(batch, ice, urb_read_offset, sprite_coord_overrides);
}
/* ------------------------------------------------------------------- */

/**
 * Set sampler-related program key fields based on the current state.
 */
static void
iris_populate_sampler_key(const struct iris_context *ice,
                          struct brw_sampler_prog_key_data *key)
{
   for (int i = 0; i < MAX_SAMPLERS; i++) {
      key->swizzles[i] = 0x688; /* XYZW */
   }
}

/**
 * Populate VS program key fields based on the current state.
 */
static void
iris_populate_vs_key(const struct iris_context *ice,
                     struct brw_vs_prog_key *key)
{
   iris_populate_sampler_key(ice, &key->tex);
}

/**
 * Populate TCS program key fields based on the current state.
 */
static void
iris_populate_tcs_key(const struct iris_context *ice,
                      struct brw_tcs_prog_key *key)
{
   iris_populate_sampler_key(ice, &key->tex);
}

/**
 * Populate TES program key fields based on the current state.
 */
static void
iris_populate_tes_key(const struct iris_context *ice,
                      struct brw_tes_prog_key *key)
{
   iris_populate_sampler_key(ice, &key->tex);
}

/**
 * Populate GS program key fields based on the current state.
 */
static void
iris_populate_gs_key(const struct iris_context *ice,
                     struct brw_gs_prog_key *key)
{
   iris_populate_sampler_key(ice, &key->tex);
}
/**
 * Populate FS program key fields based on the current state.
 */
static void
iris_populate_fs_key(const struct iris_context *ice,
                     struct brw_wm_prog_key *key)
{
   iris_populate_sampler_key(ice, &key->tex);

   /* XXX: dirty flags? */
   const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
   const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa;
   const struct iris_rasterizer_state *rast = ice->state.cso_rast;
   const struct iris_blend_state *blend = ice->state.cso_blend;

   key->nr_color_regions = fb->nr_cbufs;

   key->clamp_fragment_color = rast->clamp_fragment_color;

   key->replicate_alpha = fb->nr_cbufs > 1 &&
      (zsa->alpha.enabled || blend->alpha_to_coverage);

   /* XXX: only bother if COL0/1 are read */
   key->flat_shade = rast->flatshade;

   key->persample_interp = rast->force_persample_interp;
   key->multisample_fbo = rast->multisample && fb->samples > 1;

   key->coherent_fb_fetch = true;

   // XXX: uint64_t input_slots_valid; - for >16 inputs

   // XXX: key->force_dual_color_blend for unigine
   // XXX: respect hint for high_quality_derivatives:1;
}
static void
iris_populate_cs_key(const struct iris_context *ice,
                     struct brw_cs_prog_key *key)
{
   iris_populate_sampler_key(ice, &key->tex);
}
#if 0
   // XXX: these need to go in INIT_THREAD_DISPATCH_FIELDS
   pkt.SamplerCount =                                                     \
      DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4);          \
   pkt.PerThreadScratchSpace = prog_data->total_scratch == 0 ? 0 :        \
      ffs(stage_state->per_thread_scratch) - 11;                          \
#endif
static uint32_t
KSP(const struct iris_compiled_shader *shader)
{
   struct iris_resource *res = (void *) shader->assembly.res;
   return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
}
// Gen11 workaround table #2056 WABTPPrefetchDisable suggests to disable
// prefetching of binding tables in A0 and B0 steppings.  XXX: Revisit
// this WA on C0 stepping.

#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix)                          \
   pkt.KernelStartPointer = KSP(shader);                                  \
   pkt.BindingTableEntryCount = GEN_GEN == 11 ? 0 :                       \
      prog_data->binding_table.size_bytes / 4;                            \
   pkt.FloatingPointMode = prog_data->use_alt_mode;                       \
                                                                          \
   pkt.DispatchGRFStartRegisterForURBData =                               \
      prog_data->dispatch_grf_start_reg;                                  \
   pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length;       \
   pkt.prefix##URBEntryReadOffset = 0;                                    \
                                                                          \
   pkt.StatisticsEnable = true;                                           \
   pkt.Enable           = true;
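
/* For reference (illustrative expansion): INIT_THREAD_DISPATCH_FIELDS(vs,
 * Vertex) in iris_store_vs_state() below token-pastes the prefix to produce
 * vs.VertexURBEntryReadLength = vue_prog_data->urb_read_length and
 * vs.VertexURBEntryReadOffset = 0, alongside the common kernel start
 * pointer, binding table, and statistics fields.
 */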
/**
 * Encode most of 3DSTATE_VS based on the compiled shader.
 */
static void
iris_store_vs_state(const struct gen_device_info *devinfo,
                    struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;

   iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
      INIT_THREAD_DISPATCH_FIELDS(vs, Vertex);
      vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
      vs.SIMD8DispatchEnable = true;
      vs.UserClipDistanceCullTestEnableBitmask =
         vue_prog_data->cull_distance_mask;
   }
}
/**
 * Encode most of 3DSTATE_HS based on the compiled shader.
 */
static void
iris_store_tcs_state(const struct gen_device_info *devinfo,
                     struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
   struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;

   iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
      INIT_THREAD_DISPATCH_FIELDS(hs, Vertex);

      hs.InstanceCount = tcs_prog_data->instances - 1;
      hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
      hs.IncludeVertexHandles = true;
   }
}
/**
 * Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
 */
static void
iris_store_tes_state(const struct gen_device_info *devinfo,
                     struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
   struct brw_tes_prog_data *tes_prog_data = (void *) prog_data;

   uint32_t *te_state = (void *) shader->derived_data;
   uint32_t *ds_state = te_state + GENX(3DSTATE_TE_length);

   iris_pack_command(GENX(3DSTATE_TE), te_state, te) {
      te.Partitioning = tes_prog_data->partitioning;
      te.OutputTopology = tes_prog_data->output_topology;
      te.TEDomain = tes_prog_data->domain;
      te.TEEnable = true;
      te.MaximumTessellationFactorOdd = 63.0;
      te.MaximumTessellationFactorNotOdd = 64.0;
   }

   iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
      INIT_THREAD_DISPATCH_FIELDS(ds, Patch);

      ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
      ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
      ds.ComputeWCoordinateEnable =
         tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;

      ds.UserClipDistanceCullTestEnableBitmask =
         vue_prog_data->cull_distance_mask;
   }
}
/**
 * Encode most of 3DSTATE_GS based on the compiled shader.
 */
static void
iris_store_gs_state(const struct gen_device_info *devinfo,
                    struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
   struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;

   iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
      INIT_THREAD_DISPATCH_FIELDS(gs, Vertex);

      gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
      gs.OutputTopology = gs_prog_data->output_topology;
      gs.ControlDataHeaderSize =
         gs_prog_data->control_data_header_size_hwords;
      gs.InstanceControl = gs_prog_data->invocations - 1;
      gs.DispatchMode = DISPATCH_MODE_SIMD8;
      gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
      gs.ControlDataFormat = gs_prog_data->control_data_format;
      gs.ReorderMode = TRAILING;
      gs.ExpectedVertexCount = gs_prog_data->vertices_in;
      gs.MaximumNumberofThreads =
         GEN_GEN == 8 ? (devinfo->max_gs_threads / 2 - 1)
                      : (devinfo->max_gs_threads - 1);

      if (gs_prog_data->static_vertex_count != -1) {
         gs.StaticOutput = true;
         gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
      }
      gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;

      gs.UserClipDistanceCullTestEnableBitmask =
         vue_prog_data->cull_distance_mask;

      const int urb_entry_write_offset = 1;
      const uint32_t urb_entry_output_length =
         DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
         urb_entry_write_offset;

      gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
      gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
   }
}
/**
 * Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
 */
static void
iris_store_fs_state(const struct gen_device_info *devinfo,
                    struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_wm_prog_data *wm_prog_data = (void *) shader->prog_data;

   uint32_t *ps_state = (void *) shader->derived_data;
   uint32_t *psx_state = ps_state + GENX(3DSTATE_PS_length);

   iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
      ps.VectorMaskEnable = true;
      //ps.SamplerCount = ...
      // XXX: WABTPPrefetchDisable, see above, drop at C0
      ps.BindingTableEntryCount = GEN_GEN == 11 ? 0 :
         prog_data->binding_table.size_bytes / 4;
      ps.FloatingPointMode = prog_data->use_alt_mode;
      ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);

      ps.PushConstantEnable = prog_data->nr_params > 0 ||
                              prog_data->ubo_ranges[0].length > 0;

      /* From the documentation for this packet:
       * "If the PS kernel does not need the Position XY Offsets to
       *  compute a Position Value, then this field should be programmed
       *  to POSOFFSET_NONE."
       *
       * "SW Recommendation: If the PS kernel needs the Position Offsets
       *  to compute a Position XY value, this field should match Position
       *  ZW Interpolation Mode to ensure a consistent position.xyzw
       *  computation."
       *
       * We only require XY sample offsets.  So, this recommendation doesn't
       * look useful at the moment.  We might need this in future.
       */
      ps.PositionXYOffsetSelect =
         wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;
      ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
      ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
      ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;

      // XXX: Disable SIMD32 with 16x MSAA

      ps.DispatchGRFStartRegisterForConstantSetupData0 =
         brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
      ps.DispatchGRFStartRegisterForConstantSetupData1 =
         brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
      ps.DispatchGRFStartRegisterForConstantSetupData2 =
         brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);

      ps.KernelStartPointer0 =
         KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
      ps.KernelStartPointer1 =
         KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
      ps.KernelStartPointer2 =
         KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
   }

   iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
      psx.PixelShaderValid = true;
      psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
      psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
      psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
      psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
      psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
      psx.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;

      if (wm_prog_data->uses_sample_mask) {
         /* TODO: conservative rasterization */
         if (wm_prog_data->post_depth_coverage)
            psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
         else
            psx.InputCoverageMaskState = ICMS_NORMAL;
      }

      psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
      psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
      psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
   }
}
static void
iris_store_cs_state(const struct gen_device_info *devinfo,
                    struct iris_compiled_shader *shader)
{
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_cs_prog_data *cs_prog_data = (void *) shader->prog_data;
   void *map = shader->derived_data;

   iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
      desc.KernelStartPointer = KSP(shader);
      desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
      desc.NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads;
      desc.SharedLocalMemorySize =
         encode_slm_size(GEN_GEN, prog_data->total_shared);
      desc.BarrierEnable = cs_prog_data->uses_barrier;
      desc.CrossThreadConstantDataReadLength =
         cs_prog_data->push.cross_thread.regs;
   }
}

/**
 * Compute the size of the derived data (shader command packets).
 *
 * This must match the data written by the iris_store_xs_state() functions.
 */
static unsigned
iris_derived_program_state_size(enum iris_program_cache_id cache_id)
{
   assert(cache_id <= IRIS_CACHE_BLORP);

   static const unsigned dwords[] = {
      [IRIS_CACHE_VS] = GENX(3DSTATE_VS_length),
      [IRIS_CACHE_TCS] = GENX(3DSTATE_HS_length),
      [IRIS_CACHE_TES] = GENX(3DSTATE_TE_length) + GENX(3DSTATE_DS_length),
      [IRIS_CACHE_GS] = GENX(3DSTATE_GS_length),
      [IRIS_CACHE_FS] =
         GENX(3DSTATE_PS_length) + GENX(3DSTATE_PS_EXTRA_length),
      [IRIS_CACHE_CS] = GENX(INTERFACE_DESCRIPTOR_DATA_length),
      [IRIS_CACHE_BLORP] = 0,
   };

   return sizeof(uint32_t) * dwords[cache_id];
}
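
/* Example (illustrative): for IRIS_CACHE_TES this returns
 * sizeof(uint32_t) * (GENX(3DSTATE_TE_length) + GENX(3DSTATE_DS_length)),
 * exactly the space iris_store_tes_state() fills - 3DSTATE_TE followed by
 * 3DSTATE_DS in one contiguous derived_data block.
 */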
/**
 * Create any state packets corresponding to the given shader stage
 * (i.e. 3DSTATE_VS) and save them as "derived data" in the shader variant.
 * This means that we can look up a program in the in-memory cache and
 * get most of the state packet without having to reconstruct it.
 */
static void
iris_store_derived_program_state(const struct gen_device_info *devinfo,
                                 enum iris_program_cache_id cache_id,
                                 struct iris_compiled_shader *shader)
{
   switch (cache_id) {
   case IRIS_CACHE_VS:
      iris_store_vs_state(devinfo, shader);
      break;
   case IRIS_CACHE_TCS:
      iris_store_tcs_state(devinfo, shader);
      break;
   case IRIS_CACHE_TES:
      iris_store_tes_state(devinfo, shader);
      break;
   case IRIS_CACHE_GS:
      iris_store_gs_state(devinfo, shader);
      break;
   case IRIS_CACHE_FS:
      iris_store_fs_state(devinfo, shader);
      break;
   case IRIS_CACHE_CS:
      iris_store_cs_state(devinfo, shader);
      break;
   case IRIS_CACHE_BLORP:
      break;
   default:
      break;
   }
}
/* ------------------------------------------------------------------- */

/**
 * Configure the URB.
 *
 * XXX: write a real comment.
 */
static void
iris_upload_urb_config(struct iris_context *ice, struct iris_batch *batch)
{
   const struct gen_device_info *devinfo = &batch->screen->devinfo;
   const unsigned push_size_kB = 32;
   unsigned entries[4];
   unsigned start[4];
   unsigned size[4];

   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      if (!ice->shaders.prog[i]) {
         size[i] = 1;
      } else {
         struct brw_vue_prog_data *vue_prog_data =
            (void *) ice->shaders.prog[i]->prog_data;
         size[i] = vue_prog_data->urb_entry_size;
      }
      assert(size[i] != 0);
   }

   gen_get_urb_config(devinfo, 1024 * push_size_kB,
                      1024 * ice->shaders.urb_size,
                      ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
                      ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,
                      size, entries, start);

   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
         urb._3DCommandSubOpcode += i;
         urb.VSURBStartingAddress = start[i];
         urb.VSURBEntryAllocationSize = size[i] - 1;
         urb.VSNumberofURBEntries = entries[i];
      }
   }
}
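
/* A note on the loop above (illustrative): 3DSTATE_URB_VS, _HS, _DS, and
 * _GS share a layout and differ only in the command sub-opcode, so we pack
 * the VS variant once and add i (0..3) to _3DCommandSubOpcode to address
 * each stage's URB allocation in turn.
 */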
static const uint32_t push_constant_opcodes[] = {
   [MESA_SHADER_VERTEX]    = 21,
   [MESA_SHADER_TESS_CTRL] = 25, /* HS */
   [MESA_SHADER_TESS_EVAL] = 26, /* DS */
   [MESA_SHADER_GEOMETRY]  = 22,
   [MESA_SHADER_FRAGMENT]  = 23,
   [MESA_SHADER_COMPUTE]   = 0,
};
static uint32_t
use_null_surface(struct iris_batch *batch, struct iris_context *ice)
{
   struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);

   iris_use_pinned_bo(batch, state_bo, false);

   return ice->state.unbound_tex.offset;
}
static uint32_t
use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
{
   struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);

   iris_use_pinned_bo(batch, state_bo, false);

   return ice->state.null_fb.offset;
}
/**
 * Add a surface to the validation list, as well as the buffer containing
 * the corresponding SURFACE_STATE.
 *
 * Returns the binding table entry (offset to SURFACE_STATE).
 */
static uint32_t
use_surface(struct iris_batch *batch,
            struct pipe_surface *p_surf,
            bool writeable)
{
   struct iris_surface *surf = (void *) p_surf;

   iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture), writeable);
   iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.res), false);

   return surf->surface_state.offset;
}
static uint32_t
use_sampler_view(struct iris_batch *batch, struct iris_sampler_view *isv)
{
   iris_use_pinned_bo(batch, isv->res->bo, false);
   iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.res), false);

   return isv->surface_state.offset;
}
static uint32_t
use_const_buffer(struct iris_batch *batch,
                 struct iris_context *ice,
                 struct iris_const_buffer *cbuf)
{
   if (!cbuf->surface_state.res)
      return use_null_surface(batch, ice);

   iris_use_pinned_bo(batch, iris_resource_bo(cbuf->data.res), false);
   iris_use_pinned_bo(batch, iris_resource_bo(cbuf->surface_state.res), false);

   return cbuf->surface_state.offset;
}
static uint32_t
use_ssbo(struct iris_batch *batch, struct iris_context *ice,
         struct iris_shader_state *shs, int i)
{
   if (!shs->ssbo[i])
      return use_null_surface(batch, ice);

   struct iris_state_ref *surf_state = &shs->ssbo_surface_state[i];

   iris_use_pinned_bo(batch, iris_resource_bo(shs->ssbo[i]), true);
   iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false);

   return surf_state->offset;
}
static uint32_t
use_image(struct iris_batch *batch, struct iris_context *ice,
          struct iris_shader_state *shs, int i)
{
   if (!shs->image[i].res)
      return use_null_surface(batch, ice);

   struct iris_state_ref *surf_state = &shs->image[i].surface_state;

   iris_use_pinned_bo(batch, iris_resource_bo(shs->image[i].res),
                      shs->image[i].access & PIPE_IMAGE_ACCESS_WRITE);
   iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false);

   return surf_state->offset;
}
#define push_bt_entry(addr) \
   assert(addr >= binder_addr); \
   if (!pin_only) bt_map[s++] = (addr) - binder_addr;
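
/* Example (illustrative): if the binder BO lives at binder_addr and
 * use_surface() returns binder_addr + 0x140, the entry written is 0x140 -
 * binding table entries are offsets relative to the binder, not full
 * graphics addresses, which the assert above checks.  With pin_only set,
 * the use_*() calls still pin the BOs but we skip rewriting the table.
 */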
/**
 * Populate the binding table for a given shader stage.
 *
 * This fills out the table of pointers to surfaces required by the shader,
 * and also adds those buffers to the validation list so the kernel can make
 * them resident before running our batch.
 */
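/* Entries must land in the exact slots the shader compiler assigned them:
 * for the fragment shader that's render targets first, then textures,
 * images, UBOs, and SSBOs/ABOs, since binding table indices are baked into
 * the compiled program.
 */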
static void
iris_populate_binding_table(struct iris_context *ice,
                            struct iris_batch *batch,
                            gl_shader_stage stage,
                            bool pin_only,
                            struct iris_state_ref *grid_size_surf)
{
   const struct iris_binder *binder = &ice->state.binder;
   struct iris_compiled_shader *shader = ice->shaders.prog[stage];
   if (!shader)
      return;

   struct iris_shader_state *shs = &ice->state.shaders[stage];
   uint32_t binder_addr = binder->bo->gtt_offset;

   //struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
   uint32_t *bt_map = binder->map + binder->bt_offset[stage];
   int s = 0;

   const struct shader_info *info = iris_get_shader_info(ice, stage);
   if (!info) {
      /* TCS passthrough doesn't need a binding table. */
      assert(stage == MESA_SHADER_TESS_CTRL);
      return;
   }

   if (stage == MESA_SHADER_COMPUTE) {
      /* surface for gl_NumWorkGroups */
      assert(grid_size_surf || pin_only);
      if (grid_size_surf) {
         struct iris_bo *bo = iris_resource_bo(grid_size_surf->res);
         iris_use_pinned_bo(batch, bo, false);
         push_bt_entry(grid_size_surf->offset);
      }
   }

   if (stage == MESA_SHADER_FRAGMENT) {
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      /* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
      if (cso_fb->nr_cbufs) {
         for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
            uint32_t addr =
               cso_fb->cbufs[i] ? use_surface(batch, cso_fb->cbufs[i], true)
                                : use_null_fb_surface(batch, ice);
            push_bt_entry(addr);
         }
      } else {
         uint32_t addr = use_null_fb_surface(batch, ice);
         push_bt_entry(addr);
      }
   }

   //assert(prog_data->binding_table.texture_start ==
   //       (ice->state.num_textures[stage] ? s : 0xd0d0d0d0));

   for (int i = 0; i < shs->num_textures; i++) {
      struct iris_sampler_view *view = shs->textures[i];
      uint32_t addr = view ? use_sampler_view(batch, view)
                           : use_null_surface(batch, ice);
      push_bt_entry(addr);
   }

   for (int i = 0; i < info->num_images; i++) {
      uint32_t addr = use_image(batch, ice, shs, i);
      push_bt_entry(addr);
   }

   const int num_ubos = iris_get_shader_num_ubos(ice, stage);

   for (int i = 0; i < num_ubos; i++) {
      uint32_t addr = use_const_buffer(batch, ice, &shs->constbuf[i]);
      push_bt_entry(addr);
   }

   /* XXX: st is wasting 16 binding table slots for ABOs.  Should add a cap
    * for changing nir_lower_atomics_to_ssbos setting and buffer_base offset
    * in st_atom_storagebuf.c so it'll compact them into one range, with
    * SSBOs starting at info->num_abos.  Ideally it'd reset num_abos to 0 too
    */
   if (info->num_abos + info->num_ssbos > 0) {
      for (int i = 0; i < IRIS_MAX_ABOS + info->num_ssbos; i++) {
         uint32_t addr = use_ssbo(batch, ice, shs, i);
         push_bt_entry(addr);
      }
   }

#if 0
   // XXX: not implemented yet
   assert(prog_data->binding_table.plane_start[1] == 0xd0d0d0d0);
   assert(prog_data->binding_table.plane_start[2] == 0xd0d0d0d0);
#endif
}
static void
iris_use_optional_res(struct iris_batch *batch,
                      struct pipe_resource *res,
                      bool writeable)
{
   if (res) {
      struct iris_bo *bo = iris_resource_bo(res);
      iris_use_pinned_bo(batch, bo, writeable);
   }
}
/* ------------------------------------------------------------------- */

/**
 * Pin any BOs which were installed by a previous batch, and restored
 * via the hardware logical context mechanism.
 *
 * We don't need to re-emit all state every batch - the hardware context
 * mechanism will save and restore it for us.  This includes pointers to
 * various BOs...which won't exist unless we ask the kernel to pin them
 * by adding them to the validation list.
 *
 * We can skip buffers if we've re-emitted those packets, as we're
 * overwriting those stale pointers with new ones, and don't actually
 * refer to the old BOs.
 */
static void
iris_restore_render_saved_bos(struct iris_context *ice,
                              struct iris_batch *batch,
                              const struct pipe_draw_info *draw)
{
   // XXX: whack IRIS_SHADER_DIRTY_BINDING_TABLE on new batch

   const uint64_t clean = ~ice->state.dirty;
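
   /* A set bit in "clean" marks state we are NOT re-emitting this batch;
    * that is exactly the state whose BOs the saved hardware context still
    * points at, and which therefore needs re-pinning.
    */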
   if (clean & IRIS_DIRTY_CC_VIEWPORT) {
      iris_use_optional_res(batch, ice->state.last_res.cc_vp, false);
   }

   if (clean & IRIS_DIRTY_SF_CL_VIEWPORT) {
      iris_use_optional_res(batch, ice->state.last_res.sf_cl_vp, false);
   }

   if (clean & IRIS_DIRTY_BLEND_STATE) {
      iris_use_optional_res(batch, ice->state.last_res.blend, false);
   }

   if (clean & IRIS_DIRTY_COLOR_CALC_STATE) {
      iris_use_optional_res(batch, ice->state.last_res.color_calc, false);
   }

   if (clean & IRIS_DIRTY_SCISSOR_RECT) {
      iris_use_optional_res(batch, ice->state.last_res.scissor, false);
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(clean & (IRIS_DIRTY_CONSTANTS_VS << stage)))
         continue;

      struct iris_shader_state *shs = &ice->state.shaders[stage];
      struct iris_compiled_shader *shader = ice->shaders.prog[stage];

      if (!shader)
         continue;

      struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;

      for (int i = 0; i < 4; i++) {
         const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];

         if (range->length == 0)
            continue;

         struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
         struct iris_resource *res = (void *) cbuf->data.res;

         if (res)
            iris_use_pinned_bo(batch, res->bo, false);
         else
            iris_use_pinned_bo(batch, batch->screen->workaround_bo, false);
      }
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (clean & (IRIS_DIRTY_BINDINGS_VS << stage)) {
         /* Re-pin any buffers referred to by the binding table. */
         iris_populate_binding_table(ice, batch, stage, true, NULL);
      }
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      struct iris_shader_state *shs = &ice->state.shaders[stage];
      struct pipe_resource *res = shs->sampler_table.res;
      if (res)
         iris_use_pinned_bo(batch, iris_resource_bo(res), false);
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (clean & (IRIS_DIRTY_VS << stage)) {
         struct iris_compiled_shader *shader = ice->shaders.prog[stage];
         if (shader) {
            struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
            iris_use_pinned_bo(batch, bo, false);
         }

         // XXX: scratch buffer
      }
   }

   if (clean & IRIS_DIRTY_DEPTH_BUFFER) {
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;

      if (cso_fb->zsbuf) {
         struct iris_resource *zres, *sres;
         iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
                                          &zres, &sres);
         // XXX: might not be writable...
         if (zres)
            iris_use_pinned_bo(batch, zres->bo, true);
         if (sres)
            iris_use_pinned_bo(batch, sres->bo, true);
      }
   }

   if (draw->index_size == 0 && ice->state.last_res.index_buffer) {
      /* This draw didn't emit a new index buffer, so we are inheriting the
       * older index buffer.  This draw didn't need it, but future ones may.
       */
      struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
      iris_use_pinned_bo(batch, bo, false);
   }

   if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
      struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;
      for (unsigned i = 0; i < cso->num_buffers; i++) {
         struct iris_resource *res = (void *) cso->resources[i];
         iris_use_pinned_bo(batch, res->bo, false);
      }
   }
}
static void
iris_restore_compute_saved_bos(struct iris_context *ice,
                               struct iris_batch *batch,
                               const struct pipe_grid_info *grid)
{
   const uint64_t clean = ~ice->state.dirty;

   const int stage = MESA_SHADER_COMPUTE;
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   if (clean & IRIS_DIRTY_CONSTANTS_CS) {
      struct iris_compiled_shader *shader = ice->shaders.prog[stage];
      if (shader) {
         struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
         const struct brw_ubo_range *range = &prog_data->ubo_ranges[0];

         if (range->length > 0) {
            struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
            struct iris_resource *res = (void *) cbuf->data.res;

            if (res)
               iris_use_pinned_bo(batch, res->bo, false);
            else
               iris_use_pinned_bo(batch, batch->screen->workaround_bo, false);
         }
      }
   }

   if (clean & IRIS_DIRTY_BINDINGS_CS) {
      /* Re-pin any buffers referred to by the binding table. */
      iris_populate_binding_table(ice, batch, stage, true, NULL);
   }

   struct pipe_resource *sampler_res = shs->sampler_table.res;
   if (sampler_res)
      iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false);

   if (clean & IRIS_DIRTY_CS) {
      struct iris_compiled_shader *shader = ice->shaders.prog[stage];
      if (shader) {
         struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
         iris_use_pinned_bo(batch, bo, false);
      }

      // XXX: scratch buffer
   }
}
/**
 * Possibly emit STATE_BASE_ADDRESS to update Surface State Base Address.
 */
static void
iris_update_surface_base_address(struct iris_batch *batch,
                                 struct iris_binder *binder)
{
   if (batch->last_surface_base_address == binder->bo->gtt_offset)
      return;
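
   /* Changing Surface State Base Address invalidates previously-emitted
    * base-relative offsets, so before reprogramming it we have to flush
    * render caches and invalidate state caches;
    * flush_for_state_base_change() emits the required PIPE_CONTROLs.
    */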
   flush_for_state_base_change(batch);

   iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
      // XXX: sba.SurfaceStateMemoryObjectControlState = MOCS_WB;
      sba.SurfaceStateBaseAddressModifyEnable = true;
      sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);
   }

   batch->last_surface_base_address = binder->bo->gtt_offset;
}
static void
iris_upload_dirty_render_state(struct iris_context *ice,
                               struct iris_batch *batch,
                               const struct pipe_draw_info *draw)
{
   const uint64_t dirty = ice->state.dirty;

   struct iris_genx_state *genx = ice->state.genx;
   struct iris_binder *binder = &ice->state.binder;
   struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
   if (dirty & IRIS_DIRTY_CC_VIEWPORT) {
      const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
      uint32_t cc_vp_address;

      /* XXX: could avoid streaming for depth_clip [0,1] case. */
      uint32_t *cc_vp_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.cc_vp,
                      4 * ice->state.num_viewports *
                      GENX(CC_VIEWPORT_length), 32, &cc_vp_address);

      for (int i = 0; i < ice->state.num_viewports; i++) {
         float zmin, zmax;
         util_viewport_zmin_zmax(&ice->state.viewports[i],
                                 cso_rast->clip_halfz, &zmin, &zmax);
         if (cso_rast->depth_clip_near)
            zmin = 0.0;
         if (cso_rast->depth_clip_far)
            zmax = 1.0;

         iris_pack_state(GENX(CC_VIEWPORT), cc_vp_map, ccv) {
            ccv.MinimumDepth = zmin;
            ccv.MaximumDepth = zmax;
         }

         cc_vp_map += GENX(CC_VIEWPORT_length);
      }

      iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
         ptr.CCViewportPointer = cc_vp_address;
      }
   }
   if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
      iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
         ptr.SFClipViewportPointer =
            emit_state(batch, ice->state.dynamic_uploader,
                       &ice->state.last_res.sf_cl_vp,
                       genx->sf_cl_vp, 4 * GENX(SF_CLIP_VIEWPORT_length) *
                       ice->state.num_viewports, 64);
      }
   }
   // XXX: this is only flagged at setup, we assume a static configuration
   if (dirty & IRIS_DIRTY_URB) {
      iris_upload_urb_config(ice, batch);
   }
   if (dirty & IRIS_DIRTY_BLEND_STATE) {
      struct iris_blend_state *cso_blend = ice->state.cso_blend;
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
      const int header_dwords = GENX(BLEND_STATE_length);
      const int rt_dwords = cso_fb->nr_cbufs * GENX(BLEND_STATE_ENTRY_length);
      uint32_t blend_offset;
      uint32_t *blend_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.blend,
                      4 * (header_dwords + rt_dwords), 64, &blend_offset);

      uint32_t blend_state_header;
      iris_pack_state(GENX(BLEND_STATE), &blend_state_header, bs) {
         bs.AlphaTestEnable = cso_zsa->alpha.enabled;
         bs.AlphaTestFunction = translate_compare_func(cso_zsa->alpha.func);
      }
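
      /* cso_blend->blend_state[0] is the pre-packed BLEND_STATE header with
       * the draw-time alpha-test fields left zero, so OR-ing in the header
       * packed above completes it without repacking the per-RT entries.
       */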
      blend_map[0] = blend_state_header | cso_blend->blend_state[0];
      memcpy(&blend_map[1], &cso_blend->blend_state[1], 4 * rt_dwords);

      iris_emit_cmd(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
         ptr.BlendStatePointer = blend_offset;
         ptr.BlendStatePointerValid = true;
      }
   }
   if (dirty & IRIS_DIRTY_COLOR_CALC_STATE) {
      struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
      uint32_t cc_offset;
      void *cc_map =
         stream_state(batch, ice->state.dynamic_uploader,
                      &ice->state.last_res.color_calc,
                      sizeof(uint32_t) * GENX(COLOR_CALC_STATE_length),
                      64, &cc_offset);

      iris_pack_state(GENX(COLOR_CALC_STATE), cc_map, cc) {
         cc.AlphaTestFormat = ALPHATEST_FLOAT32;
         cc.AlphaReferenceValueAsFLOAT32 = cso->alpha.ref_value;
         cc.BlendConstantColorRed   = ice->state.blend_color.color[0];
         cc.BlendConstantColorGreen = ice->state.blend_color.color[1];
         cc.BlendConstantColorBlue  = ice->state.blend_color.color[2];
         cc.BlendConstantColorAlpha = ice->state.blend_color.color[3];
      }

      iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
         ptr.ColorCalcStatePointer = cc_offset;
         ptr.ColorCalcStatePointerValid = true;
      }
   }
   /* Upload constants for TCS passthrough. */
   if ((dirty & IRIS_DIRTY_CONSTANTS_TCS) &&
       ice->shaders.prog[MESA_SHADER_TESS_CTRL] &&
       !ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL]) {
      struct iris_compiled_shader *tes_shader =
         ice->shaders.prog[MESA_SHADER_TESS_EVAL];

      /* Passthrough always copies 2 vec4s, so when uploading data we ensure
       * it is in the right layout for TES.
       */
      float hdr[8] = {};
      struct brw_tes_prog_data *tes_prog_data = (void *) tes_shader->prog_data;
      switch (tes_prog_data->domain) {
      case BRW_TESS_DOMAIN_QUAD:
         for (int i = 0; i < 4; i++)
            hdr[7 - i] = ice->state.default_outer_level[i];
         hdr[3] = ice->state.default_inner_level[0];
         hdr[2] = ice->state.default_inner_level[1];
         break;
      case BRW_TESS_DOMAIN_TRI:
         for (int i = 0; i < 3; i++)
            hdr[7 - i] = ice->state.default_outer_level[i];
         hdr[4] = ice->state.default_inner_level[0];
         break;
      case BRW_TESS_DOMAIN_ISOLINE:
         hdr[7] = ice->state.default_outer_level[1];
         hdr[6] = ice->state.default_outer_level[0];
         break;
      }

      struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
      struct iris_const_buffer *cbuf = &shs->constbuf[0];
      u_upload_data(ice->ctx.const_uploader, 0, sizeof(hdr), 32,
                    &hdr[0], &cbuf->data.offset,
                    &cbuf->data.res);
   }
   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(dirty & (IRIS_DIRTY_CONSTANTS_VS << stage)))
         continue;

      struct iris_shader_state *shs = &ice->state.shaders[stage];
      struct iris_compiled_shader *shader = ice->shaders.prog[stage];

      if (!shader)
         continue;

      struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;

      iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
         pkt._3DCommandSubOpcode = push_constant_opcodes[stage];

         /* The Skylake PRM contains the following restriction:
          *
          *    "The driver must ensure The following case does not occur
          *     without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
          *     buffer 3 read length equal to zero committed followed by a
          *     3DSTATE_CONSTANT_* with buffer 0 read length not equal to
          *     zero committed."
          *
          * To avoid this, we program the buffers in the highest slots.
          * This way, slot 0 is only used if slot 3 is also used.
          */
         int n = 3;

         for (int i = 3; i >= 0; i--) {
            const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];

            if (range->length == 0)
               continue;

            struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
            struct iris_resource *res = (void *) cbuf->data.res;

            assert(cbuf->data.offset % 32 == 0);

            pkt.ConstantBody.ReadLength[n] = range->length;
            pkt.ConstantBody.Buffer[n] =
               res ? ro_bo(res->bo, range->start * 32 + cbuf->data.offset)
                   : ro_bo(batch->screen->workaround_bo, 0);
            n--;
         }
      }
   }
   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
         iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
            ptr._3DCommandSubOpcode = 38 + stage;
            ptr.PointertoVSBindingTable = binder->bt_offset[stage];
         }
      }
   }

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
         iris_populate_binding_table(ice, batch, stage, false, NULL);
      }
   }
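
   /* Like the URB packets, the 3DSTATE_BINDING_TABLE_POINTERS_* and
    * 3DSTATE_SAMPLER_STATE_POINTERS_* packets use consecutive subopcodes
    * per stage, so "38 + stage" above and "43 + stage" below each select
    * the packet for the right stage.
    */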
   if (ice->state.need_border_colors)
      iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false);

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(dirty & (IRIS_DIRTY_SAMPLER_STATES_VS << stage)) ||
          !ice->shaders.prog[stage])
         continue;

      struct iris_shader_state *shs = &ice->state.shaders[stage];
      struct pipe_resource *res = shs->sampler_table.res;
      if (res)
         iris_use_pinned_bo(batch, iris_resource_bo(res), false);

      iris_emit_cmd(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
         ptr._3DCommandSubOpcode = 43 + stage;
         ptr.PointertoVSSamplerState = shs->sampler_table.offset;
      }
   }
   if (dirty & IRIS_DIRTY_MULTISAMPLE) {
      iris_emit_cmd(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
         ms.PixelLocation =
            ice->state.cso_rast->half_pixel_center ? CENTER : UL_CORNER;
         if (ice->state.framebuffer.samples > 0)
            ms.NumberofMultisamples = ffs(ice->state.framebuffer.samples) - 1;
      }
   }

   if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
      iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
         ms.SampleMask = MAX2(ice->state.sample_mask, 1);
      }
   }
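
   /* NumberofMultisamples is log2-encoded, and ffs(samples) - 1 computes
    * log2 for the power-of-two sample counts the hardware supports (e.g.
    * 8x MSAA packs as 3).  The MAX2 keeps SampleMask nonzero; we assume a
    * zero mask is not something the hardware should ever see.
    */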
   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!(dirty & (IRIS_DIRTY_VS << stage)))
         continue;

      struct iris_compiled_shader *shader = ice->shaders.prog[stage];

      if (shader) {
         struct iris_resource *cache = (void *) shader->assembly.res;
         iris_use_pinned_bo(batch, cache->bo, false);
         iris_batch_emit(batch, shader->derived_data,
                         iris_derived_program_state_size(stage));
      } else {
         if (stage == MESA_SHADER_TESS_EVAL) {
            iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
            iris_emit_cmd(batch, GENX(3DSTATE_TE), te);
            iris_emit_cmd(batch, GENX(3DSTATE_DS), ds);
         } else if (stage == MESA_SHADER_GEOMETRY) {
            iris_emit_cmd(batch, GENX(3DSTATE_GS), gs);
         }
      }
   }
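
   /* When a stage has no shader bound, emitting the bare packet (all
    * fields zero) disables it; bound stages instead memcpy the 3DSTATE_*
    * packets pre-packed into shader->derived_data when the shader was
    * compiled.
    */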
   if (ice->state.streamout_active) {
      if (dirty & IRIS_DIRTY_SO_BUFFERS) {
         iris_batch_emit(batch, genx->so_buffers,
                         4 * 4 * GENX(3DSTATE_SO_BUFFER_length));
         for (int i = 0; i < 4; i++) {
            struct iris_stream_output_target *tgt =
               (void *) ice->state.so_target[i];
            if (tgt) {
               iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
                                  true);
               iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
                                  true);
            }
         }
      }

      if ((dirty & IRIS_DIRTY_SO_DECL_LIST) && ice->state.streamout) {
         uint32_t *decl_list =
            ice->state.streamout + GENX(3DSTATE_STREAMOUT_length);
         iris_batch_emit(batch, decl_list, 4 * ((decl_list[0] & 0xff) + 2));
      }

      if (dirty & IRIS_DIRTY_STREAMOUT) {
         const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;

         uint32_t dynamic_sol[GENX(3DSTATE_STREAMOUT_length)];
         iris_pack_command(GENX(3DSTATE_STREAMOUT), dynamic_sol, sol) {
            sol.SOFunctionEnable = true;
            sol.SOStatisticsEnable = true;

            sol.RenderingDisable = cso_rast->rasterizer_discard &&
                                   !ice->state.prims_generated_query_active;
            sol.ReorderMode = cso_rast->flatshade_first ? LEADING : TRAILING;
         }

         assert(ice->state.streamout);

         iris_emit_merge(batch, ice->state.streamout, dynamic_sol,
                         GENX(3DSTATE_STREAMOUT_length));
      }
   } else {
      if (dirty & IRIS_DIRTY_STREAMOUT) {
         iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
      }
   }
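
   /* Note the RenderingDisable logic above: rasterizer discard would
    * normally disable streamout rendering, but while a primitives-generated
    * query is active we keep the SOL stage running so (we assume) its
    * statistics counters continue to increment.
    */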
   if (dirty & IRIS_DIRTY_CLIP) {
      struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;

      uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
      iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
         if (wm_prog_data->barycentric_interp_modes &
             BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
            cl.NonPerspectiveBarycentricEnable = true;

         cl.ForceZeroRTAIndexEnable = cso_fb->layers == 0;
         cl.MaximumVPIndex = ice->state.num_viewports - 1;
      }
      iris_emit_merge(batch, cso_rast->clip, dynamic_clip,
                      ARRAY_SIZE(cso_rast->clip));
   }
   if (dirty & IRIS_DIRTY_RASTER) {
      struct iris_rasterizer_state *cso = ice->state.cso_rast;
      iris_batch_emit(batch, cso->raster, sizeof(cso->raster));
      iris_batch_emit(batch, cso->sf, sizeof(cso->sf));
   }

   /* XXX: FS program updates need to flag IRIS_DIRTY_WM */
   if (dirty & IRIS_DIRTY_WM) {
      struct iris_rasterizer_state *cso = ice->state.cso_rast;
      uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];

      iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
         wm.BarycentricInterpolationMode =
            wm_prog_data->barycentric_interp_modes;

         if (wm_prog_data->early_fragment_tests)
            wm.EarlyDepthStencilControl = EDSC_PREPS;
         else if (wm_prog_data->has_side_effects)
            wm.EarlyDepthStencilControl = EDSC_PSEXEC;
      }
      iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
   }
   if (dirty & IRIS_DIRTY_SBE) {
      iris_emit_sbe(batch, ice);
   }

   if (dirty & IRIS_DIRTY_PS_BLEND) {
      struct iris_blend_state *cso_blend = ice->state.cso_blend;
      struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
      uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
      iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
         pb.HasWriteableRT = true; // XXX: comes from somewhere :(
         pb.AlphaTestEnable = cso_zsa->alpha.enabled;
      }

      iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
                      ARRAY_SIZE(cso_blend->ps_blend));
   }

   if (dirty & IRIS_DIRTY_WM_DEPTH_STENCIL) {
      struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
      struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;

      uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
      iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
         wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
         wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
      }
      iris_emit_merge(batch, cso->wmds, stencil_refs, ARRAY_SIZE(cso->wmds));
   }
   if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
      uint32_t scissor_offset =
         emit_state(batch, ice->state.dynamic_uploader,
                    &ice->state.last_res.scissor,
                    ice->state.scissors,
                    sizeof(struct pipe_scissor_state) *
                    ice->state.num_viewports, 32);

      iris_emit_cmd(batch, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
         ptr.ScissorRectPointer = scissor_offset;
      }
   }

   if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
      struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
      struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;

      iris_batch_emit(batch, cso_z->packets, sizeof(cso_z->packets));

      if (cso_fb->zsbuf) {
         struct iris_resource *zres = (void *) cso_fb->zsbuf->texture;
         // XXX: depth might not be writable...
         iris_use_pinned_bo(batch, zres->bo, true);
      }
   }
   if (dirty & IRIS_DIRTY_POLYGON_STIPPLE) {
      iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
         for (int i = 0; i < 32; i++) {
            poly.PatternRow[i] = ice->state.poly_stipple.stipple[i];
         }
      }
   }

   if (dirty & IRIS_DIRTY_LINE_STIPPLE) {
      struct iris_rasterizer_state *cso = ice->state.cso_rast;
      iris_batch_emit(batch, cso->line_stipple, sizeof(cso->line_stipple));
   }
   if (dirty & IRIS_DIRTY_VF_TOPOLOGY) {
      iris_emit_cmd(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
         topo.PrimitiveTopologyType =
            translate_prim_type(draw->mode, draw->vertices_per_patch);
      }
   }
   if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
      struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;
      const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);

      if (cso->num_buffers > 0) {
         iris_batch_emit(batch, cso->vertex_buffers, sizeof(uint32_t) *
                         (1 + vb_dwords * cso->num_buffers));

         for (unsigned i = 0; i < cso->num_buffers; i++) {
            struct iris_resource *res = (void *) cso->resources[i];
            iris_use_pinned_bo(batch, res->bo, false);
         }
      }
   }

   if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) {
      struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
      const unsigned entries = MAX2(cso->count, 1);

      iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) *
                      (1 + entries * GENX(VERTEX_ELEMENT_STATE_length)));
      iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) *
                      entries * GENX(3DSTATE_VF_INSTANCING_length));
   }
   if (dirty & IRIS_DIRTY_VF_SGVS) {
      const struct brw_vs_prog_data *vs_prog_data = (void *)
         ice->shaders.prog[MESA_SHADER_VERTEX]->prog_data;
      struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
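
      /* The VF unit can synthesize gl_VertexID and gl_InstanceID into the
       * components of an extra vertex element.  We place them in components
       * 2 and 3 of the element just past the user's cso->count elements,
       * which is where the compiled VS expects to find them.
       */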
      iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgv) {
         if (vs_prog_data->uses_vertexid) {
            sgv.VertexIDEnable = true;
            sgv.VertexIDComponentNumber = 2;
            sgv.VertexIDElementOffset = cso->count;
         }

         if (vs_prog_data->uses_instanceid) {
            sgv.InstanceIDEnable = true;
            sgv.InstanceIDComponentNumber = 3;
            sgv.InstanceIDElementOffset = cso->count;
         }
      }
   }

   if (dirty & IRIS_DIRTY_VF) {
      iris_emit_cmd(batch, GENX(3DSTATE_VF), vf) {
         if (draw->primitive_restart) {
            vf.IndexedDrawCutIndexEnable = true;
            vf.CutIndex = draw->restart_index;
         }
      }
   }

   // XXX: Gen8 - PMA fix
}
static void
iris_upload_render_state(struct iris_context *ice,
                         struct iris_batch *batch,
                         const struct pipe_draw_info *draw)
{
   /* Always pin the binder.  If we're emitting new binding table pointers,
    * we need it.  If not, we're probably inheriting old tables via the
    * context, and need it anyway.  Since true zero-bindings cases are
    * practically non-existent, just pin it and avoid last_res tracking.
    */
   iris_use_pinned_bo(batch, ice->state.binder.bo, false);

   iris_upload_dirty_render_state(ice, batch, draw);

   if (draw->index_size > 0) {
      unsigned offset;

      if (draw->has_user_indices) {
         u_upload_data(ice->ctx.stream_uploader, 0,
                       draw->count * draw->index_size, 4, draw->index.user,
                       &offset, &ice->state.last_res.index_buffer);
      } else {
         pipe_resource_reference(&ice->state.last_res.index_buffer,
                                 draw->index.resource);
         offset = 0;
      }

      struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
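
      /* index_size is 1, 2, or 4 bytes, and the hardware IndexFormat
       * encodings are INDEX_BYTE = 0, INDEX_WORD = 1, INDEX_DWORD = 2,
       * so ">> 1" maps one onto the other.
       */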
      iris_emit_cmd(batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
         ib.IndexFormat = draw->index_size >> 1;
         ib.BufferSize = bo->size;
         ib.BufferStartingAddress = ro_bo(bo, offset);
      }
   }
#define _3DPRIM_END_OFFSET          0x2420
#define _3DPRIM_START_VERTEX        0x2430
#define _3DPRIM_VERTEX_COUNT        0x2434
#define _3DPRIM_INSTANCE_COUNT      0x2438
#define _3DPRIM_START_INSTANCE      0x243C
#define _3DPRIM_BASE_VERTEX         0x2440
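
   /* These are the MMIO registers that 3DPRIMITIVE reads its parameters
    * from when IndirectParameterEnable is set.  For indirect draws we load
    * them from the application's indirect buffer via MI_LOAD_REGISTER_MEM;
    * the offsets below follow GL's draw-indirect layout (count,
    * instance_count, first, then base_vertex/base_instance for indexed
    * draws or just base_instance otherwise).
    */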
   if (draw->indirect) {
      /* We don't support MultiDrawIndirect. */
      assert(!draw->indirect->indirect_draw_count);

      struct iris_bo *bo = iris_resource_bo(draw->indirect->buffer);

      iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
         lrm.RegisterAddress = _3DPRIM_VERTEX_COUNT;
         lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 0);
      }
      iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
         lrm.RegisterAddress = _3DPRIM_INSTANCE_COUNT;
         lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 4);
      }
      iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
         lrm.RegisterAddress = _3DPRIM_START_VERTEX;
         lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 8);
      }

      if (draw->index_size) {
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
            lrm.RegisterAddress = _3DPRIM_BASE_VERTEX;
            lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
         }
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
            lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
            lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 16);
         }
      } else {
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
            lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
            lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
         }
         iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
            lri.RegisterOffset = _3DPRIM_BASE_VERTEX;
            lri.DataDWord = 0;
         }
      }
   }
   iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
      prim.StartInstanceLocation = draw->start_instance;
      prim.InstanceCount = draw->instance_count;
      prim.VertexCountPerInstance = draw->count;
      prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;

      // XXX: this is probably bonkers.
      prim.StartVertexLocation = draw->start;

      prim.IndirectParameterEnable = draw->indirect != NULL;

      if (draw->index_size) {
         prim.BaseVertexLocation += draw->index_bias;
      } else {
         prim.StartVertexLocation += draw->index_bias;
      }

      //prim.BaseVertexLocation = ...;
   }

   if (!batch->contains_draw) {
      iris_restore_render_saved_bos(ice, batch, draw);
      batch->contains_draw = true;
   }
}
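
/* Upload all compute state for a grid launch: the gl_NumWorkGroups
 * surface, the binding table, MEDIA_VFE_STATE, the CURBE push constants,
 * the interface descriptor, and finally the GPGPU_WALKER dispatch.
 */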
static void
iris_upload_compute_state(struct iris_context *ice,
                          struct iris_batch *batch,
                          const struct pipe_grid_info *grid)
{
   const uint64_t dirty = ice->state.dirty;
   struct iris_screen *screen = batch->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_binder *binder = &ice->state.binder;
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
   struct iris_compiled_shader *shader =
      ice->shaders.prog[MESA_SHADER_COMPUTE];
   struct brw_stage_prog_data *prog_data = shader->prog_data;
   struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;

   struct pipe_resource *grid_size_res = NULL;
   uint32_t grid_size_offset;
   if (grid->indirect) {
      grid_size_res = grid->indirect;
      grid_size_offset = grid->indirect_offset;
   } else {
      uint32_t *grid_size_map =
         stream_state(batch, ice->state.surface_uploader, &grid_size_res,
                      12, 4, &grid_size_offset);
      grid_size_map[0] = grid->grid[0];
      grid_size_map[1] = grid->grid[1];
      grid_size_map[2] = grid->grid[2];
      struct iris_bo *grid_size_bo = iris_resource_bo(grid_size_res);
      grid_size_offset -= iris_bo_offset_from_base_address(grid_size_bo);
   }

   struct iris_state_ref grid_size_surf;
   memset(&grid_size_surf, 0, sizeof(grid_size_surf));
   void *grid_surf_state_map =
      upload_state(ice->state.surface_uploader, &grid_size_surf,
                   4 * GENX(RENDER_SURFACE_STATE_length), 64);
   assert(grid_surf_state_map);
   struct iris_bo *grid_size_bo = iris_resource_bo(grid_size_res);
   iris_use_pinned_bo(batch, grid_size_bo, false);
   grid_size_surf.offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(grid_size_surf.res));
   isl_buffer_fill_state(&screen->isl_dev, grid_surf_state_map,
                         .address =
                            grid_size_bo->gtt_offset + grid_size_offset,
                         .size = 12,
                         .format = ISL_FORMAT_RAW,
                         .stride = 1);
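
   /* The three DWords referenced above back the gl_NumWorkGroups surface;
    * iris_populate_binding_table() places it in binding table slot 0 for
    * compute shaders.
    */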
   if (dirty & IRIS_DIRTY_BINDINGS_CS || grid_size_res)
      iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false,
                                  &grid_size_surf);

   iris_use_optional_res(batch, shs->sampler_table.res, false);
   iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false);

   if (ice->state.need_border_colors)
      iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false);
   /* The MEDIA_VFE_STATE documentation for Gen8+ says:
    *
    *   "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
    *    the only bits that are changed are scoreboard related: Scoreboard
    *    Enable, Scoreboard Type, Scoreboard Mask, Scoreboard * Delta.  For
    *    these scoreboard related states, a MEDIA_STATE_FLUSH is sufficient."
    */
   iris_emit_pipe_control_flush(batch, PIPE_CONTROL_CS_STALL);

   iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
      if (prog_data->total_scratch) {
         /* Per Thread Scratch Space is in the range [0, 11] where
          * 0 = 1k, 1 = 2k, 2 = 4k, ..., 11 = 2M.
          */
         // XXX: vfe.ScratchSpaceBasePointer
         //vfe.PerThreadScratchSpace =
         //   ffs(stage_state->per_thread_scratch) - 11;
      }

      vfe.MaximumNumberofThreads =
         devinfo->max_cs_threads * screen->subslice_total - 1;

      vfe.ResetGatewayTimer =
         Resettingrelativetimerandlatchingtheglobaltimestamp;

      vfe.NumberofURBEntries = 2;
      vfe.URBEntryAllocationSize = 2;

      // XXX: Use Indirect Payload Storage?

      vfe.CURBEAllocationSize =
         ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
               cs_prog_data->push.cross_thread.regs, 2);
   }

   // XXX: hack iris_set_constant_buffers to upload compute shader constants
   // XXX: differently...?
= 0;
4361 // TODO: Move subgroup-id into uniforms ubo so we can push uniforms
4362 assert(cs_prog_data
->push
.cross_thread
.dwords
== 0 &&
4363 cs_prog_data
->push
.per_thread
.dwords
== 1 &&
4364 cs_prog_data
->base
.param
[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID
);
4365 struct pipe_resource
*curbe_data_res
= NULL
;
4366 uint32_t *curbe_data_map
=
4367 stream_state(batch
, ice
->state
.dynamic_uploader
, &curbe_data_res
,
4368 ALIGN(cs_prog_data
->push
.total
.size
, 64), 64,
4369 &curbe_data_offset
);
4370 assert(curbe_data_map
);
4371 memset(curbe_data_map
, 0x5a, ALIGN(cs_prog_data
->push
.total
.size
, 64));
4372 iris_fill_cs_push_const_buffer(cs_prog_data
, curbe_data_map
);
4373 iris_emit_cmd(batch
, GENX(MEDIA_CURBE_LOAD
), curbe
) {
4374 curbe
.CURBETotalDataLength
=
4375 ALIGN(cs_prog_data
->push
.total
.size
, 64);
4376 curbe
.CURBEDataStartAddress
= curbe_data_offset
;
   struct pipe_resource *desc_res = NULL;
   uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];

   iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
      idd.SamplerStatePointer = shs->sampler_table.offset;
      idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
      idd.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
#if GEN_GEN >= 8 || GEN_IS_HASWELL
      idd.CrossThreadConstantDataReadLength =
         cs_prog_data->push.cross_thread.regs;
#endif
   }

   for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
      desc[i] |= ((uint32_t *) shader->derived_data)[i];

   iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
      load.InterfaceDescriptorTotalLength =
         GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
      load.InterfaceDescriptorDataStartAddress =
         emit_state(batch, ice->state.dynamic_uploader,
                    &desc_res, desc, sizeof(desc), 32);
   }

   pipe_resource_reference(&desc_res, NULL);
   // XXX: grid->indirect

   uint32_t group_size = grid->block[0] * grid->block[1] * grid->block[2];
   uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);
   uint32_t right_mask;

   if (remainder > 0)
      right_mask = ~0u >> (32 - remainder);
   else
      right_mask = ~0u >> (32 - cs_prog_data->simd_size);
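
   /* right_mask is the execution mask for the last, possibly partial, HW
    * thread of each workgroup.  For example, a 24-invocation workgroup at
    * SIMD16 leaves remainder = 8, so the final thread executes with mask
    * 0x00ff: only its low eight channels enabled.
    */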
   iris_emit_cmd(batch, GENX(GPGPU_WALKER), ggw) {
      ggw.SIMDSize = cs_prog_data->simd_size / 16;
      ggw.ThreadDepthCounterMaximum = 0;
      ggw.ThreadHeightCounterMaximum = 0;
      ggw.ThreadWidthCounterMaximum = cs_prog_data->threads - 1;
      ggw.ThreadGroupIDXDimension = grid->grid[0];
      ggw.ThreadGroupIDYDimension = grid->grid[1];
      ggw.ThreadGroupIDZDimension = grid->grid[2];
      ggw.RightExecutionMask = right_mask;
      ggw.BottomExecutionMask = 0xffffffff;
   }

   iris_emit_cmd(batch, GENX(MEDIA_STATE_FLUSH), msf);

   if (!batch->contains_draw) {
      iris_restore_compute_saved_bos(ice, batch, grid);
      batch->contains_draw = true;
   }
}
/**
 * State module teardown.
 */
static void
iris_destroy_state(struct iris_context *ice)
{
   iris_free_vertex_buffers(&ice->state.genx->vertex_buffers);

   // XXX: unreference resources/surfaces.
   for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
      pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
   }
   pipe_surface_reference(&ice->state.framebuffer.zsbuf, NULL);

   for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
      struct iris_shader_state *shs = &ice->state.shaders[stage];
      pipe_resource_reference(&shs->sampler_table.res, NULL);
   }
   free(ice->state.genx);

   pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
   pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
   pipe_resource_reference(&ice->state.last_res.color_calc, NULL);
   pipe_resource_reference(&ice->state.last_res.scissor, NULL);
   pipe_resource_reference(&ice->state.last_res.blend, NULL);
   pipe_resource_reference(&ice->state.last_res.index_buffer, NULL);
}
/* ------------------------------------------------------------------- */
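
/* MI register helpers.  64-bit MMIO registers are addressed as two 32-bit
 * halves at reg + 0 and reg + 4, so each 64-bit variant below is simply
 * two 32-bit operations.
 */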
static void
iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
                         uint32_t val)
{
   _iris_emit_lri(batch, reg, val);
}

static void
iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
                         uint64_t val)
{
   _iris_emit_lri(batch, reg + 0, val & 0xffffffff);
   _iris_emit_lri(batch, reg + 4, val >> 32);
}
/**
 * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
 */
static void
iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
                         struct iris_bo *bo, uint32_t offset)
{
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = reg;
      lrm.MemoryAddress = ro_bo(bo, offset);
   }
}

/**
 * Load a 64-bit value from a buffer into a MMIO register via
 * two MI_LOAD_REGISTER_MEM commands.
 */
static void
iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
                         struct iris_bo *bo, uint32_t offset)
{
   iris_load_register_mem32(batch, reg + 0, bo, offset + 0);
   iris_load_register_mem32(batch, reg + 4, bo, offset + 4);
}
static void
iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
                          struct iris_bo *bo, uint32_t offset,
                          bool predicated)
{
   iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
      srm.RegisterAddress = reg;
      srm.MemoryAddress = rw_bo(bo, offset);
      srm.PredicateEnable = predicated;
   }
}

static void
iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
                          struct iris_bo *bo, uint32_t offset,
                          bool predicated)
{
   iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated);
   iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated);
}
static void
iris_store_data_imm32(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint32_t imm)
{
   iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) {
      sdi.Address = rw_bo(bo, offset);
      sdi.ImmediateData = imm;
   }
}

static void
iris_store_data_imm64(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint64_t imm)
{
   /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of
    * 2 in genxml but it's actually variable length and we need 5 DWords.
    */
   void *map = iris_get_command_space(batch, 4 * 5);
   _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) {
      sdi.DWordLength = 5 - 2;
      sdi.Address = rw_bo(bo, offset);
      sdi.ImmediateData = imm;
   }
}
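
/* Note on DWordLength above: MI command lengths exclude the first two
 * DWords, so the five-DWord MI_STORE_DATA_IMM (header, 64-bit address,
 * 64-bit data) encodes 5 - 2 = 3.
 */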
static void
iris_copy_mem_mem(struct iris_batch *batch,
                  struct iris_bo *dst_bo, uint32_t dst_offset,
                  struct iris_bo *src_bo, uint32_t src_offset,
                  unsigned bytes)
{
   /* MI_COPY_MEM_MEM operates on DWords. */
   assert(bytes % 4 == 0);
   assert(dst_offset % 4 == 0);
   assert(src_offset % 4 == 0);

   for (unsigned i = 0; i < bytes; i += 4) {
      iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
         cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i);
         cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
      }
   }
}
/* ------------------------------------------------------------------- */
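
/* Map PIPE_CONTROL_WRITE_* flags to the genxml "Post Sync Operation"
 * field values; 0 means no post-sync write.
 */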
static unsigned
flags_to_post_sync_op(uint32_t flags)
{
   if (flags & PIPE_CONTROL_WRITE_IMMEDIATE)
      return WriteImmediateData;

   if (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT)
      return WritePSDepthCount;

   if (flags & PIPE_CONTROL_WRITE_TIMESTAMP)
      return WriteTimestamp;

   return 0;
}
/**
 * Do the given flags have a Post Sync or LRI Post Sync operation?
 */
static enum pipe_control_flags
get_post_sync_flags(enum pipe_control_flags flags)
{
   flags &= PIPE_CONTROL_WRITE_IMMEDIATE |
            PIPE_CONTROL_WRITE_DEPTH_COUNT |
            PIPE_CONTROL_WRITE_TIMESTAMP |
            PIPE_CONTROL_LRI_POST_SYNC_OP;

   /* Only one "Post Sync Op" is allowed, and it's mutually exclusive with
    * "LRI Post Sync Operation".  So more than one bit set would be illegal.
    */
   assert(util_bitcount(flags) <= 1);

   return flags;
}
// XXX: compute support
#define IS_COMPUTE_PIPELINE(batch) (batch->engine != I915_EXEC_RENDER)

/**
 * Emit a series of PIPE_CONTROL commands, taking into account any
 * workarounds necessary to actually accomplish the caller's request.
 *
 * Unless otherwise noted, spec quotations in this function come from:
 *
 * Synchronization of the 3D Pipeline > PIPE_CONTROL Command > Programming
 * Restrictions for PIPE_CONTROL.
 *
 * You should not use this function directly.  Use the helpers in
 * iris_pipe_control.c instead, which may split the pipe control further.
 */
static void
iris_emit_raw_pipe_control(struct iris_batch *batch, uint32_t flags,
                           struct iris_bo *bo, uint32_t offset, uint64_t imm)
{
   UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
   enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
   enum pipe_control_flags non_lri_post_sync_flags =
      post_sync_flags & ~PIPE_CONTROL_LRI_POST_SYNC_OP;
   /* Recursive PIPE_CONTROL workarounds --------------------------------
    * (http://knowyourmeme.com/memes/xzibit-yo-dawg)
    *
    * We do these first because we want to look at the original operation,
    * rather than any workarounds we set.
    */
   if (GEN_GEN == 9 && (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
      /* The PIPE_CONTROL "VF Cache Invalidation Enable" bit description
       * lists several workarounds:
       *
       *    "Project: SKL, KBL, BXT
       *
       *     If the VF Cache Invalidation Enable is set to a 1 in a
       *     PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields
       *     sets to 0, with the VF Cache Invalidation Enable set to 0
       *     needs to be sent prior to the PIPE_CONTROL with VF Cache
       *     Invalidation Enable set to a 1."
       */
      iris_emit_raw_pipe_control(batch, 0, NULL, 0, 0);
   }

   if (GEN_GEN == 9 && IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
      /* Project: SKL / Argument: LRI Post Sync Operation [23]
       *
       * "PIPECONTROL command with “Command Streamer Stall Enable” must be
       *  programmed prior to programming a PIPECONTROL command with "LRI
       *  Post Sync Operation" in GPGPU mode of operation (i.e when
       *  PIPELINE_SELECT command is set to GPGPU mode of operation)."
       *
       * The same text exists a few rows below for Post Sync Op.
       */
      iris_emit_raw_pipe_control(batch, PIPE_CONTROL_CS_STALL, bo, offset, imm);
   }

   if (GEN_GEN == 10 && (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH)) {
      /* Cannonlake:
       * "Before sending a PIPE_CONTROL command with bit 12 set, SW must issue
       *  another PIPE_CONTROL with Render Target Cache Flush Enable (bit 12)
       *  = 0 and Pipe Control Flush Enable (bit 7) = 1"
       */
      iris_emit_raw_pipe_control(batch, PIPE_CONTROL_FLUSH_ENABLE, bo,
                                 offset, imm);
   }
4677 /* "Flush Types" workarounds ---------------------------------------------
4678 * We do these now because they may add post-sync operations or CS stalls.
4681 if (flags
& PIPE_CONTROL_VF_CACHE_INVALIDATE
) {
4682 /* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
4684 * "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
4685 * 'Write PS Depth Count' or 'Write Timestamp'."
4688 flags
|= PIPE_CONTROL_WRITE_IMMEDIATE
;
4689 post_sync_flags
|= PIPE_CONTROL_WRITE_IMMEDIATE
;
4690 non_lri_post_sync_flags
|= PIPE_CONTROL_WRITE_IMMEDIATE
;
4691 bo
= batch
->screen
->workaround_bo
;
4695 /* #1130 from Gen10 workarounds page:
4697 * "Enable Depth Stall on every Post Sync Op if Render target Cache
4698 * Flush is not enabled in same PIPE CONTROL and Enable Pixel score
4699 * board stall if Render target cache flush is enabled."
4701 * Applicable to CNL B0 and C0 steppings only.
4703 * The wording here is unclear, and this workaround doesn't look anything
4704 * like the internal bug report recommendations, but leave it be for now...
4706 if (GEN_GEN
== 10) {
4707 if (flags
& PIPE_CONTROL_RENDER_TARGET_FLUSH
) {
4708 flags
|= PIPE_CONTROL_STALL_AT_SCOREBOARD
;
4709 } else if (flags
& non_lri_post_sync_flags
) {
4710 flags
|= PIPE_CONTROL_DEPTH_STALL
;
   if (flags & PIPE_CONTROL_DEPTH_STALL) {
      /* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
       *
       *    "This bit must be DISABLED for operations other than writing
       *     PS_DEPTH_COUNT."
       *
       * This seems like nonsense.  An Ivybridge workaround requires us to
       * emit a PIPE_CONTROL with a depth stall and write immediate post-sync
       * operation.  Gen8+ requires us to emit depth stalls and depth cache
       * flushes together.  So, it's hard to imagine this means anything other
       * than "we originally intended this to be used for PS_DEPTH_COUNT".
       *
       * We ignore the supposed restriction and do nothing.
       */
   }

   if (flags & (PIPE_CONTROL_RENDER_TARGET_FLUSH |
                PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
      /* From the PIPE_CONTROL instruction table, bit 12 and bit 1:
       *
       *    "This bit must be DISABLED for End-of-pipe (Read) fences,
       *     PS_DEPTH_COUNT or TIMESTAMP queries."
       *
       * TODO: Implement end-of-pipe checking.
       */
      assert(!(post_sync_flags & (PIPE_CONTROL_WRITE_DEPTH_COUNT |
                                  PIPE_CONTROL_WRITE_TIMESTAMP)));
   }

   if (GEN_GEN < 11 && (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
      /* From the PIPE_CONTROL instruction table, bit 1:
       *
       *    "This bit is ignored if Depth Stall Enable is set.
       *     Further, the render cache is not flushed even if Write Cache
       *     Flush Enable bit is set."
       *
       * We assert that the caller doesn't do this combination, to try and
       * prevent mistakes.  It shouldn't hurt the GPU, though.
       *
       * We skip this check on Gen11+ as the "Stall at Pixel Scoreboard"
       * and "Render Target Flush" combo is explicitly required for BTI
       * update workarounds.
       */
      assert(!(flags & (PIPE_CONTROL_DEPTH_STALL |
                        PIPE_CONTROL_RENDER_TARGET_FLUSH)));
   }
   /* PIPE_CONTROL page workarounds ------------------------------------- */

   if (GEN_GEN <= 8 && (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE)) {
      /* From the PIPE_CONTROL page itself:
       *
       *    "Restriction: Pipe_control with CS-stall bit set must be issued
       *     before a pipe-control command that has the State Cache
       *     Invalidate bit set."
       */
      flags |= PIPE_CONTROL_CS_STALL;
   }

   if (flags & PIPE_CONTROL_FLUSH_LLC) {
      /* From the PIPE_CONTROL instruction table, bit 26 (Flush LLC):
       *
       *    "Project: ALL
       *     SW must always program Post-Sync Operation to "Write Immediate
       *     Data" when Flush LLC is set."
       *
       * For now, we just require the caller to do it.
       */
      assert(flags & PIPE_CONTROL_WRITE_IMMEDIATE);
   }

   /* "Post-Sync Operation" workarounds -------------------------------- */

   /* Project: All / Argument: Global Snapshot Count Reset [19]
    *
    * "This bit must not be exercised on any product.
    *  Requires stall bit ([20] of DW1) set."
    *
    * We don't use this, so we just assert that it isn't used.  The
    * PIPE_CONTROL instruction page indicates that they intended this
    * as a debug feature and don't think it is useful in production,
    * but it may actually be usable, should we ever want to.
    */
   assert((flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) == 0);
   if (flags & (PIPE_CONTROL_MEDIA_STATE_CLEAR |
                PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE)) {
      /* Project: All / Arguments:
       *
       * - Generic Media State Clear [16]
       * - Indirect State Pointers Disable [16]
       *
       *    "Requires stall bit ([20] of DW1) set."
       *
       * Also, the PIPE_CONTROL instruction table, bit 16 (Generic Media
       * State Clear) says:
       *
       *    "PIPECONTROL command with “Command Streamer Stall Enable” must be
       *     programmed prior to programming a PIPECONTROL command with "Media
       *     State Clear" set in GPGPU mode of operation"
       *
       * This is a subset of the earlier rule, so there's nothing to do.
       */
      flags |= PIPE_CONTROL_CS_STALL;
   }

   if (flags & PIPE_CONTROL_STORE_DATA_INDEX) {
      /* Project: All / Argument: Store Data Index
       *
       * "Post-Sync Operation ([15:14] of DW1) must be set to something other
       *  than '0'."
       *
       * For now, we just assert that the caller does this.  We might want to
       * automatically add a write to the workaround BO...
       */
      assert(non_lri_post_sync_flags != 0);
   }

   if (flags & PIPE_CONTROL_SYNC_GFDT) {
      /* Project: All / Argument: Sync GFDT
       *
       * "Post-Sync Operation ([15:14] of DW1) must be set to something other
       *  than '0' or 0x2520[13] must be set."
       *
       * For now, we just assert that the caller does this.
       */
      assert(non_lri_post_sync_flags != 0);
   }

   if (flags & PIPE_CONTROL_TLB_INVALIDATE) {
      /* Project: IVB+ / Argument: TLB inv
       *
       *    "Requires stall bit ([20] of DW1) set."
       *
       * Also, from the PIPE_CONTROL instruction table:
       *
       *    "Post Sync Operation or CS stall must be set to ensure a TLB
       *     invalidation occurs.  Otherwise no cycle will occur to the TLB
       *     cache to invalidate."
       *
       * This is not a subset of the earlier rule, so there's nothing to do.
       */
      flags |= PIPE_CONTROL_CS_STALL;
   }

   if (GEN_GEN == 9 && devinfo->gt == 4) {
      /* TODO: The big Skylake GT4 post sync op workaround */
   }
4865 /* "GPGPU specific workarounds" (both post-sync and flush) ------------ */
4867 if (IS_COMPUTE_PIPELINE(batch
)) {
4868 if (GEN_GEN
>= 9 && (flags
& PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE
)) {
4869 /* Project: SKL+ / Argument: Tex Invalidate
4870 * "Requires stall bit ([20] of DW) set for all GPGPU Workloads."
4872 flags
|= PIPE_CONTROL_CS_STALL
;
4875 if (GEN_GEN
== 8 && (post_sync_flags
||
4876 (flags
& (PIPE_CONTROL_NOTIFY_ENABLE
|
4877 PIPE_CONTROL_DEPTH_STALL
|
4878 PIPE_CONTROL_RENDER_TARGET_FLUSH
|
4879 PIPE_CONTROL_DEPTH_CACHE_FLUSH
|
4880 PIPE_CONTROL_DATA_CACHE_FLUSH
)))) {
4881 /* Project: BDW / Arguments:
4883 * - LRI Post Sync Operation [23]
4884 * - Post Sync Op [15:14]
4886 * - Depth Stall [13]
4887 * - Render Target Cache Flush [12]
4888 * - Depth Cache Flush [0]
4889 * - DC Flush Enable [5]
4891 * "Requires stall bit ([20] of DW) set for all GPGPU and Media
4894 flags
|= PIPE_CONTROL_CS_STALL
;
4896 /* Also, from the PIPE_CONTROL instruction table, bit 20:
4899 * This bit must be always set when PIPE_CONTROL command is
4900 * programmed by GPGPU and MEDIA workloads, except for the cases
4901 * when only Read Only Cache Invalidation bits are set (State
4902 * Cache Invalidation Enable, Instruction cache Invalidation
4903 * Enable, Texture Cache Invalidation Enable, Constant Cache
4904 * Invalidation Enable). This is to WA FFDOP CG issue, this WA
4905 * need not implemented when FF_DOP_CG is disable via "Fixed
4906 * Function DOP Clock Gate Disable" bit in RC_PSMI_CTRL register."
4908 * It sounds like we could avoid CS stalls in some cases, but we
4909 * don't currently bother. This list isn't exactly the list above,
4915 /* "Stall" workarounds ----------------------------------------------
4916 * These have to come after the earlier ones because we may have added
4917 * some additional CS stalls above.
4920 if (GEN_GEN
< 9 && (flags
& PIPE_CONTROL_CS_STALL
)) {
4921 /* Project: PRE-SKL, VLV, CHV
4923 * "[All Stepping][All SKUs]:
4925 * One of the following must also be set:
4927 * - Render Target Cache Flush Enable ([12] of DW1)
4928 * - Depth Cache Flush Enable ([0] of DW1)
4929 * - Stall at Pixel Scoreboard ([1] of DW1)
4930 * - Depth Stall ([13] of DW1)
4931 * - Post-Sync Operation ([13] of DW1)
4932 * - DC Flush Enable ([5] of DW1)"
4934 * If we don't already have one of those bits set, we choose to add
4935 * "Stall at Pixel Scoreboard". Some of the other bits require a
4936 * CS stall as a workaround (see above), which would send us into
4937 * an infinite recursion of PIPE_CONTROLs. "Stall at Pixel Scoreboard"
4938 * appears to be safe, so we choose that.
4940 const uint32_t wa_bits
= PIPE_CONTROL_RENDER_TARGET_FLUSH
|
4941 PIPE_CONTROL_DEPTH_CACHE_FLUSH
|
4942 PIPE_CONTROL_WRITE_IMMEDIATE
|
4943 PIPE_CONTROL_WRITE_DEPTH_COUNT
|
4944 PIPE_CONTROL_WRITE_TIMESTAMP
|
4945 PIPE_CONTROL_STALL_AT_SCOREBOARD
|
4946 PIPE_CONTROL_DEPTH_STALL
|
4947 PIPE_CONTROL_DATA_CACHE_FLUSH
;
4948 if (!(flags
& wa_bits
))
4949 flags
|= PIPE_CONTROL_STALL_AT_SCOREBOARD
;
   /* Emit --------------------------------------------------------------- */

   iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
      pc.LRIPostSyncOperation = NoLRIOperation;
      pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
      pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
      pc.StoreDataIndex = 0;
      pc.CommandStreamerStallEnable = flags & PIPE_CONTROL_CS_STALL;
      pc.GlobalSnapshotCountReset =
         flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET;
      pc.TLBInvalidate = flags & PIPE_CONTROL_TLB_INVALIDATE;
      pc.GenericMediaStateClear = flags & PIPE_CONTROL_MEDIA_STATE_CLEAR;
      pc.StallAtPixelScoreboard = flags & PIPE_CONTROL_STALL_AT_SCOREBOARD;
      pc.RenderTargetCacheFlushEnable =
         flags & PIPE_CONTROL_RENDER_TARGET_FLUSH;
      pc.DepthCacheFlushEnable = flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH;
      pc.StateCacheInvalidationEnable =
         flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE;
      pc.VFCacheInvalidationEnable = flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
      pc.ConstantCacheInvalidationEnable =
         flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE;
      pc.PostSyncOperation = flags_to_post_sync_op(flags);
      pc.DepthStallEnable = flags & PIPE_CONTROL_DEPTH_STALL;
      pc.InstructionCacheInvalidateEnable =
         flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE;
      pc.NotifyEnable = flags & PIPE_CONTROL_NOTIFY_ENABLE;
      pc.IndirectStatePointersDisable =
         flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE;
      pc.TextureCacheInvalidationEnable =
         flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
      pc.Address = rw_bo(bo, offset);
      pc.ImmediateData = imm;
   }
}
void
genX(init_state)(struct iris_context *ice)
{
   struct pipe_context *ctx = &ice->ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;

   ctx->create_blend_state = iris_create_blend_state;
   ctx->create_depth_stencil_alpha_state = iris_create_zsa_state;
   ctx->create_rasterizer_state = iris_create_rasterizer_state;
   ctx->create_sampler_state = iris_create_sampler_state;
   ctx->create_sampler_view = iris_create_sampler_view;
   ctx->create_surface = iris_create_surface;
   ctx->create_vertex_elements_state = iris_create_vertex_elements;
   ctx->bind_blend_state = iris_bind_blend_state;
   ctx->bind_depth_stencil_alpha_state = iris_bind_zsa_state;
   ctx->bind_sampler_states = iris_bind_sampler_states;
   ctx->bind_rasterizer_state = iris_bind_rasterizer_state;
   ctx->bind_vertex_elements_state = iris_bind_vertex_elements_state;
   ctx->delete_blend_state = iris_delete_state;
   ctx->delete_depth_stencil_alpha_state = iris_delete_state;
   ctx->delete_fs_state = iris_delete_state;
   ctx->delete_rasterizer_state = iris_delete_state;
   ctx->delete_sampler_state = iris_delete_state;
   ctx->delete_vertex_elements_state = iris_delete_state;
   ctx->delete_tcs_state = iris_delete_state;
   ctx->delete_tes_state = iris_delete_state;
   ctx->delete_gs_state = iris_delete_state;
   ctx->delete_vs_state = iris_delete_state;
   ctx->set_blend_color = iris_set_blend_color;
   ctx->set_clip_state = iris_set_clip_state;
   ctx->set_constant_buffer = iris_set_constant_buffer;
   ctx->set_shader_buffers = iris_set_shader_buffers;
   ctx->set_shader_images = iris_set_shader_images;
   ctx->set_sampler_views = iris_set_sampler_views;
   ctx->set_tess_state = iris_set_tess_state;
   ctx->set_framebuffer_state = iris_set_framebuffer_state;
   ctx->set_polygon_stipple = iris_set_polygon_stipple;
   ctx->set_sample_mask = iris_set_sample_mask;
   ctx->set_scissor_states = iris_set_scissor_states;
   ctx->set_stencil_ref = iris_set_stencil_ref;
   ctx->set_vertex_buffers = iris_set_vertex_buffers;
   ctx->set_viewport_states = iris_set_viewport_states;
   ctx->sampler_view_destroy = iris_sampler_view_destroy;
   ctx->surface_destroy = iris_surface_destroy;
   ctx->draw_vbo = iris_draw_vbo;
   ctx->launch_grid = iris_launch_grid;
   ctx->create_stream_output_target = iris_create_stream_output_target;
   ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
   ctx->set_stream_output_targets = iris_set_stream_output_targets;

   ice->vtbl.destroy_state = iris_destroy_state;
   ice->vtbl.init_render_context = iris_init_render_context;
   ice->vtbl.init_compute_context = iris_init_compute_context;
   ice->vtbl.upload_render_state = iris_upload_render_state;
   ice->vtbl.update_surface_base_address = iris_update_surface_base_address;
   ice->vtbl.upload_compute_state = iris_upload_compute_state;
   ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
   ice->vtbl.load_register_imm32 = iris_load_register_imm32;
   ice->vtbl.load_register_imm64 = iris_load_register_imm64;
   ice->vtbl.load_register_mem32 = iris_load_register_mem32;
   ice->vtbl.load_register_mem64 = iris_load_register_mem64;
   ice->vtbl.store_register_mem32 = iris_store_register_mem32;
   ice->vtbl.store_register_mem64 = iris_store_register_mem64;
   ice->vtbl.store_data_imm32 = iris_store_data_imm32;
   ice->vtbl.store_data_imm64 = iris_store_data_imm64;
   ice->vtbl.copy_mem_mem = iris_copy_mem_mem;
   ice->vtbl.derived_program_state_size = iris_derived_program_state_size;
   ice->vtbl.store_derived_program_state = iris_store_derived_program_state;
   ice->vtbl.create_so_decl_list = iris_create_so_decl_list;
   ice->vtbl.populate_vs_key = iris_populate_vs_key;
   ice->vtbl.populate_tcs_key = iris_populate_tcs_key;
   ice->vtbl.populate_tes_key = iris_populate_tes_key;
   ice->vtbl.populate_gs_key = iris_populate_gs_key;
   ice->vtbl.populate_fs_key = iris_populate_fs_key;
   ice->vtbl.populate_cs_key = iris_populate_cs_key;

   ice->state.dirty = ~0ull;

   ice->state.sample_mask = 0xffff;
   ice->state.num_viewports = 1;
   ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
   /* Make a 1x1x1 null surface for unbound textures */
   void *null_surf_map =
      upload_state(ice->state.surface_uploader, &ice->state.unbound_tex,
                   4 * GENX(RENDER_SURFACE_STATE_length), 64);
   isl_null_fill_state(&screen->isl_dev, null_surf_map, isl_extent3d(1, 1, 1));
   ice->state.unbound_tex.offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(ice->state.unbound_tex.res));