iris: Set MOCS for external surfaces to uncached
[mesa.git] / src/gallium/drivers/iris/iris_state.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_state.c
25 *
26 * ============================= GENXML CODE =============================
27 * [This file is compiled once per generation.]
28 * =======================================================================
29 *
30 * This is the main state upload code.
31 *
32 * Gallium uses Constant State Objects, or CSOs, for most state. Large,
33 * complex, or highly reusable state can be created once, and bound and
34 * rebound multiple times. This is modeled with the pipe->create_*_state()
35 * and pipe->bind_*_state() hooks. Highly dynamic or inexpensive state is
36 * streamed out on the fly, via pipe->set_*_state() hooks.
37 *
38 * OpenGL involves frequently mutating context state, which is mirrored in
39 * core Mesa by highly mutable data structures. However, most applications
40 * typically draw the same things over and over - from frame to frame, most
41 * of the same objects are still visible and need to be redrawn. So, rather
42 * than inventing new state all the time, applications usually mutate to swap
43 * between known states that we've seen before.
44 *
45 * Gallium isolates us from this mutation by tracking API state, and
46 * distilling it into a set of Constant State Objects, or CSOs. Large,
47 * complex, or typically reusable state can be created once, then reused
48 * multiple times. Drivers can create and store their own associated data.
49 * This create/bind model corresponds to the pipe->create_*_state() and
50 * pipe->bind_*_state() driver hooks.
51 *
52 * Some state is cheap to create, or expected to be highly dynamic. Rather
53 * than creating and caching piles of CSOs for these, Gallium simply streams
54 * them out, via the pipe->set_*_state() driver hooks.
55 *
56 * To reduce draw time overhead, we try to compute as much state at create
57 * time as possible. Wherever possible, we translate the Gallium pipe state
58 * to 3DSTATE commands, and store those commands in the CSO. At draw time,
59 * we can simply memcpy them into a batch buffer.
60 *
61 * No hardware matches the abstraction perfectly, so some commands require
62 * information from multiple CSOs. In this case, we can store two copies
63 * of the packet (one in each CSO), and simply | together their DWords at
64 * draw time. Sometimes the second set is trivial (one or two fields), so
65 * we simply pack it at draw time.
66 *
67 * There are two main components in the file below. First, the CSO hooks
68 * create/bind/track state. The second are the draw-time upload functions,
69 * iris_upload_render_state() and iris_upload_compute_state(), which read
70 * the context state and emit the commands into the actual batch.
71 */
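/* As a sketch of the OR-merging pattern described above (the names here
 * are illustrative, not literal upstream code): two pre-packed copies of
 * the same packet can be combined at draw time with a per-DWord OR:
 *
 *    uint32_t merged[GENX(3DSTATE_SF_length)];
 *    for (int i = 0; i < GENX(3DSTATE_SF_length); i++)
 *       merged[i] = cso_rast->sf[i] | dynamic_sf[i];
 */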
72
73 #include <stdio.h>
74 #include <errno.h>
75
76 #if HAVE_VALGRIND
77 #include <valgrind.h>
78 #include <memcheck.h>
79 #define VG(x) x
80 #ifdef DEBUG
81 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
82 #endif
83 #else
84 #define VG(x)
85 #endif
86
87 #include "pipe/p_defines.h"
88 #include "pipe/p_state.h"
89 #include "pipe/p_context.h"
90 #include "pipe/p_screen.h"
91 #include "util/u_dual_blend.h"
92 #include "util/u_inlines.h"
93 #include "util/u_format.h"
94 #include "util/u_framebuffer.h"
95 #include "util/u_transfer.h"
96 #include "util/u_upload_mgr.h"
97 #include "util/u_viewport.h"
98 #include "drm-uapi/i915_drm.h"
99 #include "nir.h"
100 #include "intel/compiler/brw_compiler.h"
101 #include "intel/common/gen_aux_map.h"
102 #include "intel/common/gen_l3_config.h"
103 #include "intel/common/gen_sample_positions.h"
104 #include "iris_batch.h"
105 #include "iris_context.h"
106 #include "iris_defines.h"
107 #include "iris_pipe.h"
108 #include "iris_resource.h"
109
110 #include "iris_genx_macros.h"
111 #include "intel/common/gen_guardband.h"
112
113 #if GEN_GEN >= 12
114 /* TODO: Set PTE to MOCS 61 when the kernel is ready */
115 #define MOCS_PTE (3 << 1)
116 #define MOCS_WB (2 << 1)
117 #elif GEN_GEN >= 9
118 #define MOCS_PTE (1 << 1)
119 #define MOCS_WB (2 << 1)
120 #elif GEN_GEN == 8
121 #define MOCS_PTE 0x18
122 #define MOCS_WB 0x78
123 #endif
124
125 static uint32_t
126 mocs(const struct iris_bo *bo)
127 {
128 return bo && bo->external ? MOCS_PTE : MOCS_WB;
129 }
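/* Illustrative usage (a sketch, not literal upstream code): every packet
 * with a MOCS field derives it from the BO it points at, so external
 * (scanout/shared) surfaces automatically get the PTE-based entry, which
 * follows the kernel's uncached display mapping, while driver-internal
 * buffers keep full write-back caching:
 *
 *    surf.MOCS = mocs(iris_resource_bo(p_res));
 */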
130
131 /**
132 * Statically assert that PIPE_* enums match the hardware packets.
133 * (As long as they match, we don't need to translate them.)
134 */
135 UNUSED static void pipe_asserts()
136 {
137 #define PIPE_ASSERT(x) STATIC_ASSERT((int)x)
138
139 /* pipe_logicop happens to match the hardware. */
140 PIPE_ASSERT(PIPE_LOGICOP_CLEAR == LOGICOP_CLEAR);
141 PIPE_ASSERT(PIPE_LOGICOP_NOR == LOGICOP_NOR);
142 PIPE_ASSERT(PIPE_LOGICOP_AND_INVERTED == LOGICOP_AND_INVERTED);
143 PIPE_ASSERT(PIPE_LOGICOP_COPY_INVERTED == LOGICOP_COPY_INVERTED);
144 PIPE_ASSERT(PIPE_LOGICOP_AND_REVERSE == LOGICOP_AND_REVERSE);
145 PIPE_ASSERT(PIPE_LOGICOP_INVERT == LOGICOP_INVERT);
146 PIPE_ASSERT(PIPE_LOGICOP_XOR == LOGICOP_XOR);
147 PIPE_ASSERT(PIPE_LOGICOP_NAND == LOGICOP_NAND);
148 PIPE_ASSERT(PIPE_LOGICOP_AND == LOGICOP_AND);
149 PIPE_ASSERT(PIPE_LOGICOP_EQUIV == LOGICOP_EQUIV);
150 PIPE_ASSERT(PIPE_LOGICOP_NOOP == LOGICOP_NOOP);
151 PIPE_ASSERT(PIPE_LOGICOP_OR_INVERTED == LOGICOP_OR_INVERTED);
152 PIPE_ASSERT(PIPE_LOGICOP_COPY == LOGICOP_COPY);
153 PIPE_ASSERT(PIPE_LOGICOP_OR_REVERSE == LOGICOP_OR_REVERSE);
154 PIPE_ASSERT(PIPE_LOGICOP_OR == LOGICOP_OR);
155 PIPE_ASSERT(PIPE_LOGICOP_SET == LOGICOP_SET);
156
158 /* pipe_blendfactor happens to match the hardware. */
158 PIPE_ASSERT(PIPE_BLENDFACTOR_ONE == BLENDFACTOR_ONE);
159 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_COLOR == BLENDFACTOR_SRC_COLOR);
160 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA == BLENDFACTOR_SRC_ALPHA);
161 PIPE_ASSERT(PIPE_BLENDFACTOR_DST_ALPHA == BLENDFACTOR_DST_ALPHA);
162 PIPE_ASSERT(PIPE_BLENDFACTOR_DST_COLOR == BLENDFACTOR_DST_COLOR);
163 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE == BLENDFACTOR_SRC_ALPHA_SATURATE);
164 PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_COLOR == BLENDFACTOR_CONST_COLOR);
165 PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_ALPHA == BLENDFACTOR_CONST_ALPHA);
166 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_COLOR == BLENDFACTOR_SRC1_COLOR);
167 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_ALPHA == BLENDFACTOR_SRC1_ALPHA);
168 PIPE_ASSERT(PIPE_BLENDFACTOR_ZERO == BLENDFACTOR_ZERO);
169 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_COLOR == BLENDFACTOR_INV_SRC_COLOR);
170 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_ALPHA == BLENDFACTOR_INV_SRC_ALPHA);
171 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_ALPHA == BLENDFACTOR_INV_DST_ALPHA);
172 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_COLOR == BLENDFACTOR_INV_DST_COLOR);
173 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_COLOR == BLENDFACTOR_INV_CONST_COLOR);
174 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_ALPHA == BLENDFACTOR_INV_CONST_ALPHA);
175 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_COLOR == BLENDFACTOR_INV_SRC1_COLOR);
176 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_ALPHA == BLENDFACTOR_INV_SRC1_ALPHA);
177
178 /* pipe_blend_func happens to match the hardware. */
179 PIPE_ASSERT(PIPE_BLEND_ADD == BLENDFUNCTION_ADD);
180 PIPE_ASSERT(PIPE_BLEND_SUBTRACT == BLENDFUNCTION_SUBTRACT);
181 PIPE_ASSERT(PIPE_BLEND_REVERSE_SUBTRACT == BLENDFUNCTION_REVERSE_SUBTRACT);
182 PIPE_ASSERT(PIPE_BLEND_MIN == BLENDFUNCTION_MIN);
183 PIPE_ASSERT(PIPE_BLEND_MAX == BLENDFUNCTION_MAX);
184
185 /* pipe_stencil_op happens to match the hardware. */
186 PIPE_ASSERT(PIPE_STENCIL_OP_KEEP == STENCILOP_KEEP);
187 PIPE_ASSERT(PIPE_STENCIL_OP_ZERO == STENCILOP_ZERO);
188 PIPE_ASSERT(PIPE_STENCIL_OP_REPLACE == STENCILOP_REPLACE);
189 PIPE_ASSERT(PIPE_STENCIL_OP_INCR == STENCILOP_INCRSAT);
190 PIPE_ASSERT(PIPE_STENCIL_OP_DECR == STENCILOP_DECRSAT);
191 PIPE_ASSERT(PIPE_STENCIL_OP_INCR_WRAP == STENCILOP_INCR);
192 PIPE_ASSERT(PIPE_STENCIL_OP_DECR_WRAP == STENCILOP_DECR);
193 PIPE_ASSERT(PIPE_STENCIL_OP_INVERT == STENCILOP_INVERT);
194
195 /* pipe_sprite_coord_mode happens to match 3DSTATE_SBE */
196 PIPE_ASSERT(PIPE_SPRITE_COORD_UPPER_LEFT == UPPERLEFT);
197 PIPE_ASSERT(PIPE_SPRITE_COORD_LOWER_LEFT == LOWERLEFT);
198 #undef PIPE_ASSERT
199 }
200
201 static unsigned
202 translate_prim_type(enum pipe_prim_type prim, uint8_t verts_per_patch)
203 {
204 static const unsigned map[] = {
205 [PIPE_PRIM_POINTS] = _3DPRIM_POINTLIST,
206 [PIPE_PRIM_LINES] = _3DPRIM_LINELIST,
207 [PIPE_PRIM_LINE_LOOP] = _3DPRIM_LINELOOP,
208 [PIPE_PRIM_LINE_STRIP] = _3DPRIM_LINESTRIP,
209 [PIPE_PRIM_TRIANGLES] = _3DPRIM_TRILIST,
210 [PIPE_PRIM_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
211 [PIPE_PRIM_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
212 [PIPE_PRIM_QUADS] = _3DPRIM_QUADLIST,
213 [PIPE_PRIM_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
214 [PIPE_PRIM_POLYGON] = _3DPRIM_POLYGON,
215 [PIPE_PRIM_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
216 [PIPE_PRIM_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
217 [PIPE_PRIM_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
218 [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
219 [PIPE_PRIM_PATCHES] = _3DPRIM_PATCHLIST_1 - 1,
220 };
221
222 return map[prim] + (prim == PIPE_PRIM_PATCHES ? verts_per_patch : 0);
223 }
224
225 static unsigned
226 translate_compare_func(enum pipe_compare_func pipe_func)
227 {
228 static const unsigned map[] = {
229 [PIPE_FUNC_NEVER] = COMPAREFUNCTION_NEVER,
230 [PIPE_FUNC_LESS] = COMPAREFUNCTION_LESS,
231 [PIPE_FUNC_EQUAL] = COMPAREFUNCTION_EQUAL,
232 [PIPE_FUNC_LEQUAL] = COMPAREFUNCTION_LEQUAL,
233 [PIPE_FUNC_GREATER] = COMPAREFUNCTION_GREATER,
234 [PIPE_FUNC_NOTEQUAL] = COMPAREFUNCTION_NOTEQUAL,
235 [PIPE_FUNC_GEQUAL] = COMPAREFUNCTION_GEQUAL,
236 [PIPE_FUNC_ALWAYS] = COMPAREFUNCTION_ALWAYS,
237 };
238 return map[pipe_func];
239 }
240
241 static unsigned
242 translate_shadow_func(enum pipe_compare_func pipe_func)
243 {
244 /* Gallium specifies the result of shadow comparisons as:
245 *
246 * 1 if ref <op> texel,
247 * 0 otherwise.
248 *
249 * The hardware does:
250 *
251 * 0 if texel <op> ref,
252 * 1 otherwise.
253 *
254 * So we need to flip the operator and also negate.
255 */
256 static const unsigned map[] = {
257 [PIPE_FUNC_NEVER] = PREFILTEROPALWAYS,
258 [PIPE_FUNC_LESS] = PREFILTEROPLEQUAL,
259 [PIPE_FUNC_EQUAL] = PREFILTEROPNOTEQUAL,
260 [PIPE_FUNC_LEQUAL] = PREFILTEROPLESS,
261 [PIPE_FUNC_GREATER] = PREFILTEROPGEQUAL,
262 [PIPE_FUNC_NOTEQUAL] = PREFILTEROPEQUAL,
263 [PIPE_FUNC_GEQUAL] = PREFILTEROPGREATER,
264 [PIPE_FUNC_ALWAYS] = PREFILTEROPNEVER,
265 };
266 return map[pipe_func];
267 }
268
269 static unsigned
270 translate_cull_mode(unsigned pipe_face)
271 {
272 static const unsigned map[4] = {
273 [PIPE_FACE_NONE] = CULLMODE_NONE,
274 [PIPE_FACE_FRONT] = CULLMODE_FRONT,
275 [PIPE_FACE_BACK] = CULLMODE_BACK,
276 [PIPE_FACE_FRONT_AND_BACK] = CULLMODE_BOTH,
277 };
278 return map[pipe_face];
279 }
280
281 static unsigned
282 translate_fill_mode(unsigned pipe_polymode)
283 {
284 static const unsigned map[4] = {
285 [PIPE_POLYGON_MODE_FILL] = FILL_MODE_SOLID,
286 [PIPE_POLYGON_MODE_LINE] = FILL_MODE_WIREFRAME,
287 [PIPE_POLYGON_MODE_POINT] = FILL_MODE_POINT,
288 [PIPE_POLYGON_MODE_FILL_RECTANGLE] = FILL_MODE_SOLID,
289 };
290 return map[pipe_polymode];
291 }
292
293 static unsigned
294 translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
295 {
296 static const unsigned map[] = {
297 [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
298 [PIPE_TEX_MIPFILTER_LINEAR] = MIPFILTER_LINEAR,
299 [PIPE_TEX_MIPFILTER_NONE] = MIPFILTER_NONE,
300 };
301 return map[pipe_mip];
302 }
303
304 static uint32_t
305 translate_wrap(unsigned pipe_wrap)
306 {
307 static const unsigned map[] = {
308 [PIPE_TEX_WRAP_REPEAT] = TCM_WRAP,
309 [PIPE_TEX_WRAP_CLAMP] = TCM_HALF_BORDER,
310 [PIPE_TEX_WRAP_CLAMP_TO_EDGE] = TCM_CLAMP,
311 [PIPE_TEX_WRAP_CLAMP_TO_BORDER] = TCM_CLAMP_BORDER,
312 [PIPE_TEX_WRAP_MIRROR_REPEAT] = TCM_MIRROR,
313 [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,
314
315 /* These are unsupported. */
316 [PIPE_TEX_WRAP_MIRROR_CLAMP] = -1,
317 [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
318 };
319 return map[pipe_wrap];
320 }
321
322 /**
323 * Allocate space for some indirect state.
324 *
325 * Return a pointer to the map (to fill it out) and a state ref (for
326 * referring to the state in GPU commands).
327 */
328 static void *
329 upload_state(struct u_upload_mgr *uploader,
330 struct iris_state_ref *ref,
331 unsigned size,
332 unsigned alignment)
333 {
334 void *p = NULL;
335 u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
336 return p;
337 }
338
339 /**
340 * Stream out temporary/short-lived state.
341 *
342 * This allocates space, pins the BO, and includes the BO address in the
343 * returned offset (which works because all state lives in 32-bit memory
344 * zones).
345 */
346 static uint32_t *
347 stream_state(struct iris_batch *batch,
348 struct u_upload_mgr *uploader,
349 struct pipe_resource **out_res,
350 unsigned size,
351 unsigned alignment,
352 uint32_t *out_offset)
353 {
354 void *ptr = NULL;
355
356 u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);
357
358 struct iris_bo *bo = iris_resource_bo(*out_res);
359 iris_use_pinned_bo(batch, bo, false);
360
361 *out_offset += iris_bo_offset_from_base_address(bo);
362
363 iris_record_state_size(batch->state_sizes, *out_offset, size);
364
365 return ptr;
366 }
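/* A hedged usage sketch (the resource and size are illustrative):
 *
 *    uint32_t cc_offset;
 *    struct pipe_resource *res = NULL;
 *    uint32_t *cc_map =
 *       stream_state(batch, ice->state.dynamic_uploader, &res,
 *                    4 * GENX(COLOR_CALC_STATE_length), 64, &cc_offset);
 *
 * Because all dynamic state lives in a single 4GB memory zone, cc_offset
 * already folds in the BO's placement relative to Dynamic State Base
 * Address, so packets can use it directly.
 */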
367
368 /**
369 * stream_state() + memcpy.
370 */
371 static uint32_t
372 emit_state(struct iris_batch *batch,
373 struct u_upload_mgr *uploader,
374 struct pipe_resource **out_res,
375 const void *data,
376 unsigned size,
377 unsigned alignment)
378 {
379 unsigned offset = 0;
380 uint32_t *map =
381 stream_state(batch, uploader, out_res, size, alignment, &offset);
382
383 if (map)
384 memcpy(map, data, size);
385
386 return offset;
387 }
388
389 /**
390 * Did field 'x' change between 'old_cso' and 'new_cso'?
391 *
392 * (If so, we may want to set some dirty flags.)
393 */
394 #define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
395 #define cso_changed_memcmp(x) \
396 (!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
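/* For example, a bind hook can diff the old and new CSO field-by-field
 * and flag only the affected dirty bits (this pattern appears in
 * iris_bind_zsa_state() below):
 *
 *    if (cso_changed(alpha.enabled))
 *       ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;
 */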
397
398 static void
399 flush_before_state_base_change(struct iris_batch *batch)
400 {
401 /* Flush before emitting STATE_BASE_ADDRESS.
402 *
403 * This isn't documented anywhere in the PRM. However, it seems to be
404 * necessary prior to changing the surface state base address. We've
405 * seen issues in Vulkan where we get GPU hangs when using multi-level
406 * command buffers which clear depth, reset state base address, and then
407 * go render stuff.
408 *
409 * Normally, in GL, we would trust the kernel to do sufficient stalls
410 * and flushes prior to executing our batch. However, it doesn't seem
411 * as if the kernel's flushing is always sufficient and we don't want to
412 * rely on it.
413 *
414 * We make this an end-of-pipe sync instead of a normal flush because we
415 * do not know the current status of the GPU. On Haswell at least,
416 * having a fast-clear operation in flight at the same time as a normal
417 * rendering operation can cause hangs. Since the kernel's flushing is
418 * insufficient, we need to ensure that any rendering operations from
419 * other processes are definitely complete before we try to do our own
420 * rendering. It's a bit of a big hammer but it appears to work.
421 */
422 iris_emit_end_of_pipe_sync(batch,
423 "change STATE_BASE_ADDRESS (flushes)",
424 PIPE_CONTROL_RENDER_TARGET_FLUSH |
425 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
426 PIPE_CONTROL_DATA_CACHE_FLUSH);
427 }
428
429 static void
430 flush_after_state_base_change(struct iris_batch *batch)
431 {
432 /* After re-setting the surface state base address, we have to do some
433 * cache flushing so that the sampler engine will pick up the new
434 * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
435 * Shared Function > 3D Sampler > State > State Caching (page 96):
436 *
437 * Coherency with system memory in the state cache, like the texture
438 * cache is handled partially by software. It is expected that the
439 * command stream or shader will issue Cache Flush operation or
440 * Cache_Flush sampler message to ensure that the L1 cache remains
441 * coherent with system memory.
442 *
443 * [...]
444 *
445 * Whenever the value of the Dynamic_State_Base_Addr,
446 * Surface_State_Base_Addr are altered, the L1 state cache must be
447 * invalidated to ensure the new surface or sampler state is fetched
448 * from system memory.
449 *
450 * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
451 * which, according to the PIPE_CONTROL instruction documentation in the
452 * Broadwell PRM:
453 *
454 * Setting this bit is independent of any other bit in this packet.
455 * This bit controls the invalidation of the L1 and L2 state caches
456 * at the top of the pipe i.e. at the parsing time.
457 *
458 * Unfortunately, experimentation seems to indicate that state cache
459 * invalidation through a PIPE_CONTROL does nothing whatsoever in
460 * regards to surface state and binding tables. Instead, it seems that
461 * invalidating the texture cache is what is actually needed.
462 *
463 * XXX: As far as we have been able to determine through
464 * experimentation, flushing the texture cache appears to be
465 * sufficient. The theory here is that all of the sampling/rendering
466 * units cache the binding table in the texture cache. However, we have
467 * yet to be able to actually confirm this.
468 */
469 iris_emit_end_of_pipe_sync(batch,
470 "change STATE_BASE_ADDRESS (invalidates)",
471 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
472 PIPE_CONTROL_CONST_CACHE_INVALIDATE |
473 PIPE_CONTROL_STATE_CACHE_INVALIDATE);
474 }
475
476 static void
477 _iris_emit_lri(struct iris_batch *batch, uint32_t reg, uint32_t val)
478 {
479 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
480 lri.RegisterOffset = reg;
481 lri.DataDWord = val;
482 }
483 }
484 #define iris_emit_lri(b, r, v) _iris_emit_lri(b, GENX(r##_num), v)
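/* e.g. iris_emit_lri(batch, CACHE_MODE_1, reg_val) expands to an LRI at
 * GENX(CACHE_MODE_1_num), so callers can name registers without spelling
 * out the per-generation MMIO offset. */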
485
486 static void
487 _iris_emit_lrr(struct iris_batch *batch, uint32_t dst, uint32_t src)
488 {
489 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
490 lrr.SourceRegisterAddress = src;
491 lrr.DestinationRegisterAddress = dst;
492 }
493 }
494
495 static void
496 iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
497 uint32_t src)
498 {
499 _iris_emit_lrr(batch, dst, src);
500 }
501
502 static void
503 iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
504 uint32_t src)
505 {
506 _iris_emit_lrr(batch, dst, src);
507 _iris_emit_lrr(batch, dst + 4, src + 4);
508 }
509
510 static void
511 iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
512 uint32_t val)
513 {
514 _iris_emit_lri(batch, reg, val);
515 }
516
517 static void
518 iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
519 uint64_t val)
520 {
521 _iris_emit_lri(batch, reg + 0, val & 0xffffffff);
522 _iris_emit_lri(batch, reg + 4, val >> 32);
523 }
524
525 /**
526 * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
527 */
528 static void
529 iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
530 struct iris_bo *bo, uint32_t offset)
531 {
532 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
533 lrm.RegisterAddress = reg;
534 lrm.MemoryAddress = ro_bo(bo, offset);
535 }
536 }
537
538 /**
539 * Load a 64-bit value from a buffer into a MMIO register via
540 * two MI_LOAD_REGISTER_MEM commands.
541 */
542 static void
543 iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
544 struct iris_bo *bo, uint32_t offset)
545 {
546 iris_load_register_mem32(batch, reg + 0, bo, offset + 0);
547 iris_load_register_mem32(batch, reg + 4, bo, offset + 4);
548 }
549
550 static void
551 iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
552 struct iris_bo *bo, uint32_t offset,
553 bool predicated)
554 {
555 iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
556 srm.RegisterAddress = reg;
557 srm.MemoryAddress = rw_bo(bo, offset);
558 srm.PredicateEnable = predicated;
559 }
560 }
561
562 static void
563 iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
564 struct iris_bo *bo, uint32_t offset,
565 bool predicated)
566 {
567 iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated);
568 iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated);
569 }
570
571 static void
572 iris_store_data_imm32(struct iris_batch *batch,
573 struct iris_bo *bo, uint32_t offset,
574 uint32_t imm)
575 {
576 iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) {
577 sdi.Address = rw_bo(bo, offset);
578 sdi.ImmediateData = imm;
579 }
580 }
581
582 static void
583 iris_store_data_imm64(struct iris_batch *batch,
584 struct iris_bo *bo, uint32_t offset,
585 uint64_t imm)
586 {
587 /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of
588 * 2 in genxml but it's actually variable length and we need 5 DWords.
589 */
590 void *map = iris_get_command_space(batch, 4 * 5);
591 _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) {
592 sdi.DWordLength = 5 - 2;
593 sdi.Address = rw_bo(bo, offset);
594 sdi.ImmediateData = imm;
595 }
596 }
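/* (The "5 - 2" above follows the usual command length convention: the
 * DWordLength field encodes the total packet length in DWords minus two,
 * so a 5-DWord packet (header, two address DWords, two immediate DWords)
 * encodes 3.) */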
597
598 static void
599 iris_copy_mem_mem(struct iris_batch *batch,
600 struct iris_bo *dst_bo, uint32_t dst_offset,
601 struct iris_bo *src_bo, uint32_t src_offset,
602 unsigned bytes)
603 {
604 /* MI_COPY_MEM_MEM operates on DWords. */
605 assert(bytes % 4 == 0);
606 assert(dst_offset % 4 == 0);
607 assert(src_offset % 4 == 0);
608
609 for (unsigned i = 0; i < bytes; i += 4) {
610 iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
611 cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i);
612 cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
613 }
614 }
615 }
616
617 static void
618 emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
619 {
620 #if GEN_GEN >= 8 && GEN_GEN < 10
621 /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
622 *
623 * Software must clear the COLOR_CALC_STATE Valid field in
624 * 3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
625 * with Pipeline Select set to GPGPU.
626 *
627 * The internal hardware docs recommend the same workaround for Gen9
628 * hardware too.
629 */
630 if (pipeline == GPGPU)
631 iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
632 #endif
633
634
635 /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
636 * PIPELINE_SELECT [DevBWR+]":
637 *
638 * "Project: DEVSNB+
639 *
640 * Software must ensure all the write caches are flushed through a
641 * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
642 * command to invalidate read only caches prior to programming
643 * MI_PIPELINE_SELECT command to change the Pipeline Select Mode."
644 */
645 iris_emit_pipe_control_flush(batch,
646 "workaround: PIPELINE_SELECT flushes (1/2)",
647 PIPE_CONTROL_RENDER_TARGET_FLUSH |
648 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
649 PIPE_CONTROL_DATA_CACHE_FLUSH |
650 PIPE_CONTROL_CS_STALL);
651
652 iris_emit_pipe_control_flush(batch,
653 "workaround: PIPELINE_SELECT flushes (2/2)",
654 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
655 PIPE_CONTROL_CONST_CACHE_INVALIDATE |
656 PIPE_CONTROL_STATE_CACHE_INVALIDATE |
657 PIPE_CONTROL_INSTRUCTION_INVALIDATE);
658
659 iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
660 #if GEN_GEN >= 9
661 sel.MaskBits = 3;
662 #endif
663 sel.PipelineSelection = pipeline;
664 }
665 }
666
667 UNUSED static void
668 init_glk_barrier_mode(struct iris_batch *batch, uint32_t value)
669 {
670 #if GEN_GEN == 9
671 /* Project: DevGLK
672 *
673 * "This chicken bit works around a hardware issue with barrier
674 * logic encountered when switching between GPGPU and 3D pipelines.
675 * To workaround the issue, this mode bit should be set after a
676 * pipeline is selected."
677 */
678 uint32_t reg_val;
679 iris_pack_state(GENX(SLICE_COMMON_ECO_CHICKEN1), &reg_val, reg) {
680 reg.GLKBarrierMode = value;
681 reg.GLKBarrierModeMask = 1;
682 }
683 iris_emit_lri(batch, SLICE_COMMON_ECO_CHICKEN1, reg_val);
684 #endif
685 }
686
687 static void
688 init_state_base_address(struct iris_batch *batch)
689 {
690 flush_before_state_base_change(batch);
691
692 /* We program most base addresses once at context initialization time.
693 * Each base address points at a 4GB memory zone, and never needs to
694 * change. See iris_bufmgr.h for a description of the memory zones.
695 *
696 * The one exception is Surface State Base Address, which needs to be
697 * updated occasionally. See iris_binder.c for the details there.
698 */
699 iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
700 sba.GeneralStateMOCS = MOCS_WB;
701 sba.StatelessDataPortAccessMOCS = MOCS_WB;
702 sba.DynamicStateMOCS = MOCS_WB;
703 sba.IndirectObjectMOCS = MOCS_WB;
704 sba.InstructionMOCS = MOCS_WB;
705 sba.SurfaceStateMOCS = MOCS_WB;
706
707 sba.GeneralStateBaseAddressModifyEnable = true;
708 sba.DynamicStateBaseAddressModifyEnable = true;
709 sba.IndirectObjectBaseAddressModifyEnable = true;
710 sba.InstructionBaseAddressModifyEnable = true;
711 sba.GeneralStateBufferSizeModifyEnable = true;
712 sba.DynamicStateBufferSizeModifyEnable = true;
713 #if (GEN_GEN >= 9)
714 sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
715 sba.BindlessSurfaceStateMOCS = MOCS_WB;
716 #endif
717 sba.IndirectObjectBufferSizeModifyEnable = true;
718 sba.InstructionBuffersizeModifyEnable = true;
719
720 sba.InstructionBaseAddress = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
721 sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);
722
723 sba.GeneralStateBufferSize = 0xfffff;
724 sba.IndirectObjectBufferSize = 0xfffff;
725 sba.InstructionBufferSize = 0xfffff;
726 sba.DynamicStateBufferSize = 0xfffff;
727 }
728
729 flush_after_state_base_change(batch);
730 }
731
732 static void
733 iris_emit_l3_config(struct iris_batch *batch, const struct gen_l3_config *cfg,
734 bool has_slm, bool wants_dc_cache)
735 {
736 uint32_t reg_val;
737
738 #if GEN_GEN >= 12
739 #define L3_ALLOCATION_REG GENX(L3ALLOC)
740 #define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
741 #else
742 #define L3_ALLOCATION_REG GENX(L3CNTLREG)
743 #define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
744 #endif
745
746 iris_pack_state(L3_ALLOCATION_REG, &reg_val, reg) {
747 #if GEN_GEN < 12
748 reg.SLMEnable = has_slm;
749 #endif
750 #if GEN_GEN == 11
751 /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
752 * in L3CNTLREG register. The default setting of the bit is not the
753 * desirable behavior.
754 */
755 reg.ErrorDetectionBehaviorControl = true;
756 reg.UseFullWays = true;
757 #endif
758 reg.URBAllocation = cfg->n[GEN_L3P_URB];
759 reg.ROAllocation = cfg->n[GEN_L3P_RO];
760 reg.DCAllocation = cfg->n[GEN_L3P_DC];
761 reg.AllAllocation = cfg->n[GEN_L3P_ALL];
762 }
763 _iris_emit_lri(batch, L3_ALLOCATION_REG_num, reg_val);
764 }
765
766 static void
767 iris_emit_default_l3_config(struct iris_batch *batch,
768 const struct gen_device_info *devinfo,
769 bool compute)
770 {
771 bool wants_dc_cache = true;
772 bool has_slm = compute;
773 const struct gen_l3_weights w =
774 gen_get_default_l3_weights(devinfo, wants_dc_cache, has_slm);
775 const struct gen_l3_config *cfg = gen_get_l3_config(devinfo, w);
776 iris_emit_l3_config(batch, cfg, has_slm, wants_dc_cache);
777 }
778
779 #if GEN_GEN == 9 || GEN_GEN == 10
780 static void
781 iris_enable_obj_preemption(struct iris_batch *batch, bool enable)
782 {
783 uint32_t reg_val;
784
785 /* A fixed function pipe flush is required before modifying this field */
786 iris_emit_end_of_pipe_sync(batch, enable ? "enable preemption"
787 : "disable preemption",
788 PIPE_CONTROL_RENDER_TARGET_FLUSH);
789
790 /* enable object level preemption */
791 iris_pack_state(GENX(CS_CHICKEN1), &reg_val, reg) {
792 reg.ReplayMode = enable;
793 reg.ReplayModeMask = true;
794 }
795 iris_emit_lri(batch, CS_CHICKEN1, reg_val);
796 }
797 #endif
798
799 #if GEN_GEN == 11
800 static void
801 iris_upload_slice_hashing_state(struct iris_batch *batch)
802 {
803 const struct gen_device_info *devinfo = &batch->screen->devinfo;
804 int subslices_delta =
805 devinfo->ppipe_subslices[0] - devinfo->ppipe_subslices[1];
806 if (subslices_delta == 0)
807 return;
808
809 struct iris_context *ice = NULL;
810 ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]);
811 assert(&ice->batches[IRIS_BATCH_RENDER] == batch);
812
813 unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
814 uint32_t hash_address;
815 struct pipe_resource *tmp = NULL;
816 uint32_t *map =
817 stream_state(batch, ice->state.dynamic_uploader, &tmp,
818 size, 64, &hash_address);
819 pipe_resource_reference(&tmp, NULL);
820
821 struct GENX(SLICE_HASH_TABLE) table0 = {
822 .Entry = {
823 { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
824 { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
825 { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
826 { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
827 { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
828 { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
829 { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
830 { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
831 { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
832 { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
833 { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
834 { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
835 { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
836 { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
837 { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
838 { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }
839 }
840 };
841
842 struct GENX(SLICE_HASH_TABLE) table1 = {
843 .Entry = {
844 { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
845 { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
846 { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
847 { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
848 { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
849 { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
850 { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
851 { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
852 { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
853 { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
854 { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
855 { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
856 { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
857 { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
858 { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
859 { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }
860 }
861 };
862
863 const struct GENX(SLICE_HASH_TABLE) *table =
864 subslices_delta < 0 ? &table0 : &table1;
865 GENX(SLICE_HASH_TABLE_pack)(NULL, map, table);
866
867 iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
868 ptr.SliceHashStatePointerValid = true;
869 ptr.SliceHashTableStatePointer = hash_address;
870 }
871
872 iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) {
873 mode.SliceHashingTableEnable = true;
874 }
875 }
876 #endif
877
878 static void
879 iris_alloc_push_constants(struct iris_batch *batch)
880 {
881 /* For now, we set a static partitioning of the push constant area,
882 * assuming that all stages could be in use.
883 *
884 * TODO: Try lazily allocating the HS/DS/GS sections as needed, and
885 * see if that improves performance by offering more space to
886 * the VS/FS when those aren't in use. Also, try dynamically
887 * enabling/disabling it like i965 does. This would mean more
888 * stalls and may not actually help; we don't know yet.
889 */
890 for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
891 iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
892 alloc._3DCommandSubOpcode = 18 + i;
893 alloc.ConstantBufferOffset = 6 * i;
894 alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
895 }
896 }
897 }
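/* With the static split above, VS/HS/DS/GS each get 6 units of push
 * constant space at offsets 0/6/12/18, and the FS gets 8 units at offset
 * 24, for 4 * 6 + 8 = 32 units total (the unit granularity is defined by
 * 3DSTATE_PUSH_CONSTANT_ALLOC_* in the PRMs and varies by generation). */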
898
899 /**
900 * Upload the initial GPU state for a render context.
901 *
902 * This sets some invariant state that needs to be programmed a particular
903 * way, but we never actually change.
904 */
905 static void
906 iris_init_render_context(struct iris_batch *batch)
907 {
908 UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
909 uint32_t reg_val;
910
911 emit_pipeline_select(batch, _3D);
912
913 iris_emit_default_l3_config(batch, devinfo, false);
914
915 init_state_base_address(batch);
916
917 #if GEN_GEN >= 9
918 iris_pack_state(GENX(CS_DEBUG_MODE2), &reg_val, reg) {
919 reg.CONSTANT_BUFFERAddressOffsetDisable = true;
920 reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
921 }
922 iris_emit_lri(batch, CS_DEBUG_MODE2, reg_val);
923 #else
924 iris_pack_state(GENX(INSTPM), &reg_val, reg) {
925 reg.CONSTANT_BUFFERAddressOffsetDisable = true;
926 reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
927 }
928 iris_emit_lri(batch, INSTPM, reg_val);
929 #endif
930
931 #if GEN_GEN == 9
932 iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
933 reg.FloatBlendOptimizationEnable = true;
934 reg.FloatBlendOptimizationEnableMask = true;
935 reg.PartialResolveDisableInVC = true;
936 reg.PartialResolveDisableInVCMask = true;
937 }
938 iris_emit_lri(batch, CACHE_MODE_1, reg_val);
939
940 if (devinfo->is_geminilake)
941 init_glk_barrier_mode(batch, GLK_BARRIER_MODE_3D_HULL);
942 #endif
943
944 #if GEN_GEN == 11
945 iris_pack_state(GENX(SAMPLER_MODE), &reg_val, reg) {
946 reg.HeaderlessMessageforPreemptableContexts = 1;
947 reg.HeaderlessMessageforPreemptableContextsMask = 1;
948 }
949 iris_emit_lri(batch, SAMPLER_MODE, reg_val);
950
951 /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */
952 iris_pack_state(GENX(HALF_SLICE_CHICKEN7), &reg_val, reg) {
953 reg.EnabledTexelOffsetPrecisionFix = 1;
954 reg.EnabledTexelOffsetPrecisionFixMask = 1;
955 }
956 iris_emit_lri(batch, HALF_SLICE_CHICKEN7, reg_val);
957
958 /* The hardware specification recommends disabling repacking for
959 * compatibility with the decompression mechanism in the display controller.
960 */
961 if (devinfo->disable_ccs_repack) {
962 iris_pack_state(GENX(CACHE_MODE_0), &reg_val, reg) {
963 reg.DisableRepackingforCompression = true;
964 reg.DisableRepackingforCompressionMask = true;
965 }
966 iris_emit_lri(batch, CACHE_MODE_0, reg_val);
967 }
968
969 iris_upload_slice_hashing_state(batch);
970 #endif
971
972 /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
973 * changing it dynamically. We set it to the maximum size here, and
974 * instead include the render target dimensions in the viewport, so
975 * viewport extents clipping takes care of pruning stray geometry.
976 */
977 iris_emit_cmd(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
978 rect.ClippedDrawingRectangleXMax = UINT16_MAX;
979 rect.ClippedDrawingRectangleYMax = UINT16_MAX;
980 }
981
982 /* Set the initial MSAA sample positions. */
983 iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_PATTERN), pat) {
984 GEN_SAMPLE_POS_1X(pat._1xSample);
985 GEN_SAMPLE_POS_2X(pat._2xSample);
986 GEN_SAMPLE_POS_4X(pat._4xSample);
987 GEN_SAMPLE_POS_8X(pat._8xSample);
988 #if GEN_GEN >= 9
989 GEN_SAMPLE_POS_16X(pat._16xSample);
990 #endif
991 }
992
993 /* Use the legacy AA line coverage computation. */
994 iris_emit_cmd(batch, GENX(3DSTATE_AA_LINE_PARAMETERS), foo);
995
996 /* Disable chromakeying (it's for media) */
997 iris_emit_cmd(batch, GENX(3DSTATE_WM_CHROMAKEY), foo);
998
999 /* We want regular rendering, not special HiZ operations. */
1000 iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);
1001
1002 /* No polygon stippling offsets are necessary. */
1003 /* TODO: may need to set an offset for origin-UL framebuffers */
1004 iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);
1005
1006 iris_alloc_push_constants(batch);
1007
1008 #if GEN_GEN == 10
1009 /* Gen11+ is enabled for us by the kernel. */
1010 iris_enable_obj_preemption(batch, true);
1011 #endif
1012 }
1013
1014 static void
1015 iris_init_compute_context(struct iris_batch *batch)
1016 {
1017 UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
1018
1019 emit_pipeline_select(batch, GPGPU);
1020
1021 iris_emit_default_l3_config(batch, devinfo, true);
1022
1023 init_state_base_address(batch);
1024
1025 #if GEN_GEN == 9
1026 if (devinfo->is_geminilake)
1027 init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
1028 #endif
1029 }
1030
1031 struct iris_vertex_buffer_state {
1032 /** The VERTEX_BUFFER_STATE hardware structure. */
1033 uint32_t state[GENX(VERTEX_BUFFER_STATE_length)];
1034
1035 /** The resource to source vertex data from. */
1036 struct pipe_resource *resource;
1037
1038 int offset;
1039 };
1040
1041 struct iris_depth_buffer_state {
1042 /* Depth/HiZ/Stencil related hardware packets. */
1043 uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
1044 GENX(3DSTATE_STENCIL_BUFFER_length) +
1045 GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
1046 GENX(3DSTATE_CLEAR_PARAMS_length)];
1047 };
1048
1049 /**
1050 * Generation-specific context state (ice->state.genx->...).
1051 *
1052 * Most state can go in iris_context directly, but these encode hardware
1053 * packets which vary by generation.
1054 */
1055 struct iris_genx_state {
1056 struct iris_vertex_buffer_state vertex_buffers[33];
1057 uint32_t last_index_buffer[GENX(3DSTATE_INDEX_BUFFER_length)];
1058
1059 struct iris_depth_buffer_state depth_buffer;
1060
1061 uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];
1062
1063 #if GEN_GEN == 8
1064 bool pma_fix_enabled;
1065 #endif
1066
1067 #if GEN_GEN == 9
1068 /* Is object level preemption enabled? */
1069 bool object_preemption;
1070 #endif
1071
1072 struct {
1073 #if GEN_GEN == 8
1074 struct brw_image_param image_param[PIPE_MAX_SHADER_IMAGES];
1075 #endif
1076 } shaders[MESA_SHADER_STAGES];
1077 };
1078
1079 /**
1080 * The pipe->set_blend_color() driver hook.
1081 *
1082 * This corresponds to our COLOR_CALC_STATE.
1083 */
1084 static void
1085 iris_set_blend_color(struct pipe_context *ctx,
1086 const struct pipe_blend_color *state)
1087 {
1088 struct iris_context *ice = (struct iris_context *) ctx;
1089
1090 /* Our COLOR_CALC_STATE is exactly pipe_blend_color, so just memcpy */
1091 memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
1092 ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
1093 }
1094
1095 /**
1096 * Gallium CSO for blend state (see pipe_blend_state).
1097 */
1098 struct iris_blend_state {
1099 /** Partial 3DSTATE_PS_BLEND */
1100 uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];
1101
1102 /** Partial BLEND_STATE */
1103 uint32_t blend_state[GENX(BLEND_STATE_length) +
1104 BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)];
1105
1106 bool alpha_to_coverage; /* for shader key */
1107
1108 /** Bitfield of whether blending is enabled for RT[i] - for aux resolves */
1109 uint8_t blend_enables;
1110
1111 /** Bitfield of whether color writes are enabled for RT[i] */
1112 uint8_t color_write_enables;
1113
1114 /** Does RT[0] use dual color blending? */
1115 bool dual_color_blending;
1116 };
1117
1118 static enum pipe_blendfactor
1119 fix_blendfactor(enum pipe_blendfactor f, bool alpha_to_one)
1120 {
1121 if (alpha_to_one) {
1122 if (f == PIPE_BLENDFACTOR_SRC1_ALPHA)
1123 return PIPE_BLENDFACTOR_ONE;
1124
1125 if (f == PIPE_BLENDFACTOR_INV_SRC1_ALPHA)
1126 return PIPE_BLENDFACTOR_ZERO;
1127 }
1128
1129 return f;
1130 }
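/* For example, when alpha-to-one is enabled the hardware must blend as if
 * src1.alpha were 1.0, so the pair (SRC1_ALPHA, INV_SRC1_ALPHA) collapses
 * to (ONE, ZERO) before being packed into the blend packets below. */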
1131
1132 /**
1133 * The pipe->create_blend_state() driver hook.
1134 *
1135 * Translates a pipe_blend_state into iris_blend_state.
1136 */
1137 static void *
1138 iris_create_blend_state(struct pipe_context *ctx,
1139 const struct pipe_blend_state *state)
1140 {
1141 struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
1142 uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);
1143
1144 cso->blend_enables = 0;
1145 cso->color_write_enables = 0;
1146 STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS <= 8);
1147
1148 cso->alpha_to_coverage = state->alpha_to_coverage;
1149
1150 bool indep_alpha_blend = false;
1151
1152 for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
1153 const struct pipe_rt_blend_state *rt =
1154 &state->rt[state->independent_blend_enable ? i : 0];
1155
1156 enum pipe_blendfactor src_rgb =
1157 fix_blendfactor(rt->rgb_src_factor, state->alpha_to_one);
1158 enum pipe_blendfactor src_alpha =
1159 fix_blendfactor(rt->alpha_src_factor, state->alpha_to_one);
1160 enum pipe_blendfactor dst_rgb =
1161 fix_blendfactor(rt->rgb_dst_factor, state->alpha_to_one);
1162 enum pipe_blendfactor dst_alpha =
1163 fix_blendfactor(rt->alpha_dst_factor, state->alpha_to_one);
1164
1165 if (rt->rgb_func != rt->alpha_func ||
1166 src_rgb != src_alpha || dst_rgb != dst_alpha)
1167 indep_alpha_blend = true;
1168
1169 if (rt->blend_enable)
1170 cso->blend_enables |= 1u << i;
1171
1172 if (rt->colormask)
1173 cso->color_write_enables |= 1u << i;
1174
1175 iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
1176 be.LogicOpEnable = state->logicop_enable;
1177 be.LogicOpFunction = state->logicop_func;
1178
1179 be.PreBlendSourceOnlyClampEnable = false;
1180 be.ColorClampRange = COLORCLAMP_RTFORMAT;
1181 be.PreBlendColorClampEnable = true;
1182 be.PostBlendColorClampEnable = true;
1183
1184 be.ColorBufferBlendEnable = rt->blend_enable;
1185
1186 be.ColorBlendFunction = rt->rgb_func;
1187 be.AlphaBlendFunction = rt->alpha_func;
1188 be.SourceBlendFactor = src_rgb;
1189 be.SourceAlphaBlendFactor = src_alpha;
1190 be.DestinationBlendFactor = dst_rgb;
1191 be.DestinationAlphaBlendFactor = dst_alpha;
1192
1193 be.WriteDisableRed = !(rt->colormask & PIPE_MASK_R);
1194 be.WriteDisableGreen = !(rt->colormask & PIPE_MASK_G);
1195 be.WriteDisableBlue = !(rt->colormask & PIPE_MASK_B);
1196 be.WriteDisableAlpha = !(rt->colormask & PIPE_MASK_A);
1197 }
1198 blend_entry += GENX(BLEND_STATE_ENTRY_length);
1199 }
1200
1201 iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
1202 /* pb.HasWriteableRT is filled in at draw time.
1203 * pb.AlphaTestEnable is filled in at draw time.
1204 *
1205 * pb.ColorBufferBlendEnable is filled in at draw time so we can avoid
1206 * setting it when dual color blending without an appropriate shader.
1207 */
1208
1209 pb.AlphaToCoverageEnable = state->alpha_to_coverage;
1210 pb.IndependentAlphaBlendEnable = indep_alpha_blend;
1211
1212 pb.SourceBlendFactor =
1213 fix_blendfactor(state->rt[0].rgb_src_factor, state->alpha_to_one);
1214 pb.SourceAlphaBlendFactor =
1215 fix_blendfactor(state->rt[0].alpha_src_factor, state->alpha_to_one);
1216 pb.DestinationBlendFactor =
1217 fix_blendfactor(state->rt[0].rgb_dst_factor, state->alpha_to_one);
1218 pb.DestinationAlphaBlendFactor =
1219 fix_blendfactor(state->rt[0].alpha_dst_factor, state->alpha_to_one);
1220 }
1221
1222 iris_pack_state(GENX(BLEND_STATE), cso->blend_state, bs) {
1223 bs.AlphaToCoverageEnable = state->alpha_to_coverage;
1224 bs.IndependentAlphaBlendEnable = indep_alpha_blend;
1225 bs.AlphaToOneEnable = state->alpha_to_one;
1226 bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
1227 bs.ColorDitherEnable = state->dither;
1228 /* bs.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
1229 }
1230
1231 cso->dual_color_blending = util_blend_state_is_dual(state, 0);
1232
1233 return cso;
1234 }
1235
1236 /**
1237 * The pipe->bind_blend_state() driver hook.
1238 *
1239 * Bind a blending CSO and flag related dirty bits.
1240 */
1241 static void
1242 iris_bind_blend_state(struct pipe_context *ctx, void *state)
1243 {
1244 struct iris_context *ice = (struct iris_context *) ctx;
1245 struct iris_blend_state *cso = state;
1246
1247 ice->state.cso_blend = cso;
1248 ice->state.blend_enables = cso ? cso->blend_enables : 0;
1249
1250 ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
1251 ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
1252 ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
1253 ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_BLEND];
1254
1255 if (GEN_GEN == 8)
1256 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
1257 }
1258
1259 /**
1260 * Return true if the FS writes to any color outputs which are not disabled
1261 * via color masking.
1262 */
1263 static bool
1264 has_writeable_rt(const struct iris_blend_state *cso_blend,
1265 const struct shader_info *fs_info)
1266 {
1267 if (!fs_info)
1268 return false;
1269
1270 unsigned rt_outputs = fs_info->outputs_written >> FRAG_RESULT_DATA0;
1271
1272 if (fs_info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR))
1273 rt_outputs = (1 << BRW_MAX_DRAW_BUFFERS) - 1;
1274
1275 return cso_blend->color_write_enables & rt_outputs;
1276 }
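/* e.g. a fragment shader writing only FRAG_RESULT_DATA2 yields
 * rt_outputs == 0x4, while one writing the legacy FRAG_RESULT_COLOR
 * broadcasts to every render target, so all color-write-enable bits are
 * taken into account. */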
1277
1278 /**
1279 * Gallium CSO for depth, stencil, and alpha testing state.
1280 */
1281 struct iris_depth_stencil_alpha_state {
1282 /** Partial 3DSTATE_WM_DEPTH_STENCIL. */
1283 uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
1284
1285 #if GEN_GEN >= 12
1286 uint32_t depth_bounds[GENX(3DSTATE_DEPTH_BOUNDS_length)];
1287 #endif
1288
1289 /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
1290 struct pipe_alpha_state alpha;
1291
1292 /** Outbound to resolve and cache set tracking. */
1293 bool depth_writes_enabled;
1294 bool stencil_writes_enabled;
1295
1296 /** Outbound to Gen8-9 PMA stall equations */
1297 bool depth_test_enabled;
1298 };
1299
1300 /**
1301 * The pipe->create_depth_stencil_alpha_state() driver hook.
1302 *
1303 * We encode most of 3DSTATE_WM_DEPTH_STENCIL, and just save off the alpha
1304 * testing state since we need pieces of it in a variety of places.
1305 */
1306 static void *
1307 iris_create_zsa_state(struct pipe_context *ctx,
1308 const struct pipe_depth_stencil_alpha_state *state)
1309 {
1310 struct iris_depth_stencil_alpha_state *cso =
1311 malloc(sizeof(struct iris_depth_stencil_alpha_state));
1312
1313 bool two_sided_stencil = state->stencil[1].enabled;
1314
1315 cso->alpha = state->alpha;
1316 cso->depth_writes_enabled = state->depth.writemask;
1317 cso->depth_test_enabled = state->depth.enabled;
1318 cso->stencil_writes_enabled =
1319 state->stencil[0].writemask != 0 ||
1320 (two_sided_stencil && state->stencil[1].writemask != 0);
1321
1322 /* The state tracker needs to optimize away EQUAL writes for us. */
1323 assert(!(state->depth.func == PIPE_FUNC_EQUAL && state->depth.writemask));
1324
1325 iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), cso->wmds, wmds) {
1326 wmds.StencilFailOp = state->stencil[0].fail_op;
1327 wmds.StencilPassDepthFailOp = state->stencil[0].zfail_op;
1328 wmds.StencilPassDepthPassOp = state->stencil[0].zpass_op;
1329 wmds.StencilTestFunction =
1330 translate_compare_func(state->stencil[0].func);
1331 wmds.BackfaceStencilFailOp = state->stencil[1].fail_op;
1332 wmds.BackfaceStencilPassDepthFailOp = state->stencil[1].zfail_op;
1333 wmds.BackfaceStencilPassDepthPassOp = state->stencil[1].zpass_op;
1334 wmds.BackfaceStencilTestFunction =
1335 translate_compare_func(state->stencil[1].func);
1336 wmds.DepthTestFunction = translate_compare_func(state->depth.func);
1337 wmds.DoubleSidedStencilEnable = two_sided_stencil;
1338 wmds.StencilTestEnable = state->stencil[0].enabled;
1339 wmds.StencilBufferWriteEnable =
1340 state->stencil[0].writemask != 0 ||
1341 (two_sided_stencil && state->stencil[1].writemask != 0);
1342 wmds.DepthTestEnable = state->depth.enabled;
1343 wmds.DepthBufferWriteEnable = state->depth.writemask;
1344 wmds.StencilTestMask = state->stencil[0].valuemask;
1345 wmds.StencilWriteMask = state->stencil[0].writemask;
1346 wmds.BackfaceStencilTestMask = state->stencil[1].valuemask;
1347 wmds.BackfaceStencilWriteMask = state->stencil[1].writemask;
1348 /* wmds.[Backface]StencilReferenceValue are merged later */
1349 }
1350
1351 #if GEN_GEN >= 12
1352 iris_pack_command(GENX(3DSTATE_DEPTH_BOUNDS), cso->depth_bounds, depth_bounds) {
1353 depth_bounds.DepthBoundsTestValueModifyDisable = false;
1354 depth_bounds.DepthBoundsTestEnableModifyDisable = false;
1355 depth_bounds.DepthBoundsTestEnable = state->depth.bounds_test;
1356 depth_bounds.DepthBoundsTestMinValue = state->depth.bounds_min;
1357 depth_bounds.DepthBoundsTestMaxValue = state->depth.bounds_max;
1358 }
1359 #endif
1360
1361 return cso;
1362 }
1363
1364 /**
1365 * The pipe->bind_depth_stencil_alpha_state() driver hook.
1366 *
1367 * Bind a depth/stencil/alpha CSO and flag related dirty bits.
1368 */
1369 static void
1370 iris_bind_zsa_state(struct pipe_context *ctx, void *state)
1371 {
1372 struct iris_context *ice = (struct iris_context *) ctx;
1373 struct iris_depth_stencil_alpha_state *old_cso = ice->state.cso_zsa;
1374 struct iris_depth_stencil_alpha_state *new_cso = state;
1375
1376 if (new_cso) {
1377 if (cso_changed(alpha.ref_value))
1378 ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
1379
1380 if (cso_changed(alpha.enabled))
1381 ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;
1382
1383 if (cso_changed(alpha.func))
1384 ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
1385
1386 if (cso_changed(depth_writes_enabled))
1387 ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
1388
1389 ice->state.depth_writes_enabled = new_cso->depth_writes_enabled;
1390 ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled;
1391
1392 #if GEN_GEN >= 12
1393 if (cso_changed(depth_bounds))
1394 ice->state.dirty |= IRIS_DIRTY_DEPTH_BOUNDS;
1395 #endif
1396 }
1397
1398 ice->state.cso_zsa = new_cso;
1399 ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
1400 ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
1401 ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];
1402
1403 if (GEN_GEN == 8)
1404 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
1405 }
1406
1407 #if GEN_GEN == 8
1408 static bool
1409 want_pma_fix(struct iris_context *ice)
1410 {
1411 UNUSED struct iris_screen *screen = (void *) ice->ctx.screen;
1412 UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
1413 const struct brw_wm_prog_data *wm_prog_data = (void *)
1414 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
1415 const struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
1416 const struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
1417 const struct iris_blend_state *cso_blend = ice->state.cso_blend;
1418
1419 /* In very specific combinations of state, we can instruct Gen8-9 hardware
1420 * to avoid stalling at the pixel mask array. The state equations are
1421 * documented in these places:
1422 *
1423 * - Gen8 Depth PMA Fix: CACHE_MODE_1::NP_PMA_FIX_ENABLE
1424 * - Gen9 Stencil PMA Fix: CACHE_MODE_0::STC PMA Optimization Enable
1425 *
1426 * Both equations share some common elements:
1427 *
1428 * no_hiz_op =
1429 * !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
1430 * 3DSTATE_WM_HZ_OP::DepthBufferResolve ||
1431 * 3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
1432 * 3DSTATE_WM_HZ_OP::StencilBufferClear) &&
1433 *
1434 * killpixels =
1435 * 3DSTATE_WM::ForceKillPix != ForceOff &&
1436 * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
1437 * 3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
1438 * 3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
1439 * 3DSTATE_PS_BLEND::AlphaTestEnable ||
1440 * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
1441 *
1442 * (Technically the stencil PMA treats ForceKillPix differently,
1443 * but I think this is a documentation oversight, and we don't
1444 * ever use it in this way, so it doesn't matter).
1445 *
1446 * common_pma_fix =
1447 * 3DSTATE_WM::ForceThreadDispatch != 1 &&
1448 * 3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0 &&
1449 * 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
1450 * 3DSTATE_DEPTH_BUFFER::HIZ Enable &&
1451 * 3DSTATE_WM::EDSC_Mode != EDSC_PREPS &&
1452 * 3DSTATE_PS_EXTRA::PixelShaderValid &&
1453 * no_hiz_op
1454 *
1455 * These are always true:
1456 *
1457 * 3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0
1458 * 3DSTATE_PS_EXTRA::PixelShaderValid
1459 *
1460 * Also, we never use the normal drawing path for HiZ ops; these are true:
1461 *
1462 * !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
1463 * 3DSTATE_WM_HZ_OP::DepthBufferResolve ||
1464 * 3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
1465 * 3DSTATE_WM_HZ_OP::StencilBufferClear)
1466 *
1467 * This happens sometimes:
1468 *
1469 * 3DSTATE_WM::ForceThreadDispatch != 1
1470 *
1471 * However, we choose to ignore it as it either agrees with the signal
1472 * (dispatch was already enabled, so nothing out of the ordinary), or
1473 * there are no framebuffer attachments (so no depth or HiZ anyway,
1474 * meaning the PMA signal will already be disabled).
1475 */
1476
1477 if (!cso_fb->zsbuf)
1478 return false;
1479
1480 struct iris_resource *zres, *sres;
1481 iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, &zres, &sres);
1482
1483 /* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
1484 * 3DSTATE_DEPTH_BUFFER::HIZ Enable &&
1485 */
1486 if (!zres || !iris_resource_level_has_hiz(zres, cso_fb->zsbuf->u.tex.level))
1487 return false;
1488
1489 /* 3DSTATE_WM::EDSC_Mode != EDSC_PREPS */
1490 if (wm_prog_data->early_fragment_tests)
1491 return false;
1492
1493 /* 3DSTATE_WM::ForceKillPix != ForceOff &&
1494 * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
1495 * 3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
1496 * 3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
1497 * 3DSTATE_PS_BLEND::AlphaTestEnable ||
1498 * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
1499 */
1500 bool killpixels = wm_prog_data->uses_kill || wm_prog_data->uses_omask ||
1501 cso_blend->alpha_to_coverage || cso_zsa->alpha.enabled;
1502
1503 /* The Gen8 depth PMA equation becomes:
1504 *
1505 * depth_writes =
1506 * 3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
1507 * 3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE
1508 *
1509 * stencil_writes =
1510 * 3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
1511 * 3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE &&
1512 * 3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE
1513 *
1514 * Z_PMA_OPT =
1515 * common_pma_fix &&
1516 * 3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable &&
1517 * ((killpixels && (depth_writes || stencil_writes)) ||
1518 * 3DSTATE_PS_EXTRA::PixelShaderComputedDepthMode != PSCDEPTH_OFF)
1519 *
1520 */
1521 if (!cso_zsa->depth_test_enabled)
1522 return false;
1523
1524 return wm_prog_data->computed_depth_mode != PSCDEPTH_OFF ||
1525 (killpixels && (cso_zsa->depth_writes_enabled ||
1526 (sres && cso_zsa->stencil_writes_enabled)));
1527 }
1528 #endif
1529
1530 void
1531 genX(update_pma_fix)(struct iris_context *ice,
1532 struct iris_batch *batch,
1533 bool enable)
1534 {
1535 #if GEN_GEN == 8
1536 struct iris_genx_state *genx = ice->state.genx;
1537
1538 if (genx->pma_fix_enabled == enable)
1539 return;
1540
1541 genx->pma_fix_enabled = enable;
1542
1543 /* According to the Broadwell PIPE_CONTROL documentation, software should
1544 * emit a PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set
1545 * prior to the LRI. If stencil buffer writes are enabled, then a Render Cache Flush is also necessary.
1546 *
1547 * The Gen9 docs say to use a depth stall rather than a command streamer
1548 * stall. However, the hardware seems to violently disagree. A full
1549 * command streamer stall seems to be needed in both cases.
1550 */
1551 iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)",
1552 PIPE_CONTROL_CS_STALL |
1553 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1554 PIPE_CONTROL_RENDER_TARGET_FLUSH);
1555
1556 uint32_t reg_val;
1557 iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
1558 reg.NPPMAFixEnable = enable;
1559 reg.NPEarlyZFailsDisable = enable;
1560 reg.NPPMAFixEnableMask = true;
1561 reg.NPEarlyZFailsDisableMask = true;
1562 }
1563 iris_emit_lri(batch, CACHE_MODE_1, reg_val);
1564
1565 /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
1566 * Flush bits is often necessary. We do it regardless because it's easier.
1567 * The render cache flush is also necessary if stencil writes are enabled.
1568 *
1569 * Again, the Gen9 docs give a different set of flushes but the Broadwell
1570 * flushes seem to work just as well.
1571 */
1572 iris_emit_pipe_control_flush(batch, "PMA fix change (2/2)",
1573 PIPE_CONTROL_DEPTH_STALL |
1574 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1575 PIPE_CONTROL_RENDER_TARGET_FLUSH);
1576 #endif
1577 }
1578
1579 /**
1580 * Gallium CSO for rasterizer state.
1581 */
1582 struct iris_rasterizer_state {
1583 uint32_t sf[GENX(3DSTATE_SF_length)];
1584 uint32_t clip[GENX(3DSTATE_CLIP_length)];
1585 uint32_t raster[GENX(3DSTATE_RASTER_length)];
1586 uint32_t wm[GENX(3DSTATE_WM_length)];
1587 uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];
1588
1589 uint8_t num_clip_plane_consts;
1590 bool clip_halfz; /* for CC_VIEWPORT */
1591 bool depth_clip_near; /* for CC_VIEWPORT */
1592 bool depth_clip_far; /* for CC_VIEWPORT */
1593 bool flatshade; /* for shader state */
1594 bool flatshade_first; /* for stream output */
1595 bool clamp_fragment_color; /* for shader state */
1596 bool light_twoside; /* for shader state */
1597 bool rasterizer_discard; /* for 3DSTATE_STREAMOUT and 3DSTATE_CLIP */
1598 bool half_pixel_center; /* for 3DSTATE_MULTISAMPLE */
1599 bool line_stipple_enable;
1600 bool poly_stipple_enable;
1601 bool multisample;
1602 bool force_persample_interp;
1603 bool conservative_rasterization;
1604 bool fill_mode_point_or_line;
1605 enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
1606 uint16_t sprite_coord_enable;
1607 };
1608
1609 static float
1610 get_line_width(const struct pipe_rasterizer_state *state)
1611 {
1612 float line_width = state->line_width;
1613
1614 /* From the OpenGL 4.4 spec:
1615 *
1616 * "The actual width of non-antialiased lines is determined by rounding
1617 * the supplied width to the nearest integer, then clamping it to the
1618 * implementation-dependent maximum non-antialiased line width."
1619 */
1620 if (!state->multisample && !state->line_smooth)
1621 line_width = roundf(state->line_width);
1622
1623 if (!state->multisample && state->line_smooth && line_width < 1.5f) {
1624 /* For 1 pixel line thickness or less, the general anti-aliasing
1625 * algorithm gives up, and a garbage line is generated. Setting a
1626 * Line Width of 0.0 specifies the rasterization of the "thinnest"
1627 * (one-pixel-wide), non-antialiased lines.
1628 *
1629 * Lines rendered with zero Line Width are rasterized using the
1630 * "Grid Intersection Quantization" rules as specified by the
1631 * "Zero-Width (Cosmetic) Line Rasterization" section of the docs.
1632 */
1633 line_width = 0.0f;
1634 }
1635
1636 return line_width;
1637 }
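/* A few illustrative inputs (not from the original source) and the widths
 * the rules above produce:
 *
 *   multisample  line_smooth  line_width  ->  programmed LineWidth
 *   false        false        1.4             1.0  (rounded)
 *   false        false        1.6             2.0  (rounded)
 *   false        true         1.2             0.0  (GIQ "cosmetic" lines)
 *   false        true         1.8             1.8  (AA path, >= 1.5)
 *   true         (any)        1.3             1.3  (passed through)
 */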
1638
1639 /**
1640 * The pipe->create_rasterizer_state() driver hook.
1641 */
1642 static void *
1643 iris_create_rasterizer_state(struct pipe_context *ctx,
1644 const struct pipe_rasterizer_state *state)
1645 {
1646 struct iris_rasterizer_state *cso =
1647 malloc(sizeof(struct iris_rasterizer_state));
1648
1649 cso->multisample = state->multisample;
1650 cso->force_persample_interp = state->force_persample_interp;
1651 cso->clip_halfz = state->clip_halfz;
1652 cso->depth_clip_near = state->depth_clip_near;
1653 cso->depth_clip_far = state->depth_clip_far;
1654 cso->flatshade = state->flatshade;
1655 cso->flatshade_first = state->flatshade_first;
1656 cso->clamp_fragment_color = state->clamp_fragment_color;
1657 cso->light_twoside = state->light_twoside;
1658 cso->rasterizer_discard = state->rasterizer_discard;
1659 cso->half_pixel_center = state->half_pixel_center;
1660 cso->sprite_coord_mode = state->sprite_coord_mode;
1661 cso->sprite_coord_enable = state->sprite_coord_enable;
1662 cso->line_stipple_enable = state->line_stipple_enable;
1663 cso->poly_stipple_enable = state->poly_stipple_enable;
1664 cso->conservative_rasterization =
1665 state->conservative_raster_mode == PIPE_CONSERVATIVE_RASTER_POST_SNAP;
1666
1667 cso->fill_mode_point_or_line =
1668 state->fill_front == PIPE_POLYGON_MODE_LINE ||
1669 state->fill_front == PIPE_POLYGON_MODE_POINT ||
1670 state->fill_back == PIPE_POLYGON_MODE_LINE ||
1671 state->fill_back == PIPE_POLYGON_MODE_POINT;
1672
1673 if (state->clip_plane_enable != 0)
1674 cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1;
1675 else
1676 cso->num_clip_plane_consts = 0;
1677
1678 float line_width = get_line_width(state);
1679
1680 iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
1681 sf.StatisticsEnable = true;
1682 sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
1683 sf.LineEndCapAntialiasingRegionWidth =
1684 state->line_smooth ? _10pixels : _05pixels;
1685 sf.LastPixelEnable = state->line_last_pixel;
1686 sf.LineWidth = line_width;
1687 sf.SmoothPointEnable = (state->point_smooth || state->multisample) &&
1688 !state->point_quad_rasterization;
1689 sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
1690 sf.PointWidth = state->point_size;
1691
1692 if (state->flatshade_first) {
1693 sf.TriangleFanProvokingVertexSelect = 1;
1694 } else {
1695 sf.TriangleStripListProvokingVertexSelect = 2;
1696 sf.TriangleFanProvokingVertexSelect = 2;
1697 sf.LineStripListProvokingVertexSelect = 1;
1698 }
1699 }
1700
1701 iris_pack_command(GENX(3DSTATE_RASTER), cso->raster, rr) {
1702 rr.FrontWinding = state->front_ccw ? CounterClockwise : Clockwise;
1703 rr.CullMode = translate_cull_mode(state->cull_face);
1704 rr.FrontFaceFillMode = translate_fill_mode(state->fill_front);
1705 rr.BackFaceFillMode = translate_fill_mode(state->fill_back);
1706 rr.DXMultisampleRasterizationEnable = state->multisample;
1707 rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
1708 rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
1709 rr.GlobalDepthOffsetEnablePoint = state->offset_point;
1710 rr.GlobalDepthOffsetConstant = state->offset_units * 2;
1711 rr.GlobalDepthOffsetScale = state->offset_scale;
1712 rr.GlobalDepthOffsetClamp = state->offset_clamp;
1713 rr.SmoothPointEnable = state->point_smooth;
1714 rr.AntialiasingEnable = state->line_smooth;
1715 rr.ScissorRectangleEnable = state->scissor;
1716 #if GEN_GEN >= 9
1717 rr.ViewportZNearClipTestEnable = state->depth_clip_near;
1718 rr.ViewportZFarClipTestEnable = state->depth_clip_far;
1719 rr.ConservativeRasterizationEnable =
1720 cso->conservative_rasterization;
1721 #else
1722 rr.ViewportZClipTestEnable = (state->depth_clip_near || state->depth_clip_far);
1723 #endif
1724 }
1725
1726 iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
1727 /* cl.NonPerspectiveBarycentricEnable is filled in at draw time from
1728 * the FS program; cl.ForceZeroRTAIndexEnable is filled in from the FB.
1729 */
1730 cl.EarlyCullEnable = true;
1731 cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
1732 cl.ForceUserClipDistanceClipTestEnableBitmask = true;
1733 cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
1734 cl.GuardbandClipTestEnable = true;
1735 cl.ClipEnable = true;
1736 cl.MinimumPointWidth = 0.125;
1737 cl.MaximumPointWidth = 255.875;
1738
1739 if (state->flatshade_first) {
1740 cl.TriangleFanProvokingVertexSelect = 1;
1741 } else {
1742 cl.TriangleStripListProvokingVertexSelect = 2;
1743 cl.TriangleFanProvokingVertexSelect = 2;
1744 cl.LineStripListProvokingVertexSelect = 1;
1745 }
1746 }
1747
1748 iris_pack_command(GENX(3DSTATE_WM), cso->wm, wm) {
1749 /* wm.BarycentricInterpolationMode and wm.EarlyDepthStencilControl are
1750 * filled in at draw time from the FS program.
1751 */
1752 wm.LineAntialiasingRegionWidth = _10pixels;
1753 wm.LineEndCapAntialiasingRegionWidth = _05pixels;
1754 wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
1755 wm.LineStippleEnable = state->line_stipple_enable;
1756 wm.PolygonStippleEnable = state->poly_stipple_enable;
1757 }
1758
1759 /* Remap from 0..255 back to 1..256 */
1760 const unsigned line_stipple_factor = state->line_stipple_factor + 1;
1761
1762 iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
1763 if (state->line_stipple_enable) {
1764 line.LineStipplePattern = state->line_stipple_pattern;
1765 line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
1766 line.LineStippleRepeatCount = line_stipple_factor;
1767 }
1768 }
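/* Worked example (illustrative): a GL LineStippleFactor of 4 reaches us as
 * line_stipple_factor = 3, so we program LineStippleRepeatCount = 4 and
 * LineStippleInverseRepeatCount = 1.0f / 4 = 0.25f.
 */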
1769
1770 return cso;
1771 }
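/* A minimal sketch of how Gallium exercises this hook (hypothetical caller,
 * not part of this file): the CSO is created once, then bound and rebound
 * cheaply, with only dirty-bit flagging happening at bind time.
 *
 *    struct pipe_rasterizer_state templ = {
 *       .cull_face = PIPE_FACE_BACK,
 *       .front_ccw = true,
 *       .line_width = 1.0f,
 *    };
 *    void *cso = ctx->create_rasterizer_state(ctx, &templ);
 *    ctx->bind_rasterizer_state(ctx, cso);
 *    ...draw...
 *    ctx->bind_rasterizer_state(ctx, NULL);
 *    ctx->delete_rasterizer_state(ctx, cso);
 */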
1772
1773 /**
1774 * The pipe->bind_rasterizer_state() driver hook.
1775 *
1776 * Bind a rasterizer CSO and flag related dirty bits.
1777 */
1778 static void
1779 iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
1780 {
1781 struct iris_context *ice = (struct iris_context *) ctx;
1782 struct iris_rasterizer_state *old_cso = ice->state.cso_rast;
1783 struct iris_rasterizer_state *new_cso = state;
1784
1785 if (new_cso) {
1786 /* Try to avoid re-emitting 3DSTATE_LINE_STIPPLE, it's non-pipelined */
1787 if (cso_changed_memcmp(line_stipple))
1788 ice->state.dirty |= IRIS_DIRTY_LINE_STIPPLE;
1789
1790 if (cso_changed(half_pixel_center))
1791 ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
1792
1793 if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
1794 ice->state.dirty |= IRIS_DIRTY_WM;
1795
1796 if (cso_changed(rasterizer_discard))
1797 ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;
1798
1799 if (cso_changed(flatshade_first))
1800 ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
1801
1802 if (cso_changed(depth_clip_near) || cso_changed(depth_clip_far) ||
1803 cso_changed(clip_halfz))
1804 ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
1805
1806 if (cso_changed(sprite_coord_enable) ||
1807 cso_changed(sprite_coord_mode) ||
1808 cso_changed(light_twoside))
1809 ice->state.dirty |= IRIS_DIRTY_SBE;
1810
1811 if (cso_changed(conservative_rasterization))
1812 ice->state.dirty |= IRIS_DIRTY_FS;
1813 }
1814
1815 ice->state.cso_rast = new_cso;
1816 ice->state.dirty |= IRIS_DIRTY_RASTER;
1817 ice->state.dirty |= IRIS_DIRTY_CLIP;
1818 ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_RASTERIZER];
1819 }
1820
1821 /**
1822 * Return true if the given wrap mode requires the border color to exist.
1823 *
1824 * (We can skip uploading it if the sampler isn't going to use it.)
1825 */
1826 static bool
1827 wrap_mode_needs_border_color(unsigned wrap_mode)
1828 {
1829 return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
1830 }
1831
1832 /**
1833 * Gallium CSO for sampler state.
1834 */
1835 struct iris_sampler_state {
1836 union pipe_color_union border_color;
1837 bool needs_border_color;
1838
1839 uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
1840 };
1841
1842 /**
1843 * The pipe->create_sampler_state() driver hook.
1844 *
1845 * We fill out SAMPLER_STATE (except for the border color pointer), and
1846 * store that on the CPU. It doesn't make sense to upload it to a GPU
1847 * buffer object yet, because 3DSTATE_SAMPLER_STATE_POINTERS requires
1848 * all bound sampler states to be in contiguous memory.
1849 */
1850 static void *
1851 iris_create_sampler_state(struct pipe_context *ctx,
1852 const struct pipe_sampler_state *state)
1853 {
1854 struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);
1855
1856 if (!cso)
1857 return NULL;
1858
1859 STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
1860 STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);
1861
1862 unsigned wrap_s = translate_wrap(state->wrap_s);
1863 unsigned wrap_t = translate_wrap(state->wrap_t);
1864 unsigned wrap_r = translate_wrap(state->wrap_r);
1865
1866 memcpy(&cso->border_color, &state->border_color, sizeof(cso->border_color));
1867
1868 cso->needs_border_color = wrap_mode_needs_border_color(wrap_s) ||
1869 wrap_mode_needs_border_color(wrap_t) ||
1870 wrap_mode_needs_border_color(wrap_r);
1871
1872 float min_lod = state->min_lod;
1873 unsigned mag_img_filter = state->mag_img_filter;
1874
1875 // XXX: explain this code ported from ilo...I don't get it at all...
1876 if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE &&
1877 state->min_lod > 0.0f) {
1878 min_lod = 0.0f;
1879 mag_img_filter = state->min_img_filter;
1880 }
1881
1882 iris_pack_state(GENX(SAMPLER_STATE), cso->sampler_state, samp) {
1883 samp.TCXAddressControlMode = wrap_s;
1884 samp.TCYAddressControlMode = wrap_t;
1885 samp.TCZAddressControlMode = wrap_r;
1886 samp.CubeSurfaceControlMode = state->seamless_cube_map;
1887 samp.NonnormalizedCoordinateEnable = !state->normalized_coords;
1888 samp.MinModeFilter = state->min_img_filter;
1889 samp.MagModeFilter = mag_img_filter;
1890 samp.MipModeFilter = translate_mip_filter(state->min_mip_filter);
1891 samp.MaximumAnisotropy = RATIO21;
1892
1893 if (state->max_anisotropy >= 2) {
1894 if (state->min_img_filter == PIPE_TEX_FILTER_LINEAR) {
1895 samp.MinModeFilter = MAPFILTER_ANISOTROPIC;
1896 samp.AnisotropicAlgorithm = EWAApproximation;
1897 }
1898
1899 if (state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)
1900 samp.MagModeFilter = MAPFILTER_ANISOTROPIC;
1901
1902 samp.MaximumAnisotropy =
1903 MIN2((state->max_anisotropy - 2) / 2, RATIO161);
1904 }
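/* Illustrative mapping (not in the original comments): the RATIO* field
 * encodes (ratio / 2) - 1, so the (max_anisotropy - 2) / 2 computation
 * above yields 2:1 -> RATIO21 (0), 4:1 -> RATIO41 (1), ...
 * 16:1 -> RATIO161 (7).
 */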
1905
1906 /* Set address rounding bits if not using nearest filtering. */
1907 if (state->min_img_filter != PIPE_TEX_FILTER_NEAREST) {
1908 samp.UAddressMinFilterRoundingEnable = true;
1909 samp.VAddressMinFilterRoundingEnable = true;
1910 samp.RAddressMinFilterRoundingEnable = true;
1911 }
1912
1913 if (state->mag_img_filter != PIPE_TEX_FILTER_NEAREST) {
1914 samp.UAddressMagFilterRoundingEnable = true;
1915 samp.VAddressMagFilterRoundingEnable = true;
1916 samp.RAddressMagFilterRoundingEnable = true;
1917 }
1918
1919 if (state->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
1920 samp.ShadowFunction = translate_shadow_func(state->compare_func);
1921
1922 const float hw_max_lod = GEN_GEN >= 7 ? 14 : 13;
1923
1924 samp.LODPreClampMode = CLAMP_MODE_OGL;
1925 samp.MinLOD = CLAMP(min_lod, 0, hw_max_lod);
1926 samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
1927 samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);
1928
1929 /* .BorderColorPointer is filled in by iris_bind_sampler_states. */
1930 }
1931
1932 return cso;
1933 }
1934
1935 /**
1936 * The pipe->bind_sampler_states() driver hook.
1937 */
1938 static void
1939 iris_bind_sampler_states(struct pipe_context *ctx,
1940 enum pipe_shader_type p_stage,
1941 unsigned start, unsigned count,
1942 void **states)
1943 {
1944 struct iris_context *ice = (struct iris_context *) ctx;
1945 gl_shader_stage stage = stage_from_pipe(p_stage);
1946 struct iris_shader_state *shs = &ice->state.shaders[stage];
1947
1948 assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
1949
1950 bool dirty = false;
1951
1952 for (int i = 0; i < count; i++) {
1953 if (shs->samplers[start + i] != states[i]) {
1954 shs->samplers[start + i] = states[i];
1955 dirty = true;
1956 }
1957 }
1958
1959 if (dirty)
1960 ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
1961 }
1962
1963 /**
1964 * Upload the sampler states into a contiguous area of GPU memory,
1965 * for 3DSTATE_SAMPLER_STATE_POINTERS_*.
1966 *
1967 * Also fill out the border color state pointers.
1968 */
1969 static void
1970 iris_upload_sampler_states(struct iris_context *ice, gl_shader_stage stage)
1971 {
1972 struct iris_shader_state *shs = &ice->state.shaders[stage];
1973 const struct shader_info *info = iris_get_shader_info(ice, stage);
1974
1975 /* We assume the state tracker will call pipe->bind_sampler_states()
1976 * if the program's number of textures changes.
1977 */
1978 unsigned count = info ? util_last_bit(info->textures_used) : 0;
1979
1980 if (!count)
1981 return;
1982
1983 /* Assemble the SAMPLER_STATEs into a contiguous table that lives
1984 * in the dynamic state memory zone, so we can point to it via the
1985 * 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
1986 */
1987 unsigned size = count * 4 * GENX(SAMPLER_STATE_length);
1988 uint32_t *map =
1989 upload_state(ice->state.dynamic_uploader, &shs->sampler_table, size, 32);
1990 if (unlikely(!map))
1991 return;
1992
1993 struct pipe_resource *res = shs->sampler_table.res;
1994 shs->sampler_table.offset +=
1995 iris_bo_offset_from_base_address(iris_resource_bo(res));
1996
1997 iris_record_state_size(ice->state.sizes, shs->sampler_table.offset, size);
1998
1999 /* Make sure all land in the same BO */
2000 iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);
2001
2002 ice->state.need_border_colors &= ~(1 << stage);
2003
2004 for (int i = 0; i < count; i++) {
2005 struct iris_sampler_state *state = shs->samplers[i];
2006 struct iris_sampler_view *tex = shs->textures[i];
2007
2008 if (!state) {
2009 memset(map, 0, 4 * GENX(SAMPLER_STATE_length));
2010 } else if (!state->needs_border_color) {
2011 memcpy(map, state->sampler_state, 4 * GENX(SAMPLER_STATE_length));
2012 } else {
2013 ice->state.need_border_colors |= 1 << stage;
2014
2015 /* We may need to swizzle the border color for format faking.
2016 * A/LA formats are faked as R/RG with 000R or R00G swizzles.
2017 * This means we need to move the border color's A channel into
2018 * the R or G channels so that those read swizzles will move it
2019 * back into A.
2020 */
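/* Concrete example (illustrative): a PIPE_FORMAT_A8_UNORM border color of
 * (0, 0, 0, 0.5) goes through the {W,0,0,0} swizzle below and becomes
 * (0.5, 0, 0, 0), so the faked R8 surface's 000R read swizzle returns
 * 0.5 in the alpha channel again.
 */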
2021 union pipe_color_union *color = &state->border_color;
2022 union pipe_color_union tmp;
2023 if (tex) {
2024 enum pipe_format internal_format = tex->res->internal_format;
2025
2026 if (util_format_is_alpha(internal_format)) {
2027 unsigned char swz[4] = {
2028 PIPE_SWIZZLE_W, PIPE_SWIZZLE_0,
2029 PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
2030 };
2031 util_format_apply_color_swizzle(&tmp, color, swz, true);
2032 color = &tmp;
2033 } else if (util_format_is_luminance_alpha(internal_format) &&
2034 internal_format != PIPE_FORMAT_L8A8_SRGB) {
2035 unsigned char swz[4] = {
2036 PIPE_SWIZZLE_X, PIPE_SWIZZLE_W,
2037 PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
2038 };
2039 util_format_apply_color_swizzle(&tmp, color, swz, true);
2040 color = &tmp;
2041 }
2042 }
2043
2044 /* Stream out the border color and merge the pointer. */
2045 uint32_t offset = iris_upload_border_color(ice, color);
2046
2047 uint32_t dynamic[GENX(SAMPLER_STATE_length)];
2048 iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) {
2049 dyns.BorderColorPointer = offset;
2050 }
2051
2052 for (uint32_t j = 0; j < GENX(SAMPLER_STATE_length); j++)
2053 map[j] = state->sampler_state[j] | dynamic[j];
2054 }
2055
2056 map += GENX(SAMPLER_STATE_length);
2057 }
2058 }
2059
2060 static enum isl_channel_select
2061 fmt_swizzle(const struct iris_format_info *fmt, enum pipe_swizzle swz)
2062 {
2063 switch (swz) {
2064 case PIPE_SWIZZLE_X: return fmt->swizzle.r;
2065 case PIPE_SWIZZLE_Y: return fmt->swizzle.g;
2066 case PIPE_SWIZZLE_Z: return fmt->swizzle.b;
2067 case PIPE_SWIZZLE_W: return fmt->swizzle.a;
2068 case PIPE_SWIZZLE_1: return SCS_ONE;
2069 case PIPE_SWIZZLE_0: return SCS_ZERO;
2070 default: unreachable("invalid swizzle");
2071 }
2072 }
2073
2074 static void
2075 fill_buffer_surface_state(struct isl_device *isl_dev,
2076 struct iris_resource *res,
2077 void *map,
2078 enum isl_format format,
2079 struct isl_swizzle swizzle,
2080 unsigned offset,
2081 unsigned size)
2082 {
2083 const struct isl_format_layout *fmtl = isl_format_get_layout(format);
2084 const unsigned cpp = format == ISL_FORMAT_RAW ? 1 : fmtl->bpb / 8;
2085
2086 /* The ARB_texture_buffer_specification says:
2087 *
2088 * "The number of texels in the buffer texture's texel array is given by
2089 *
2090 * floor(<buffer_size> / (<components> * sizeof(<base_type>))),
2091 *
2092 * where <buffer_size> is the size of the buffer object, in basic
2093 * machine units and <components> and <base_type> are the element count
2094 * and base data type for elements, as specified in Table X.1. The
2095 * number of texels in the texel array is then clamped to the
2096 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
2097 *
2098 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
2099 * so that when ISL divides by stride to obtain the number of texels, that
2100 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
2101 */
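/* Worked example (illustrative, assuming IRIS_MAX_TEXTURE_BUFFER_SIZE is
 * 2^27 texels): an R32G32B32A32_FLOAT view has cpp = 16, so the byte-size
 * clamp is 2^27 * 16 = 2 GiB; ISL then divides by stride_B = 16 and the
 * resulting texel count can never exceed 2^27.
 */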
2102 unsigned final_size =
2103 MIN3(size, res->bo->size - res->offset - offset,
2104 IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);
2105
2106 isl_buffer_fill_state(isl_dev, map,
2107 .address = res->bo->gtt_offset + res->offset + offset,
2108 .size_B = final_size,
2109 .format = format,
2110 .swizzle = swizzle,
2111 .stride_B = cpp,
2112 .mocs = mocs(res->bo));
2113 }
2114
2115 #define SURFACE_STATE_ALIGNMENT 64
2116
2117 /**
2118 * Allocate several contiguous SURFACE_STATE structures, one for each
2119 * supported auxiliary surface mode.
2120 */
2121 static void *
2122 alloc_surface_states(struct u_upload_mgr *mgr,
2123 struct iris_state_ref *ref,
2124 unsigned aux_usages)
2125 {
2126 const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
2127
2128 /* If this changes, update this to explicitly align pointers */
2129 STATIC_ASSERT(surf_size == SURFACE_STATE_ALIGNMENT);
2130
2131 assert(aux_usages != 0);
2132
2133 void *map =
2134 upload_state(mgr, ref, util_bitcount(aux_usages) * surf_size,
2135 SURFACE_STATE_ALIGNMENT);
2136
2137 ref->offset += iris_bo_offset_from_base_address(iris_resource_bo(ref->res));
2138
2139 return map;
2140 }
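/* Callers receive one SURFACE_STATE slot per set bit in aux_usages and are
 * expected to walk them in u_bit_scan() order, stepping the returned map
 * by SURFACE_STATE_ALIGNMENT. A sketch of that pattern (mirroring the
 * loops later in this file):
 *
 *    unsigned aux_modes = res->aux.possible_usages;
 *    while (aux_modes) {
 *       enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
 *       ...fill the state at map for aux_usage...
 *       map += SURFACE_STATE_ALIGNMENT;
 *    }
 */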
2141
2142 #if GEN_GEN == 8
2143 /**
2144 * Return an ISL surface for use with non-coherent render target reads.
2145 *
2146 * In a few complex cases, we can't use the SURFACE_STATE for normal render
2147 * target writes. We need to make a separate one for sampling which refers
2148 * to the single slice of the texture being read.
2149 */
2150 static void
2151 get_rt_read_isl_surf(const struct gen_device_info *devinfo,
2152 struct iris_resource *res,
2153 enum pipe_texture_target target,
2154 struct isl_view *view,
2155 uint32_t *tile_x_sa,
2156 uint32_t *tile_y_sa,
2157 struct isl_surf *surf)
2158 {
2159
2160 *surf = res->surf;
2161
2162 const enum isl_dim_layout dim_layout =
2163 iris_get_isl_dim_layout(devinfo, res->surf.tiling, target);
2164
2165 surf->dim = target_to_isl_surf_dim(target);
2166
2167 if (surf->dim_layout == dim_layout)
2168 return;
2169
2170 /* The layout of the specified texture target is not compatible with the
2171 * actual layout of the miptree structure in memory -- You're entering
2172 * dangerous territory, this can only possibly work if you only intended
2173 * to access a single level and slice of the texture, and the hardware
2174 * supports the tile offset feature in order to allow non-tile-aligned
2175 * base offsets, since we'll have to point the hardware to the first
2176 * texel of the level instead of relying on the usual base level/layer
2177 * controls.
2178 */
2179 assert(view->levels == 1 && view->array_len == 1);
2180 assert(*tile_x_sa == 0 && *tile_y_sa == 0);
2181
2182 res->offset += iris_resource_get_tile_offsets(res, view->base_level,
2183 view->base_array_layer,
2184 tile_x_sa, tile_y_sa);
2185 const unsigned l = view->base_level;
2186
2187 surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
2188 surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
2189 minify(surf->logical_level0_px.height, l);
2190 surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
2191 minify(surf->logical_level0_px.depth, l);
2192
2193 surf->logical_level0_px.array_len = 1;
2194 surf->levels = 1;
2195 surf->dim_layout = dim_layout;
2196
2197 view->base_level = 0;
2198 view->base_array_layer = 0;
2199 }
2200 #endif
2201
2202 static void
2203 fill_surface_state(struct isl_device *isl_dev,
2204 void *map,
2205 struct iris_resource *res,
2206 struct isl_surf *surf,
2207 struct isl_view *view,
2208 unsigned aux_usage,
2209 uint32_t tile_x_sa,
2210 uint32_t tile_y_sa)
2211 {
2212 struct isl_surf_fill_state_info f = {
2213 .surf = surf,
2214 .view = view,
2215 .mocs = mocs(res->bo),
2216 .address = res->bo->gtt_offset + res->offset,
2217 .x_offset_sa = tile_x_sa,
2218 .y_offset_sa = tile_y_sa,
2219 };
2220
2221 assert(!iris_resource_unfinished_aux_import(res));
2222
2223 if (aux_usage != ISL_AUX_USAGE_NONE) {
2224 f.aux_surf = &res->aux.surf;
2225 f.aux_usage = aux_usage;
2226 f.aux_address = res->aux.bo->gtt_offset + res->aux.offset;
2227
2228 struct iris_bo *clear_bo = NULL;
2229 uint64_t clear_offset = 0;
2230 f.clear_color =
2231 iris_resource_get_clear_color(res, &clear_bo, &clear_offset);
2232 if (clear_bo) {
2233 f.clear_address = clear_bo->gtt_offset + clear_offset;
2234 f.use_clear_address = isl_dev->info->gen > 9;
2235 }
2236 }
2237
2238 isl_surf_fill_state_s(isl_dev, map, &f);
2239 }
2240
2241 /**
2242 * The pipe->create_sampler_view() driver hook.
2243 */
2244 static struct pipe_sampler_view *
2245 iris_create_sampler_view(struct pipe_context *ctx,
2246 struct pipe_resource *tex,
2247 const struct pipe_sampler_view *tmpl)
2248 {
2249 struct iris_context *ice = (struct iris_context *) ctx;
2250 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2251 const struct gen_device_info *devinfo = &screen->devinfo;
2252 struct iris_sampler_view *isv = calloc(1, sizeof(struct iris_sampler_view));
2253
2254 if (!isv)
2255 return NULL;
2256
2257 /* initialize base object */
2258 isv->base = *tmpl;
2259 isv->base.context = ctx;
2260 isv->base.texture = NULL;
2261 pipe_reference_init(&isv->base.reference, 1);
2262 pipe_resource_reference(&isv->base.texture, tex);
2263
2264 if (util_format_is_depth_or_stencil(tmpl->format)) {
2265 struct iris_resource *zres, *sres;
2266 const struct util_format_description *desc =
2267 util_format_description(tmpl->format);
2268
2269 iris_get_depth_stencil_resources(tex, &zres, &sres);
2270
2271 tex = util_format_has_depth(desc) ? &zres->base : &sres->base;
2272 }
2273
2274 isv->res = (struct iris_resource *) tex;
2275
2276 void *map = alloc_surface_states(ice->state.surface_uploader,
2277 &isv->surface_state,
2278 isv->res->aux.sampler_usages);
2279 if (unlikely(!map))
2280 return NULL;
2281
2282 isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;
2283
2284 if (isv->base.target == PIPE_TEXTURE_CUBE ||
2285 isv->base.target == PIPE_TEXTURE_CUBE_ARRAY)
2286 usage |= ISL_SURF_USAGE_CUBE_BIT;
2287
2288 const struct iris_format_info fmt =
2289 iris_format_for_usage(devinfo, tmpl->format, usage);
2290
2291 isv->clear_color = isv->res->aux.clear_color;
2292
2293 isv->view = (struct isl_view) {
2294 .format = fmt.fmt,
2295 .swizzle = (struct isl_swizzle) {
2296 .r = fmt_swizzle(&fmt, tmpl->swizzle_r),
2297 .g = fmt_swizzle(&fmt, tmpl->swizzle_g),
2298 .b = fmt_swizzle(&fmt, tmpl->swizzle_b),
2299 .a = fmt_swizzle(&fmt, tmpl->swizzle_a),
2300 },
2301 .usage = usage,
2302 };
2303
2304 /* Fill out SURFACE_STATE for this view. */
2305 if (tmpl->target != PIPE_BUFFER) {
2306 isv->view.base_level = tmpl->u.tex.first_level;
2307 isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;
2308 // XXX: do I need to port f9fd0cf4790cb2a530e75d1a2206dbb9d8af7cb2?
2309 isv->view.base_array_layer = tmpl->u.tex.first_layer;
2310 isv->view.array_len =
2311 tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
2312
2313 if (iris_resource_unfinished_aux_import(isv->res))
2314 iris_resource_finish_aux_import(&screen->base, isv->res);
2315
2316 unsigned aux_modes = isv->res->aux.sampler_usages;
2317 while (aux_modes) {
2318 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
2319
2320 /* If we have a multisampled depth buffer, do not create a sampler
2321 * surface state with HiZ.
2322 */
2323 fill_surface_state(&screen->isl_dev, map, isv->res, &isv->res->surf,
2324 &isv->view, aux_usage, 0, 0);
2325
2326 map += SURFACE_STATE_ALIGNMENT;
2327 }
2328 } else {
2329 fill_buffer_surface_state(&screen->isl_dev, isv->res, map,
2330 isv->view.format, isv->view.swizzle,
2331 tmpl->u.buf.offset, tmpl->u.buf.size);
2332 }
2333
2334 return &isv->base;
2335 }
2336
2337 static void
2338 iris_sampler_view_destroy(struct pipe_context *ctx,
2339 struct pipe_sampler_view *state)
2340 {
2341 struct iris_sampler_view *isv = (void *) state;
2342 pipe_resource_reference(&state->texture, NULL);
2343 pipe_resource_reference(&isv->surface_state.res, NULL);
2344 free(isv);
2345 }
2346
2347 /**
2348 * The pipe->create_surface() driver hook.
2349 *
2350 * In Gallium nomenclature, "surfaces" are views of a resource that
2351 * can be bound as a render target or depth/stencil buffer.
2352 */
2353 static struct pipe_surface *
2354 iris_create_surface(struct pipe_context *ctx,
2355 struct pipe_resource *tex,
2356 const struct pipe_surface *tmpl)
2357 {
2358 struct iris_context *ice = (struct iris_context *) ctx;
2359 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2360 const struct gen_device_info *devinfo = &screen->devinfo;
2361
2362 isl_surf_usage_flags_t usage = 0;
2363 if (tmpl->writable)
2364 usage = ISL_SURF_USAGE_STORAGE_BIT;
2365 else if (util_format_is_depth_or_stencil(tmpl->format))
2366 usage = ISL_SURF_USAGE_DEPTH_BIT;
2367 else
2368 usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
2369
2370 const struct iris_format_info fmt =
2371 iris_format_for_usage(devinfo, tmpl->format, usage);
2372
2373 if ((usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
2374 !isl_format_supports_rendering(devinfo, fmt.fmt)) {
2375 /* Framebuffer validation will reject this invalid case, but it
2376 * hasn't had the opportunity yet. In the meantime, we need to
2377 * avoid hitting ISL asserts about unsupported formats below.
2378 */
2379 return NULL;
2380 }
2381
2382 struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
2383 struct pipe_surface *psurf = &surf->base;
2384 struct iris_resource *res = (struct iris_resource *) tex;
2385
2386 if (!surf)
2387 return NULL;
2388
2389 pipe_reference_init(&psurf->reference, 1);
2390 pipe_resource_reference(&psurf->texture, tex);
2391 psurf->context = ctx;
2392 psurf->format = tmpl->format;
2393 psurf->width = tex->width0;
2394 psurf->height = tex->height0;
2395 psurf->texture = tex;
2396 psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
2397 psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
2398 psurf->u.tex.level = tmpl->u.tex.level;
2399
2400 uint32_t array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
2401
2402 struct isl_view *view = &surf->view;
2403 *view = (struct isl_view) {
2404 .format = fmt.fmt,
2405 .base_level = tmpl->u.tex.level,
2406 .levels = 1,
2407 .base_array_layer = tmpl->u.tex.first_layer,
2408 .array_len = array_len,
2409 .swizzle = ISL_SWIZZLE_IDENTITY,
2410 .usage = usage,
2411 };
2412
2413 #if GEN_GEN == 8
2414 enum pipe_texture_target target = (tex->target == PIPE_TEXTURE_3D &&
2415 array_len == 1) ? PIPE_TEXTURE_2D :
2416 tex->target == PIPE_TEXTURE_1D_ARRAY ?
2417 PIPE_TEXTURE_2D_ARRAY : tex->target;
2418
2419 struct isl_view *read_view = &surf->read_view;
2420 *read_view = (struct isl_view) {
2421 .format = fmt.fmt,
2422 .base_level = tmpl->u.tex.level,
2423 .levels = 1,
2424 .base_array_layer = tmpl->u.tex.first_layer,
2425 .array_len = array_len,
2426 .swizzle = ISL_SWIZZLE_IDENTITY,
2427 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
2428 };
2429 #endif
2430
2431 surf->clear_color = res->aux.clear_color;
2432
2433 /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */
2434 if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
2435 ISL_SURF_USAGE_STENCIL_BIT))
2436 return psurf;
2437
2438
2439 void *map = alloc_surface_states(ice->state.surface_uploader,
2440 &surf->surface_state,
2441 res->aux.possible_usages);
2442 if (unlikely(!map)) {
2443 pipe_resource_reference(&surf->surface_state.res, NULL);
2444 return NULL;
2445 }
2446
2447 #if GEN_GEN == 8
2448 void *map_read = alloc_surface_states(ice->state.surface_uploader,
2449 &surf->surface_state_read,
2450 res->aux.possible_usages);
2451 if (unlikely(!map_read)) {
2452 pipe_resource_reference(&surf->surface_state_read.res, NULL);
2453 return NULL;
2454 }
2455 #endif
2456
2457 if (!isl_format_is_compressed(res->surf.format)) {
2458 if (iris_resource_unfinished_aux_import(res))
2459 iris_resource_finish_aux_import(&screen->base, res);
2460
2461 /* This is a normal surface. Fill out a SURFACE_STATE for each possible
2462 * auxiliary surface mode and return the pipe_surface.
2463 */
2464 unsigned aux_modes = res->aux.possible_usages;
2465 while (aux_modes) {
2466 #if GEN_GEN == 8
2467 uint32_t offset = res->offset;
2468 #endif
2469 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
2470 fill_surface_state(&screen->isl_dev, map, res, &res->surf,
2471 view, aux_usage, 0, 0);
2472 map += SURFACE_STATE_ALIGNMENT;
2473
2474 #if GEN_GEN == 8
2475 struct isl_surf surf;
2476 uint32_t tile_x_sa = 0, tile_y_sa = 0;
2477 get_rt_read_isl_surf(devinfo, res, target, read_view,
2478 &tile_x_sa, &tile_y_sa, &surf);
2479 fill_surface_state(&screen->isl_dev, map_read, res, &surf, read_view,
2480 aux_usage, tile_x_sa, tile_y_sa);
2481 /* Restore the offset, since we modified it above while handling
2482 * the non-coherent fb fetch read surface.
2483 */
2484 res->offset = offset;
2485 map_read += SURFACE_STATE_ALIGNMENT;
2486 #endif
2487 }
2488
2489 return psurf;
2490 }
2491
2492 /* The resource has a compressed format, which is not renderable, but we
2493 * have a renderable view format. We must be attempting to upload blocks
2494 * of compressed data via an uncompressed view.
2495 *
2496 * In this case, we can assume there are no auxiliary buffers, a single
2497 * miplevel, and that the resource is single-sampled. Gallium may try
2498 * to create an uncompressed view with multiple layers, however.
2499 */
2500 assert(!isl_format_is_compressed(fmt.fmt));
2501 assert(res->aux.possible_usages == 1 << ISL_AUX_USAGE_NONE);
2502 assert(res->surf.samples == 1);
2503 assert(view->levels == 1);
2504
2505 struct isl_surf isl_surf;
2506 uint32_t offset_B = 0, tile_x_sa = 0, tile_y_sa = 0;
2507
2508 if (view->base_level > 0) {
2509 /* We can't rely on the hardware's miplevel selection with such
2510 * a substantial lie about the format, so we select a single image
2511 * using the Tile X/Y Offset fields. In this case, we can't handle
2512 * multiple array slices.
2513 *
2514 * On Broadwell, HALIGN and VALIGN are specified in pixels and are
2515 * hard-coded to align to exactly the block size of the compressed
2516 * texture. This means that, when reinterpreted as a non-compressed
2517 * texture, the tile offsets may be anything and we can't rely on
2518 * X/Y Offset.
2519 *
2520 * Return NULL to force the state tracker to take fallback paths.
2521 */
2522 if (view->array_len > 1 || GEN_GEN == 8)
2523 return NULL;
2524
2525 const bool is_3d = res->surf.dim == ISL_SURF_DIM_3D;
2526 isl_surf_get_image_surf(&screen->isl_dev, &res->surf,
2527 view->base_level,
2528 is_3d ? 0 : view->base_array_layer,
2529 is_3d ? view->base_array_layer : 0,
2530 &isl_surf,
2531 &offset_B, &tile_x_sa, &tile_y_sa);
2532
2533 /* We use address and tile offsets to access a single level/layer
2534 * as a subimage, so reset level/layer so it doesn't offset again.
2535 */
2536 view->base_array_layer = 0;
2537 view->base_level = 0;
2538 } else {
2539 /* Level 0 doesn't require tile offsets, and the hardware can find
2540 * array slices using QPitch even with the format override, so we
2541 * can allow layers in this case. Copy the original ISL surface.
2542 */
2543 memcpy(&isl_surf, &res->surf, sizeof(isl_surf));
2544 }
2545
2546 /* Scale down the image dimensions by the block size. */
2547 const struct isl_format_layout *fmtl =
2548 isl_format_get_layout(res->surf.format);
2549 isl_surf.format = fmt.fmt;
2550 isl_surf.logical_level0_px = isl_surf_get_logical_level0_el(&isl_surf);
2551 isl_surf.phys_level0_sa = isl_surf_get_phys_level0_el(&isl_surf);
2552 tile_x_sa /= fmtl->bw;
2553 tile_y_sa /= fmtl->bh;
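/* Illustrative example: a 64x64 BC1 miplevel viewed as R32G32_UINT has
 * bw = bh = 4, so it is presented as a 16x16 "texel" surface where each
 * texel is one 8-byte compressed block.
 */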
2554
2555 psurf->width = isl_surf.logical_level0_px.width;
2556 psurf->height = isl_surf.logical_level0_px.height;
2557
2558 struct isl_surf_fill_state_info f = {
2559 .surf = &isl_surf,
2560 .view = view,
2561 .mocs = mocs(res->bo),
2562 .address = res->bo->gtt_offset + offset_B,
2563 .x_offset_sa = tile_x_sa,
2564 .y_offset_sa = tile_y_sa,
2565 };
2566
2567 isl_surf_fill_state_s(&screen->isl_dev, map, &f);
2568 return psurf;
2569 }
2570
2571 #if GEN_GEN < 9
2572 static void
2573 fill_default_image_param(struct brw_image_param *param)
2574 {
2575 memset(param, 0, sizeof(*param));
2576 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
2577 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
2578 * detailed explanation of these parameters.
2579 */
2580 param->swizzling[0] = 0xff;
2581 param->swizzling[1] = 0xff;
2582 }
2583
2584 static void
2585 fill_buffer_image_param(struct brw_image_param *param,
2586 enum pipe_format pfmt,
2587 unsigned size)
2588 {
2589 const unsigned cpp = util_format_get_blocksize(pfmt);
2590
2591 fill_default_image_param(param);
2592 param->size[0] = size / cpp;
2593 param->stride[0] = cpp;
2594 }
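/* Illustrative example: a 1024-byte buffer image with
 * pfmt = PIPE_FORMAT_R32G32B32A32_FLOAT has cpp = 16, so the shader sees
 * size[0] = 64 texels with a 16-byte stride.
 */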
2595 #else
2596 #define isl_surf_fill_image_param(x, ...)
2597 #define fill_default_image_param(x, ...)
2598 #define fill_buffer_image_param(x, ...)
2599 #endif
2600
2601 /**
2602 * The pipe->set_shader_images() driver hook.
2603 */
2604 static void
2605 iris_set_shader_images(struct pipe_context *ctx,
2606 enum pipe_shader_type p_stage,
2607 unsigned start_slot, unsigned count,
2608 const struct pipe_image_view *p_images)
2609 {
2610 struct iris_context *ice = (struct iris_context *) ctx;
2611 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2612 const struct gen_device_info *devinfo = &screen->devinfo;
2613 gl_shader_stage stage = stage_from_pipe(p_stage);
2614 struct iris_shader_state *shs = &ice->state.shaders[stage];
2615 #if GEN_GEN == 8
2616 struct iris_genx_state *genx = ice->state.genx;
2617 struct brw_image_param *image_params = genx->shaders[stage].image_param;
2618 #endif
2619
2620 shs->bound_image_views &= ~u_bit_consecutive(start_slot, count);
2621
2622 for (unsigned i = 0; i < count; i++) {
2623 struct iris_image_view *iv = &shs->image[start_slot + i];
2624
2625 if (p_images && p_images[i].resource) {
2626 const struct pipe_image_view *img = &p_images[i];
2627 struct iris_resource *res = (void *) img->resource;
2628
2629 void *map =
2630 alloc_surface_states(ice->state.surface_uploader,
2631 &iv->surface_state, 1 << ISL_AUX_USAGE_NONE);
2632 if (unlikely(!map))
2633 return;
2634
2635 util_copy_image_view(&iv->base, img);
2636
2637 shs->bound_image_views |= 1 << (start_slot + i);
2638
2639 res->bind_history |= PIPE_BIND_SHADER_IMAGE;
2640 res->bind_stages |= 1 << stage;
2641
2642 isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
2643 enum isl_format isl_fmt =
2644 iris_format_for_usage(devinfo, img->format, usage).fmt;
2645
2646 bool untyped_fallback = false;
2647
2648 if (img->shader_access & PIPE_IMAGE_ACCESS_READ) {
2649 /* On Gen8, try to use typed surfaces reads (which support a
2650 * limited number of formats), and if not possible, fall back
2651 * to untyped reads.
2652 */
2653 untyped_fallback = GEN_GEN == 8 &&
2654 !isl_has_matching_typed_storage_image_format(devinfo, isl_fmt);
2655
2656 if (untyped_fallback)
2657 isl_fmt = ISL_FORMAT_RAW;
2658 else
2659 isl_fmt = isl_lower_storage_image_format(devinfo, isl_fmt);
2660 }
2661
2662 if (res->base.target != PIPE_BUFFER) {
2663 struct isl_view view = {
2664 .format = isl_fmt,
2665 .base_level = img->u.tex.level,
2666 .levels = 1,
2667 .base_array_layer = img->u.tex.first_layer,
2668 .array_len = img->u.tex.last_layer - img->u.tex.first_layer + 1,
2669 .swizzle = ISL_SWIZZLE_IDENTITY,
2670 .usage = usage,
2671 };
2672
2673 if (untyped_fallback) {
2674 fill_buffer_surface_state(&screen->isl_dev, res, map,
2675 isl_fmt, ISL_SWIZZLE_IDENTITY,
2676 0, res->bo->size);
2677 } else {
2678 /* Images don't support compression */
2679 unsigned aux_modes = 1 << ISL_AUX_USAGE_NONE;
2680 while (aux_modes) {
2681 enum isl_aux_usage usage = u_bit_scan(&aux_modes);
2682
2683 fill_surface_state(&screen->isl_dev, map, res, &res->surf,
2684 &view, usage, 0, 0);
2685
2686 map += SURFACE_STATE_ALIGNMENT;
2687 }
2688 }
2689
2690 isl_surf_fill_image_param(&screen->isl_dev,
2691 &image_params[start_slot + i],
2692 &res->surf, &view);
2693 } else {
2694 util_range_add(&res->base, &res->valid_buffer_range, img->u.buf.offset,
2695 img->u.buf.offset + img->u.buf.size);
2696
2697 fill_buffer_surface_state(&screen->isl_dev, res, map,
2698 isl_fmt, ISL_SWIZZLE_IDENTITY,
2699 img->u.buf.offset, img->u.buf.size);
2700 fill_buffer_image_param(&image_params[start_slot + i],
2701 img->format, img->u.buf.size);
2702 }
2703 } else {
2704 pipe_resource_reference(&iv->base.resource, NULL);
2705 pipe_resource_reference(&iv->surface_state.res, NULL);
2706 fill_default_image_param(&image_params[start_slot + i]);
2707 }
2708 }
2709
2710 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
2711 ice->state.dirty |=
2712 stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
2713 : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
2714
2715 /* Broadwell also needs brw_image_params re-uploaded */
2716 if (GEN_GEN < 9) {
2717 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
2718 shs->sysvals_need_upload = true;
2719 }
2720 }
2721
2722
2723 /**
2724 * The pipe->set_sampler_views() driver hook.
2725 */
2726 static void
2727 iris_set_sampler_views(struct pipe_context *ctx,
2728 enum pipe_shader_type p_stage,
2729 unsigned start, unsigned count,
2730 struct pipe_sampler_view **views)
2731 {
2732 struct iris_context *ice = (struct iris_context *) ctx;
2733 gl_shader_stage stage = stage_from_pipe(p_stage);
2734 struct iris_shader_state *shs = &ice->state.shaders[stage];
2735
2736 shs->bound_sampler_views &= ~u_bit_consecutive(start, count);
2737
2738 for (unsigned i = 0; i < count; i++) {
2739 struct pipe_sampler_view *pview = views ? views[i] : NULL;
2740 pipe_sampler_view_reference((struct pipe_sampler_view **)
2741 &shs->textures[start + i], pview);
2742 struct iris_sampler_view *view = (void *) pview;
2743 if (view) {
2744 view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
2745 view->res->bind_stages |= 1 << stage;
2746
2747 shs->bound_sampler_views |= 1 << (start + i);
2748 }
2749 }
2750
2751 ice->state.dirty |= (IRIS_DIRTY_BINDINGS_VS << stage);
2752 ice->state.dirty |=
2753 stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
2754 : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
2755 }
2756
2757 /**
2758 * The pipe->set_tess_state() driver hook.
2759 */
2760 static void
2761 iris_set_tess_state(struct pipe_context *ctx,
2762 const float default_outer_level[4],
2763 const float default_inner_level[2])
2764 {
2765 struct iris_context *ice = (struct iris_context *) ctx;
2766 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
2767
2768 memcpy(&ice->state.default_outer_level[0], &default_outer_level[0], 4 * sizeof(float));
2769 memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float));
2770
2771 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TCS;
2772 shs->sysvals_need_upload = true;
2773 }
2774
2775 static void
2776 iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
2777 {
2778 struct iris_surface *surf = (void *) p_surf;
2779 pipe_resource_reference(&p_surf->texture, NULL);
2780 pipe_resource_reference(&surf->surface_state.res, NULL);
2781 pipe_resource_reference(&surf->surface_state_read.res, NULL);
2782 free(surf);
2783 }
2784
2785 static void
2786 iris_set_clip_state(struct pipe_context *ctx,
2787 const struct pipe_clip_state *state)
2788 {
2789 struct iris_context *ice = (struct iris_context *) ctx;
2790 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
2791 struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
2792 struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
2793
2794 memcpy(&ice->state.clip_planes, state, sizeof(*state));
2795
2796 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS | IRIS_DIRTY_CONSTANTS_GS |
2797 IRIS_DIRTY_CONSTANTS_TES;
2798 shs->sysvals_need_upload = true;
2799 gshs->sysvals_need_upload = true;
2800 tshs->sysvals_need_upload = true;
2801 }
2802
2803 /**
2804 * The pipe->set_polygon_stipple() driver hook.
2805 */
2806 static void
2807 iris_set_polygon_stipple(struct pipe_context *ctx,
2808 const struct pipe_poly_stipple *state)
2809 {
2810 struct iris_context *ice = (struct iris_context *) ctx;
2811 memcpy(&ice->state.poly_stipple, state, sizeof(*state));
2812 ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
2813 }
2814
2815 /**
2816 * The pipe->set_sample_mask() driver hook.
2817 */
2818 static void
2819 iris_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
2820 {
2821 struct iris_context *ice = (struct iris_context *) ctx;
2822
2823 /* We only support 16x MSAA, so we have 16 bits of sample mask.
2824 * st/mesa may pass us 0xffffffff though, meaning "enable all samples".
2825 */
2826 ice->state.sample_mask = sample_mask & 0xffff;
2827 ice->state.dirty |= IRIS_DIRTY_SAMPLE_MASK;
2828 }
2829
2830 /**
2831 * The pipe->set_scissor_states() driver hook.
2832 *
2833 * This corresponds to our SCISSOR_RECT state structures. It's an
2834 * exact match, so we just store them, and memcpy them out later.
2835 */
2836 static void
2837 iris_set_scissor_states(struct pipe_context *ctx,
2838 unsigned start_slot,
2839 unsigned num_scissors,
2840 const struct pipe_scissor_state *rects)
2841 {
2842 struct iris_context *ice = (struct iris_context *) ctx;
2843
2844 for (unsigned i = 0; i < num_scissors; i++) {
2845 if (rects[i].minx == rects[i].maxx || rects[i].miny == rects[i].maxy) {
2846 /* If the scissor was out of bounds and got clamped to 0 width/height
2847 * at the bounds, the subtraction of 1 from maximums could produce a
2848 * negative number and thus not clip anything. Instead, just provide
2849 * a min > max scissor inside the bounds, which produces the expected
2850 * no rendering.
2851 */
2852 ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
2853 .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
2854 };
2855 } else {
2856 ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
2857 .minx = rects[i].minx, .miny = rects[i].miny,
2858 .maxx = rects[i].maxx - 1, .maxy = rects[i].maxy - 1,
2859 };
2860 }
2861 }
2862
2863 ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
2864 }
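/* Worked example (illustrative): a clamped scissor with minx == maxx == 0
 * has zero width; emitting maxx - 1 would wrap around to a huge unsigned
 * value and clip nothing, so the loop above stores minx = 1 > maxx = 0
 * instead, which rasterizes nothing, as intended.
 */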
2865
2866 /**
2867 * The pipe->set_stencil_ref() driver hook.
2868 *
2869 * This is added to 3DSTATE_WM_DEPTH_STENCIL dynamically at draw time.
2870 */
2871 static void
2872 iris_set_stencil_ref(struct pipe_context *ctx,
2873 const struct pipe_stencil_ref *state)
2874 {
2875 struct iris_context *ice = (struct iris_context *) ctx;
2876 memcpy(&ice->state.stencil_ref, state, sizeof(*state));
2877 if (GEN_GEN == 8)
2878 ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
2879 else
2880 ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
2881 }
2882
2883 static float
2884 viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
2885 {
2886 return copysignf(state->scale[axis], sign) + state->translate[axis];
2887 }
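/* Worked example (illustrative): a GL viewport covering a 640x480 window
 * has scale[0] = 320 and translate[0] = 320, so
 * viewport_extent(state, 0, -1.0f) = -320 + 320 = 0 and
 * viewport_extent(state, 0, 1.0f) = 320 + 320 = 640, recovering the
 * screen-space X range [0, 640].
 */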
2888
2889 /**
2890 * The pipe->set_viewport_states() driver hook.
2891 *
2892 * This corresponds to our SF_CLIP_VIEWPORT states. We can't calculate
2893 * the guardband yet, as we need the framebuffer dimensions, but we can
2894 * at least fill out the rest.
2895 */
2896 static void
2897 iris_set_viewport_states(struct pipe_context *ctx,
2898 unsigned start_slot,
2899 unsigned count,
2900 const struct pipe_viewport_state *states)
2901 {
2902 struct iris_context *ice = (struct iris_context *) ctx;
2903
2904 memcpy(&ice->state.viewports[start_slot], states, sizeof(*states) * count);
2905
2906 ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
2907
2908 if (ice->state.cso_rast && (!ice->state.cso_rast->depth_clip_near ||
2909 !ice->state.cso_rast->depth_clip_far))
2910 ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
2911 }
2912
2913 /**
2914 * The pipe->set_framebuffer_state() driver hook.
2915 *
2916 * Sets the current draw FBO, including color render targets, depth,
2917 * and stencil buffers.
2918 */
2919 static void
2920 iris_set_framebuffer_state(struct pipe_context *ctx,
2921 const struct pipe_framebuffer_state *state)
2922 {
2923 struct iris_context *ice = (struct iris_context *) ctx;
2924 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2925 struct isl_device *isl_dev = &screen->isl_dev;
2926 struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
2927 struct iris_resource *zres;
2928 struct iris_resource *stencil_res;
2929
2930 unsigned samples = util_framebuffer_get_num_samples(state);
2931 unsigned layers = util_framebuffer_get_num_layers(state);
2932
2933 if (cso->samples != samples) {
2934 ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
2935
2936 /* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */
2937 if (GEN_GEN >= 9 && (cso->samples == 16 || samples == 16))
2938 ice->state.dirty |= IRIS_DIRTY_FS;
2939 }
2940
2941 if (cso->nr_cbufs != state->nr_cbufs) {
2942 ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
2943 }
2944
2945 if ((cso->layers == 0) != (layers == 0)) {
2946 ice->state.dirty |= IRIS_DIRTY_CLIP;
2947 }
2948
2949 if (cso->width != state->width || cso->height != state->height) {
2950 ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
2951 }
2952
2953 if (cso->zsbuf || state->zsbuf) {
2954 ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
2955 }
2956
2957 util_copy_framebuffer_state(cso, state);
2958 cso->samples = samples;
2959 cso->layers = layers;
2960
2961 struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
2962
2963 struct isl_view view = {
2964 .base_level = 0,
2965 .levels = 1,
2966 .base_array_layer = 0,
2967 .array_len = 1,
2968 .swizzle = ISL_SWIZZLE_IDENTITY,
2969 };
2970
2971 struct isl_depth_stencil_hiz_emit_info info = { .view = &view };
2972
2973 if (cso->zsbuf) {
2974 iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres,
2975 &stencil_res);
2976
2977 view.base_level = cso->zsbuf->u.tex.level;
2978 view.base_array_layer = cso->zsbuf->u.tex.first_layer;
2979 view.array_len =
2980 cso->zsbuf->u.tex.last_layer - cso->zsbuf->u.tex.first_layer + 1;
2981
2982 if (zres) {
2983 view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
2984
2985 info.depth_surf = &zres->surf;
2986 info.depth_address = zres->bo->gtt_offset + zres->offset;
2987 info.mocs = mocs(zres->bo);
2988
2989 view.format = zres->surf.format;
2990
2991 if (iris_resource_level_has_hiz(zres, view.base_level)) {
2992 info.hiz_usage = zres->aux.usage;
2993 info.hiz_surf = &zres->aux.surf;
2994 info.hiz_address = zres->aux.bo->gtt_offset + zres->aux.offset;
2995 }
2996 }
2997
2998 if (stencil_res) {
2999 view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
3000 info.stencil_aux_usage = stencil_res->aux.usage;
3001 info.stencil_surf = &stencil_res->surf;
3002 info.stencil_address = stencil_res->bo->gtt_offset + stencil_res->offset;
3003 if (!zres) {
3004 view.format = stencil_res->surf.format;
3005 info.mocs = mocs(stencil_res->bo);
3006 }
3007 }
3008 }
3009
3010 isl_emit_depth_stencil_hiz_s(isl_dev, cso_z->packets, &info);
3011
3012 /* Make a null surface for unbound buffers */
3013 void *null_surf_map =
3014 upload_state(ice->state.surface_uploader, &ice->state.null_fb,
3015 4 * GENX(RENDER_SURFACE_STATE_length), 64);
3016 isl_null_fill_state(&screen->isl_dev, null_surf_map,
3017 isl_extent3d(MAX2(cso->width, 1),
3018 MAX2(cso->height, 1),
3019 cso->layers ? cso->layers : 1));
3020 ice->state.null_fb.offset +=
3021 iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));
3022
3023 /* Render target change */
3024 ice->state.dirty |= IRIS_DIRTY_BINDINGS_FS;
3025
3026 ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
3027
3028 ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_FRAMEBUFFER];
3029
3030 if (GEN_GEN == 8)
3031 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
3032
3033 #if GEN_GEN == 11
3034 // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
3035 // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6
3036
3037 /* The PIPE_CONTROL command description says:
3038 *
3039 * "Whenever a Binding Table Index (BTI) used by a Render Target Message
3040 * points to a different RENDER_SURFACE_STATE, SW must issue a Render
3041 * Target Cache Flush by enabling this bit. When render target flush
3042 * is set due to new association of BTI, PS Scoreboard Stall bit must
3043 * be set in this packet."
3044 */
3045 // XXX: does this need to happen at 3DSTATE_BTP_PS time?
3046 iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
3047 "workaround: RT BTI change [draw]",
3048 PIPE_CONTROL_RENDER_TARGET_FLUSH |
3049 PIPE_CONTROL_STALL_AT_SCOREBOARD);
3050 #endif
3051 }
3052
3053 /**
3054 * The pipe->set_constant_buffer() driver hook.
3055 *
3056 * This uploads any constant data in user buffers, and references
3057 * any UBO resources containing constant data.
3058 */
3059 static void
3060 iris_set_constant_buffer(struct pipe_context *ctx,
3061 enum pipe_shader_type p_stage, unsigned index,
3062 const struct pipe_constant_buffer *input)
3063 {
3064 struct iris_context *ice = (struct iris_context *) ctx;
3065 gl_shader_stage stage = stage_from_pipe(p_stage);
3066 struct iris_shader_state *shs = &ice->state.shaders[stage];
3067 struct pipe_shader_buffer *cbuf = &shs->constbuf[index];
3068
3069 /* TODO: Only do this if the buffer changes? */
3070 pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
3071
3072 if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
3073 shs->bound_cbufs |= 1u << index;
3074
3075 if (input->user_buffer) {
3076 void *map = NULL;
3077 pipe_resource_reference(&cbuf->buffer, NULL);
3078 u_upload_alloc(ice->ctx.const_uploader, 0, input->buffer_size, 64,
3079 &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
3080
3081 if (!cbuf->buffer) {
3082 /* Allocation was unsuccessful - just unbind */
3083 iris_set_constant_buffer(ctx, p_stage, index, NULL);
3084 return;
3085 }
3086
3087 assert(map);
3088 memcpy(map, input->user_buffer, input->buffer_size);
3089 } else if (input->buffer) {
3090 pipe_resource_reference(&cbuf->buffer, input->buffer);
3091
3092 cbuf->buffer_offset = input->buffer_offset;
3093 }
3094
3095 cbuf->buffer_size =
3096 MIN2(input->buffer_size,
3097 iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset);
3098
3099 struct iris_resource *res = (void *) cbuf->buffer;
3100 res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
3101 res->bind_stages |= 1 << stage;
3102 } else {
3103 shs->bound_cbufs &= ~(1u << index);
3104 pipe_resource_reference(&cbuf->buffer, NULL);
3105 }
3106
3107 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
3108 }
3109
3110 static void
3111 upload_sysvals(struct iris_context *ice,
3112 gl_shader_stage stage)
3113 {
3114 UNUSED struct iris_genx_state *genx = ice->state.genx;
3115 struct iris_shader_state *shs = &ice->state.shaders[stage];
3116
3117 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
3118 if (!shader || shader->num_system_values == 0)
3119 return;
3120
3121 assert(shader->num_cbufs > 0);
3122
3123 unsigned sysval_cbuf_index = shader->num_cbufs - 1;
3124 struct pipe_shader_buffer *cbuf = &shs->constbuf[sysval_cbuf_index];
3125 unsigned upload_size = shader->num_system_values * sizeof(uint32_t);
3126 uint32_t *map = NULL;
3127
3128 assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS);
3129 u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64,
3130 &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
3131
3132 for (int i = 0; i < shader->num_system_values; i++) {
3133 uint32_t sysval = shader->system_values[i];
3134 uint32_t value = 0;
3135
3136 if (BRW_PARAM_DOMAIN(sysval) == BRW_PARAM_DOMAIN_IMAGE) {
3137 #if GEN_GEN == 8
3138 unsigned img = BRW_PARAM_IMAGE_IDX(sysval);
3139 unsigned offset = BRW_PARAM_IMAGE_OFFSET(sysval);
3140 struct brw_image_param *param =
3141 &genx->shaders[stage].image_param[img];
3142
3143 assert(offset < sizeof(struct brw_image_param));
3144 value = ((uint32_t *) param)[offset];
3145 #endif
3146 } else if (sysval == BRW_PARAM_BUILTIN_ZERO) {
3147 value = 0;
3148 } else if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
3149 int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval);
3150 int comp = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval);
3151 value = fui(ice->state.clip_planes.ucp[plane][comp]);
3152 } else if (sysval == BRW_PARAM_BUILTIN_PATCH_VERTICES_IN) {
3153 if (stage == MESA_SHADER_TESS_CTRL) {
3154 value = ice->state.vertices_per_patch;
3155 } else {
3156 assert(stage == MESA_SHADER_TESS_EVAL);
3157 const struct shader_info *tcs_info =
3158 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
3159 if (tcs_info)
3160 value = tcs_info->tess.tcs_vertices_out;
3161 else
3162 value = ice->state.vertices_per_patch;
3163 }
3164 } else if (sysval >= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X &&
3165 sysval <= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_W) {
3166 unsigned i = sysval - BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
3167 value = fui(ice->state.default_outer_level[i]);
3168 } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X) {
3169 value = fui(ice->state.default_inner_level[0]);
3170 } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y) {
3171 value = fui(ice->state.default_inner_level[1]);
3172 } else {
3173 assert(!"unhandled system value");
3174 }
3175
3176 *map++ = value;
3177 }
3178
3179 cbuf->buffer_size = upload_size;
3180 iris_upload_ubo_ssbo_surf_state(ice, cbuf,
3181 &shs->constbuf_surf_state[sysval_cbuf_index], false);
3182
3183 shs->sysvals_need_upload = false;
3184 }
3185
3186 /**
3187 * The pipe->set_shader_buffers() driver hook.
3188 *
3189  * This binds SSBOs and ABOs (atomic buffer objects).  Unfortunately, we need to stream out
3190 * SURFACE_STATE here, as the buffer offset may change each time.
3191 */
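/* A state tracker exercises this hook roughly as follows (a minimal
 * sketch with hypothetical values; prsc stands in for some existing
 * buffer resource):
 *
 *    struct pipe_shader_buffer buf = {
 *       .buffer = prsc,
 *       .buffer_offset = 0,
 *       .buffer_size = 4096,
 *    };
 *    ctx->set_shader_buffers(ctx, PIPE_SHADER_FRAGMENT, 0, 1, &buf, 0x1);
 */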
3192 static void
3193 iris_set_shader_buffers(struct pipe_context *ctx,
3194 enum pipe_shader_type p_stage,
3195 unsigned start_slot, unsigned count,
3196 const struct pipe_shader_buffer *buffers,
3197 unsigned writable_bitmask)
3198 {
3199 struct iris_context *ice = (struct iris_context *) ctx;
3200 gl_shader_stage stage = stage_from_pipe(p_stage);
3201 struct iris_shader_state *shs = &ice->state.shaders[stage];
3202
3203 unsigned modified_bits = u_bit_consecutive(start_slot, count);
3204
3205 shs->bound_ssbos &= ~modified_bits;
3206 shs->writable_ssbos &= ~modified_bits;
3207 shs->writable_ssbos |= writable_bitmask << start_slot;
3208
3209 for (unsigned i = 0; i < count; i++) {
3210 if (buffers && buffers[i].buffer) {
3211 struct iris_resource *res = (void *) buffers[i].buffer;
3212 struct pipe_shader_buffer *ssbo = &shs->ssbo[start_slot + i];
3213 struct iris_state_ref *surf_state =
3214 &shs->ssbo_surf_state[start_slot + i];
3215 pipe_resource_reference(&ssbo->buffer, &res->base);
3216 ssbo->buffer_offset = buffers[i].buffer_offset;
3217 ssbo->buffer_size =
3218 MIN2(buffers[i].buffer_size, res->bo->size - ssbo->buffer_offset);
3219
3220 shs->bound_ssbos |= 1 << (start_slot + i);
3221
3222 iris_upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, true);
3223
3224 res->bind_history |= PIPE_BIND_SHADER_BUFFER;
3225 res->bind_stages |= 1 << stage;
3226
3227 util_range_add(&res->base, &res->valid_buffer_range, ssbo->buffer_offset,
3228 ssbo->buffer_offset + ssbo->buffer_size);
3229 } else {
3230 pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL);
3231 pipe_resource_reference(&shs->ssbo_surf_state[start_slot + i].res,
3232 NULL);
3233 }
3234 }
3235
3236 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
3237 }
3238
3239 static void
3240 iris_delete_state(struct pipe_context *ctx, void *state)
3241 {
3242 free(state);
3243 }
3244
3245 /**
3246 * The pipe->set_vertex_buffers() driver hook.
3247 *
3248 * This translates pipe_vertex_buffer to our 3DSTATE_VERTEX_BUFFERS packet.
3249 */
3250 static void
3251 iris_set_vertex_buffers(struct pipe_context *ctx,
3252 unsigned start_slot, unsigned count,
3253 const struct pipe_vertex_buffer *buffers)
3254 {
3255 struct iris_context *ice = (struct iris_context *) ctx;
3256 struct iris_genx_state *genx = ice->state.genx;
3257
3258 ice->state.bound_vertex_buffers &= ~u_bit_consecutive64(start_slot, count);
3259
3260 for (unsigned i = 0; i < count; i++) {
3261 const struct pipe_vertex_buffer *buffer = buffers ? &buffers[i] : NULL;
3262 struct iris_vertex_buffer_state *state =
3263 &genx->vertex_buffers[start_slot + i];
3264
3265 if (!buffer) {
3266 pipe_resource_reference(&state->resource, NULL);
3267 continue;
3268 }
3269
3270 /* We may see user buffers that are NULL bindings. */
3271 assert(!(buffer->is_user_buffer && buffer->buffer.user != NULL));
3272
3273 pipe_resource_reference(&state->resource, buffer->buffer.resource);
3274 struct iris_resource *res = (void *) state->resource;
3275
3276 state->offset = (int) buffer->buffer_offset;
3277
3278 if (res) {
3279 ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
3280 res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
3281 }
3282
3283 iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
3284 vb.VertexBufferIndex = start_slot + i;
3285 vb.AddressModifyEnable = true;
3286 vb.BufferPitch = buffer->stride;
3287 if (res) {
3288 vb.BufferSize = res->bo->size - (int) buffer->buffer_offset;
3289 vb.BufferStartingAddress =
3290 ro_bo(NULL, res->bo->gtt_offset + (int) buffer->buffer_offset);
3291 vb.MOCS = mocs(res->bo);
3292 } else {
3293 vb.NullVertexBuffer = true;
3294 }
3295 }
3296 }
3297
3298 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
3299 }
3300
3301 /**
3302 * Gallium CSO for vertex elements.
3303 */
3304 struct iris_vertex_element_state {
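   /* One header DWord for the 3DSTATE_VERTEX_ELEMENTS packet itself, then
    * space for up to 33 VERTEX_ELEMENT_STATE entries (presumably the 32
    * API-visible elements plus one spare for draw-time overrides).
    */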
3305 uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
3306 uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
3307 uint32_t edgeflag_ve[GENX(VERTEX_ELEMENT_STATE_length)];
3308 uint32_t edgeflag_vfi[GENX(3DSTATE_VF_INSTANCING_length)];
3309 unsigned count;
3310 };
3311
3312 /**
3313 * The pipe->create_vertex_elements() driver hook.
3314 *
3315  * This translates pipe_vertex_element to our 3DSTATE_VERTEX_ELEMENTS
3316  * and 3DSTATE_VF_INSTANCING commands.  If no EdgeFlag or SGVs are
3317  * needed, the vertex_elements and vf_instancing arrays are ready to
3318  * be memcpy'd into the batch at draw time.  Otherwise, we need
3319  * draw-time information, so we also set up edgeflag_ve and edgeflag_vfi
3320  * as alternative versions of the last VERTEX_ELEMENT_STATE and
3321  * 3DSTATE_VF_INSTANCING, used when the Vertex Shader needs EdgeFlag.
3322 */
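/* For reference, a caller binds a single float4 attribute roughly like
 * this (a minimal sketch with hypothetical values):
 *
 *    struct pipe_vertex_element ve = {
 *       .src_offset = 0,
 *       .instance_divisor = 0,
 *       .vertex_buffer_index = 0,
 *       .src_format = PIPE_FORMAT_R32G32B32A32_FLOAT,
 *    };
 *    void *cso = ctx->create_vertex_elements_state(ctx, 1, &ve);
 *    ctx->bind_vertex_elements_state(ctx, cso);
 */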
3323 static void *
3324 iris_create_vertex_elements(struct pipe_context *ctx,
3325 unsigned count,
3326 const struct pipe_vertex_element *state)
3327 {
3328 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3329 const struct gen_device_info *devinfo = &screen->devinfo;
3330 struct iris_vertex_element_state *cso =
3331 malloc(sizeof(struct iris_vertex_element_state));
3332
3333 cso->count = count;
3334
3335 iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) {
3336 ve.DWordLength =
3337 1 + GENX(VERTEX_ELEMENT_STATE_length) * MAX2(count, 1) - 2;
3338 }
3339
3340 uint32_t *ve_pack_dest = &cso->vertex_elements[1];
3341 uint32_t *vfi_pack_dest = cso->vf_instancing;
3342
3343 if (count == 0) {
3344 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
3345 ve.Valid = true;
3346 ve.SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT;
3347 ve.Component0Control = VFCOMP_STORE_0;
3348 ve.Component1Control = VFCOMP_STORE_0;
3349 ve.Component2Control = VFCOMP_STORE_0;
3350 ve.Component3Control = VFCOMP_STORE_1_FP;
3351 }
3352
3353 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
3354 }
3355 }
3356
3357 for (int i = 0; i < count; i++) {
3358 const struct iris_format_info fmt =
3359 iris_format_for_usage(devinfo, state[i].src_format, 0);
3360 unsigned comp[4] = { VFCOMP_STORE_SRC, VFCOMP_STORE_SRC,
3361 VFCOMP_STORE_SRC, VFCOMP_STORE_SRC };
3362
3363 switch (isl_format_get_num_channels(fmt.fmt)) {
3364 case 0: comp[0] = VFCOMP_STORE_0; /* fallthrough */
3365 case 1: comp[1] = VFCOMP_STORE_0; /* fallthrough */
3366 case 2: comp[2] = VFCOMP_STORE_0; /* fallthrough */
3367 case 3:
3368 comp[3] = isl_format_has_int_channel(fmt.fmt) ? VFCOMP_STORE_1_INT
3369 : VFCOMP_STORE_1_FP;
3370 break;
3371 }
3372 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
3373 ve.EdgeFlagEnable = false;
3374 ve.VertexBufferIndex = state[i].vertex_buffer_index;
3375 ve.Valid = true;
3376 ve.SourceElementOffset = state[i].src_offset;
3377 ve.SourceElementFormat = fmt.fmt;
3378 ve.Component0Control = comp[0];
3379 ve.Component1Control = comp[1];
3380 ve.Component2Control = comp[2];
3381 ve.Component3Control = comp[3];
3382 }
3383
3384 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
3385 vi.VertexElementIndex = i;
3386 vi.InstancingEnable = state[i].instance_divisor > 0;
3387 vi.InstanceDataStepRate = state[i].instance_divisor;
3388 }
3389
3390 ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
3391 vfi_pack_dest += GENX(3DSTATE_VF_INSTANCING_length);
3392 }
3393
3394 /* An alternative version of the last VE and VFI is stored so it
3395     * can be used at draw time in case the Vertex Shader uses EdgeFlag.
3396 */
3397 if (count) {
3398 const unsigned edgeflag_index = count - 1;
3399 const struct iris_format_info fmt =
3400 iris_format_for_usage(devinfo, state[edgeflag_index].src_format, 0);
3401 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), cso->edgeflag_ve, ve) {
3402          ve.EdgeFlagEnable = true;
3403 ve.VertexBufferIndex = state[edgeflag_index].vertex_buffer_index;
3404 ve.Valid = true;
3405 ve.SourceElementOffset = state[edgeflag_index].src_offset;
3406 ve.SourceElementFormat = fmt.fmt;
3407 ve.Component0Control = VFCOMP_STORE_SRC;
3408 ve.Component1Control = VFCOMP_STORE_0;
3409 ve.Component2Control = VFCOMP_STORE_0;
3410 ve.Component3Control = VFCOMP_STORE_0;
3411 }
3412 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), cso->edgeflag_vfi, vi) {
3413 /* The vi.VertexElementIndex of the EdgeFlag Vertex Element is filled
3414 * at draw time, as it should change if SGVs are emitted.
3415 */
3416 vi.InstancingEnable = state[edgeflag_index].instance_divisor > 0;
3417 vi.InstanceDataStepRate = state[edgeflag_index].instance_divisor;
3418 }
3419 }
3420
3421 return cso;
3422 }
3423
3424 /**
3425 * The pipe->bind_vertex_elements_state() driver hook.
3426 */
3427 static void
3428 iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
3429 {
3430 struct iris_context *ice = (struct iris_context *) ctx;
3431 struct iris_vertex_element_state *old_cso = ice->state.cso_vertex_elements;
3432 struct iris_vertex_element_state *new_cso = state;
3433
3434 /* 3DSTATE_VF_SGVs overrides the last VE, so if the count is changing,
3435 * we need to re-emit it to ensure we're overriding the right one.
3436 */
3437 if (new_cso && cso_changed(count))
3438 ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
3439
3440 ice->state.cso_vertex_elements = state;
3441 ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
3442 }
3443
3444 /**
3445 * The pipe->create_stream_output_target() driver hook.
3446 *
3447 * "Target" here refers to a destination buffer. We translate this into
3448 * a 3DSTATE_SO_BUFFER packet. We can handle most fields, but don't yet
3449 * know which buffer this represents, or whether we ought to zero the
3450 * write-offsets, or append. Those are handled in the set() hook.
3451 */
3452 static struct pipe_stream_output_target *
3453 iris_create_stream_output_target(struct pipe_context *ctx,
3454 struct pipe_resource *p_res,
3455 unsigned buffer_offset,
3456 unsigned buffer_size)
3457 {
3458 struct iris_resource *res = (void *) p_res;
3459 struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
3460 if (!cso)
3461 return NULL;
3462
3463 res->bind_history |= PIPE_BIND_STREAM_OUTPUT;
3464
3465 pipe_reference_init(&cso->base.reference, 1);
3466 pipe_resource_reference(&cso->base.buffer, p_res);
3467 cso->base.buffer_offset = buffer_offset;
3468 cso->base.buffer_size = buffer_size;
3469 cso->base.context = ctx;
3470
3471 util_range_add(&res->base, &res->valid_buffer_range, buffer_offset,
3472 buffer_offset + buffer_size);
3473
3474 upload_state(ctx->stream_uploader, &cso->offset, sizeof(uint32_t), 4);
3475
3476 return &cso->base;
3477 }
3478
3479 static void
3480 iris_stream_output_target_destroy(struct pipe_context *ctx,
3481 struct pipe_stream_output_target *state)
3482 {
3483 struct iris_stream_output_target *cso = (void *) state;
3484
3485 pipe_resource_reference(&cso->base.buffer, NULL);
3486 pipe_resource_reference(&cso->offset.res, NULL);
3487
3488 free(cso);
3489 }
3490
3491 /**
3492 * The pipe->set_stream_output_targets() driver hook.
3493 *
3494 * At this point, we know which targets are bound to a particular index,
3495 * and also whether we want to append or start over. We can finish the
3496 * 3DSTATE_SO_BUFFER packets we started earlier.
3497 */
3498 static void
3499 iris_set_stream_output_targets(struct pipe_context *ctx,
3500 unsigned num_targets,
3501 struct pipe_stream_output_target **targets,
3502 const unsigned *offsets)
3503 {
3504 struct iris_context *ice = (struct iris_context *) ctx;
3505 struct iris_genx_state *genx = ice->state.genx;
3506 uint32_t *so_buffers = genx->so_buffers;
3507
3508 const bool active = num_targets > 0;
3509 if (ice->state.streamout_active != active) {
3510 ice->state.streamout_active = active;
3511 ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
3512
3513 /* We only emit 3DSTATE_SO_DECL_LIST when streamout is active, because
3514 * it's a non-pipelined command. If we're switching streamout on, we
3515 * may have missed emitting it earlier, so do so now. (We're already
3516 * taking a stall to update 3DSTATE_SO_BUFFERS anyway...)
3517 */
3518 if (active) {
3519 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST;
3520 } else {
3521 uint32_t flush = 0;
3522 for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
3523 struct iris_stream_output_target *tgt =
3524 (void *) ice->state.so_target[i];
3525 if (tgt) {
3526 struct iris_resource *res = (void *) tgt->base.buffer;
3527
3528 flush |= iris_flush_bits_for_history(res);
3529 iris_dirty_for_history(ice, res);
3530 }
3531 }
3532 iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
3533 "make streamout results visible", flush);
3534 }
3535 }
3536
3537 for (int i = 0; i < 4; i++) {
3538 pipe_so_target_reference(&ice->state.so_target[i],
3539 i < num_targets ? targets[i] : NULL);
3540 }
3541
3542 /* No need to update 3DSTATE_SO_BUFFER unless SOL is active. */
3543 if (!active)
3544 return;
3545
3546 for (unsigned i = 0; i < 4; i++,
3547 so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
3548
3549 struct iris_stream_output_target *tgt = (void *) ice->state.so_target[i];
3550 unsigned offset = offsets[i];
3551
3552 if (!tgt) {
3553 iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
3554 #if GEN_GEN < 12
3555 sob.SOBufferIndex = i;
3556 #else
3557 sob._3DCommandOpcode = 0;
3558 sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
3559 #endif
3560 }
3561 continue;
3562 }
3563
3564 struct iris_resource *res = (void *) tgt->base.buffer;
3565
3566 /* Note that offsets[i] will either be 0, causing us to zero
3567 * the value in the buffer, or 0xFFFFFFFF, which happens to mean
3568 * "continue appending at the existing offset."
3569 */
3570 assert(offset == 0 || offset == 0xFFFFFFFF);
3571
3572 /* We might be called by Begin (offset = 0), Pause, then Resume
3573 * (offset = 0xFFFFFFFF) before ever drawing (where these commands
3574 * will actually be sent to the GPU). In this case, we don't want
3575 * to append - we still want to do our initial zeroing.
3576 */
3577 if (!tgt->zeroed)
3578 offset = 0;
3579
3580 iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
3581 #if GEN_GEN < 12
3582 sob.SOBufferIndex = i;
3583 #else
3584 sob._3DCommandOpcode = 0;
3585 sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
3586 #endif
3587 sob.SurfaceBaseAddress =
3588 rw_bo(NULL, res->bo->gtt_offset + tgt->base.buffer_offset);
3589 sob.SOBufferEnable = true;
3590 sob.StreamOffsetWriteEnable = true;
3591 sob.StreamOutputBufferOffsetAddressEnable = true;
3592 sob.MOCS = mocs(res->bo);
3593
3594 sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
3595 sob.StreamOffset = offset;
3596 sob.StreamOutputBufferOffsetAddress =
3597 rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset +
3598 tgt->offset.offset);
3599 }
3600 }
3601
3602 ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
3603 }
3604
3605 /**
3606 * An iris-vtable helper for encoding the 3DSTATE_SO_DECL_LIST and
3607 * 3DSTATE_STREAMOUT packets.
3608 *
3609 * 3DSTATE_SO_DECL_LIST is a list of shader outputs we want the streamout
3610 * hardware to record. We can create it entirely based on the shader, with
3611 * no dynamic state dependencies.
3612 *
3613 * 3DSTATE_STREAMOUT is an annoying mix of shader-based information and
3614 * state-based settings. We capture the shader-related ones here, and merge
3615 * the rest in at draw time.
3616 */
3617 static uint32_t *
3618 iris_create_so_decl_list(const struct pipe_stream_output_info *info,
3619 const struct brw_vue_map *vue_map)
3620 {
3621 struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
3622 int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3623 int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3624 int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3625 int max_decls = 0;
3626 STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);
3627
3628 memset(so_decl, 0, sizeof(so_decl));
3629
3630 /* Construct the list of SO_DECLs to be emitted. The formatting of the
3631 * command feels strange -- each dword pair contains a SO_DECL per stream.
3632 */
3633 for (unsigned i = 0; i < info->num_outputs; i++) {
3634 const struct pipe_stream_output *output = &info->output[i];
3635 const int buffer = output->output_buffer;
3636 const int varying = output->register_index;
3637 const unsigned stream_id = output->stream;
3638 assert(stream_id < MAX_VERTEX_STREAMS);
3639
3640 buffer_mask[stream_id] |= 1 << buffer;
3641
3642 assert(vue_map->varying_to_slot[varying] >= 0);
3643
3644 /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
3645 * array. Instead, it simply increments DstOffset for the following
3646 * input by the number of components that should be skipped.
3647 *
3648 * Our hardware is unusual in that it requires us to program SO_DECLs
3649 * for fake "hole" components, rather than simply taking the offset
3650 * for each real varying. Each hole can have size 1, 2, 3, or 4; we
3651 * program as many size = 4 holes as we can, then a final hole to
3652 * accommodate the final 1, 2, or 3 remaining.
3653 */
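      /* For example, a gl_SkipComponents7 gap becomes a ComponentMask 0xf
       * hole followed by a ComponentMask 0x7 hole: skip_components counts
       * down 7 -> 3 -> -1.
       */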
3654 int skip_components = output->dst_offset - next_offset[buffer];
3655
3656 while (skip_components > 0) {
3657 so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
3658 .HoleFlag = 1,
3659 .OutputBufferSlot = output->output_buffer,
3660 .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
3661 };
3662 skip_components -= 4;
3663 }
3664
3665 next_offset[buffer] = output->dst_offset + output->num_components;
3666
3667 so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
3668 .OutputBufferSlot = output->output_buffer,
3669 .RegisterIndex = vue_map->varying_to_slot[varying],
3670 .ComponentMask =
3671 ((1 << output->num_components) - 1) << output->start_component,
3672 };
3673
3674 if (decls[stream_id] > max_decls)
3675 max_decls = decls[stream_id];
3676 }
3677
3678 unsigned dwords = GENX(3DSTATE_STREAMOUT_length) + (3 + 2 * max_decls);
3679 uint32_t *map = ralloc_size(NULL, sizeof(uint32_t) * dwords);
3680 uint32_t *so_decl_map = map + GENX(3DSTATE_STREAMOUT_length);
3681
3682 iris_pack_command(GENX(3DSTATE_STREAMOUT), map, sol) {
3683 int urb_entry_read_offset = 0;
3684 int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
3685 urb_entry_read_offset;
3686
3687 /* We always read the whole vertex. This could be reduced at some
3688 * point by reading less and offsetting the register index in the
3689 * SO_DECLs.
3690 */
3691 sol.Stream0VertexReadOffset = urb_entry_read_offset;
3692 sol.Stream0VertexReadLength = urb_entry_read_length - 1;
3693 sol.Stream1VertexReadOffset = urb_entry_read_offset;
3694 sol.Stream1VertexReadLength = urb_entry_read_length - 1;
3695 sol.Stream2VertexReadOffset = urb_entry_read_offset;
3696 sol.Stream2VertexReadLength = urb_entry_read_length - 1;
3697 sol.Stream3VertexReadOffset = urb_entry_read_offset;
3698 sol.Stream3VertexReadLength = urb_entry_read_length - 1;
3699
3700 /* Set buffer pitches; 0 means unbound. */
3701 sol.Buffer0SurfacePitch = 4 * info->stride[0];
3702 sol.Buffer1SurfacePitch = 4 * info->stride[1];
3703 sol.Buffer2SurfacePitch = 4 * info->stride[2];
3704 sol.Buffer3SurfacePitch = 4 * info->stride[3];
3705 }
3706
3707 iris_pack_command(GENX(3DSTATE_SO_DECL_LIST), so_decl_map, list) {
3708 list.DWordLength = 3 + 2 * max_decls - 2;
3709 list.StreamtoBufferSelects0 = buffer_mask[0];
3710 list.StreamtoBufferSelects1 = buffer_mask[1];
3711 list.StreamtoBufferSelects2 = buffer_mask[2];
3712 list.StreamtoBufferSelects3 = buffer_mask[3];
3713 list.NumEntries0 = decls[0];
3714 list.NumEntries1 = decls[1];
3715 list.NumEntries2 = decls[2];
3716 list.NumEntries3 = decls[3];
3717 }
3718
3719 for (int i = 0; i < max_decls; i++) {
3720 iris_pack_state(GENX(SO_DECL_ENTRY), so_decl_map + 3 + i * 2, entry) {
3721 entry.Stream0Decl = so_decl[0][i];
3722 entry.Stream1Decl = so_decl[1][i];
3723 entry.Stream2Decl = so_decl[2][i];
3724 entry.Stream3Decl = so_decl[3][i];
3725 }
3726 }
3727
3728 return map;
3729 }
3730
3731 static void
3732 iris_compute_sbe_urb_read_interval(uint64_t fs_input_slots,
3733 const struct brw_vue_map *last_vue_map,
3734 bool two_sided_color,
3735 unsigned *out_offset,
3736 unsigned *out_length)
3737 {
3738 /* The compiler computes the first URB slot without considering COL/BFC
3739 * swizzling (because it doesn't know whether it's enabled), so we need
3740 * to do that here too. This may result in a smaller offset, which
3741 * should be safe.
3742 */
3743 const unsigned first_slot =
3744 brw_compute_first_urb_slot_required(fs_input_slots, last_vue_map);
3745
3746 /* This becomes the URB read offset (counted in pairs of slots). */
3747 assert(first_slot % 2 == 0);
3748 *out_offset = first_slot / 2;
3749
3750 /* We need to adjust the inputs read to account for front/back color
3751 * swizzling, as it can make the URB length longer.
3752 */
3753 for (int c = 0; c <= 1; c++) {
3754 if (fs_input_slots & (VARYING_BIT_COL0 << c)) {
3755 /* If two sided color is enabled, the fragment shader's gl_Color
3756 * (COL0) input comes from either the gl_FrontColor (COL0) or
3757 * gl_BackColor (BFC0) input varyings. Mark BFC as used, too.
3758 */
3759 if (two_sided_color)
3760 fs_input_slots |= (VARYING_BIT_BFC0 << c);
3761
3762 /* If front color isn't written, we opt to give them back color
3763 * instead of an undefined value. Switch from COL to BFC.
3764 */
3765 if (last_vue_map->varying_to_slot[VARYING_SLOT_COL0 + c] == -1) {
3766 fs_input_slots &= ~(VARYING_BIT_COL0 << c);
3767 fs_input_slots |= (VARYING_BIT_BFC0 << c);
3768 }
3769 }
3770 }
3771
3772 /* Compute the minimum URB Read Length necessary for the FS inputs.
3773 *
3774 * From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
3775 * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
3776 *
3777 * "This field should be set to the minimum length required to read the
3778 * maximum source attribute. The maximum source attribute is indicated
3779 * by the maximum value of the enabled Attribute # Source Attribute if
3780 * Attribute Swizzle Enable is set, Number of Output Attributes-1 if
3781 * enable is not set.
3782 * read_length = ceiling((max_source_attr + 1) / 2)
3783 *
3784 * [errata] Corruption/Hang possible if length programmed larger than
3785 * recommended"
3786 *
3787 * Similar text exists for Ivy Bridge.
3788 *
3789 * We find the last URB slot that's actually read by the FS.
3790 */
3791 unsigned last_read_slot = last_vue_map->num_slots - 1;
3792 while (last_read_slot > first_slot && !(fs_input_slots &
3793 (1ull << last_vue_map->slot_to_varying[last_read_slot])))
3794 --last_read_slot;
3795
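   /* Worked example (hypothetical numbers): first_slot = 4 and
    * last_read_slot = 9 would yield *out_offset = 2 pairs and a read
    * length of DIV_ROUND_UP(9 - 4 + 1, 2) = 3 pairs.
    */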
3796 /* The URB read length is the difference of the two, counted in pairs. */
3797 *out_length = DIV_ROUND_UP(last_read_slot - first_slot + 1, 2);
3798 }
3799
3800 static void
3801 iris_emit_sbe_swiz(struct iris_batch *batch,
3802 const struct iris_context *ice,
3803 unsigned urb_read_offset,
3804 unsigned sprite_coord_enables)
3805 {
3806 struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = {};
3807 const struct brw_wm_prog_data *wm_prog_data = (void *)
3808 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
3809 const struct brw_vue_map *vue_map = ice->shaders.last_vue_map;
3810 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
3811
3812 /* XXX: this should be generated when putting programs in place */
3813
3814 for (int fs_attr = 0; fs_attr < VARYING_SLOT_MAX; fs_attr++) {
3815 const int input_index = wm_prog_data->urb_setup[fs_attr];
3816 if (input_index < 0 || input_index >= 16)
3817 continue;
3818
3819 struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr =
3820 &attr_overrides[input_index];
3821 int slot = vue_map->varying_to_slot[fs_attr];
3822
3823 /* Viewport and Layer are stored in the VUE header. We need to override
3824 * them to zero if earlier stages didn't write them, as GL requires that
3825 * they read back as zero when not explicitly set.
3826 */
3827 switch (fs_attr) {
3828 case VARYING_SLOT_VIEWPORT:
3829 case VARYING_SLOT_LAYER:
3830 attr->ComponentOverrideX = true;
3831 attr->ComponentOverrideW = true;
3832 attr->ConstantSource = CONST_0000;
3833
3834 if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
3835 attr->ComponentOverrideY = true;
3836 if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
3837 attr->ComponentOverrideZ = true;
3838 continue;
3839
3840 case VARYING_SLOT_PRIMITIVE_ID:
3841 /* Override if the previous shader stage didn't write gl_PrimitiveID. */
3842 if (slot == -1) {
3843 attr->ComponentOverrideX = true;
3844 attr->ComponentOverrideY = true;
3845 attr->ComponentOverrideZ = true;
3846 attr->ComponentOverrideW = true;
3847 attr->ConstantSource = PRIM_ID;
3848 continue;
3849 }
3850          /* fallthrough */
3851 default:
3852 break;
3853 }
3854
3855 if (sprite_coord_enables & (1 << input_index))
3856 continue;
3857
3858 /* If there was only a back color written but not front, use back
3859 * as the color instead of undefined.
3860 */
3861 if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
3862 slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
3863 if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
3864 slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];
3865
3866 /* Not written by the previous stage - undefined. */
3867 if (slot == -1) {
3868 attr->ComponentOverrideX = true;
3869 attr->ComponentOverrideY = true;
3870 attr->ComponentOverrideZ = true;
3871 attr->ComponentOverrideW = true;
3872 attr->ConstantSource = CONST_0001_FLOAT;
3873 continue;
3874 }
3875
3876 /* Compute the location of the attribute relative to the read offset,
3877 * which is counted in 256-bit increments (two 128-bit VUE slots).
3878 */
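      /* For instance (hypothetical numbers), slot 5 with urb_read_offset 2
       * gives source_attr = 5 - 2 * 2 = 1.
       */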
3879 const int source_attr = slot - 2 * urb_read_offset;
3880 assert(source_attr >= 0 && source_attr <= 32);
3881 attr->SourceAttribute = source_attr;
3882
3883 /* If we are doing two-sided color, and the VUE slot following this one
3884 * represents a back-facing color, then we need to instruct the SF unit
3885 * to do back-facing swizzling.
3886 */
3887 if (cso_rast->light_twoside &&
3888 ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
3889 vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
3890 (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
3891 vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1)))
3892 attr->SwizzleSelect = INPUTATTR_FACING;
3893 }
3894
3895 iris_emit_cmd(batch, GENX(3DSTATE_SBE_SWIZ), sbes) {
3896 for (int i = 0; i < 16; i++)
3897 sbes.Attribute[i] = attr_overrides[i];
3898 }
3899 }
3900
3901 static unsigned
3902 iris_calculate_point_sprite_overrides(const struct brw_wm_prog_data *prog_data,
3903 const struct iris_rasterizer_state *cso)
3904 {
3905 unsigned overrides = 0;
3906
3907 if (prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
3908 overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_PNTC];
3909
3910 for (int i = 0; i < 8; i++) {
3911 if ((cso->sprite_coord_enable & (1 << i)) &&
3912 prog_data->urb_setup[VARYING_SLOT_TEX0 + i] != -1)
3913 overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_TEX0 + i];
3914 }
3915
3916 return overrides;
3917 }
3918
3919 static void
3920 iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
3921 {
3922 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
3923 const struct brw_wm_prog_data *wm_prog_data = (void *)
3924 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
3925 const struct shader_info *fs_info =
3926 iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
3927
3928 unsigned urb_read_offset, urb_read_length;
3929 iris_compute_sbe_urb_read_interval(fs_info->inputs_read,
3930 ice->shaders.last_vue_map,
3931 cso_rast->light_twoside,
3932 &urb_read_offset, &urb_read_length);
3933
3934 unsigned sprite_coord_overrides =
3935 iris_calculate_point_sprite_overrides(wm_prog_data, cso_rast);
3936
3937 iris_emit_cmd(batch, GENX(3DSTATE_SBE), sbe) {
3938 sbe.AttributeSwizzleEnable = true;
3939 sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
3940 sbe.PointSpriteTextureCoordinateOrigin = cso_rast->sprite_coord_mode;
3941 sbe.VertexURBEntryReadOffset = urb_read_offset;
3942 sbe.VertexURBEntryReadLength = urb_read_length;
3943 sbe.ForceVertexURBEntryReadOffset = true;
3944 sbe.ForceVertexURBEntryReadLength = true;
3945 sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
3946 sbe.PointSpriteTextureCoordinateEnable = sprite_coord_overrides;
3947 #if GEN_GEN >= 9
3948 for (int i = 0; i < 32; i++) {
3949 sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
3950 }
3951 #endif
3952 }
3953
3954 iris_emit_sbe_swiz(batch, ice, urb_read_offset, sprite_coord_overrides);
3955 }
3956
3957 /* ------------------------------------------------------------------- */
3958
3959 /**
3960 * Populate VS program key fields based on the current state.
3961 */
3962 static void
3963 iris_populate_vs_key(const struct iris_context *ice,
3964 const struct shader_info *info,
3965 gl_shader_stage last_stage,
3966 struct brw_vs_prog_key *key)
3967 {
3968 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
3969
3970 if (info->clip_distance_array_size == 0 &&
3971 (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
3972 last_stage == MESA_SHADER_VERTEX)
3973 key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
3974 }
3975
3976 /**
3977 * Populate TCS program key fields based on the current state.
3978 */
3979 static void
3980 iris_populate_tcs_key(const struct iris_context *ice,
3981 struct brw_tcs_prog_key *key)
3982 {
3983 }
3984
3985 /**
3986 * Populate TES program key fields based on the current state.
3987 */
3988 static void
3989 iris_populate_tes_key(const struct iris_context *ice,
3990 const struct shader_info *info,
3991 gl_shader_stage last_stage,
3992 struct brw_tes_prog_key *key)
3993 {
3994 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
3995
3996 if (info->clip_distance_array_size == 0 &&
3997 (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
3998 last_stage == MESA_SHADER_TESS_EVAL)
3999 key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4000 }
4001
4002 /**
4003 * Populate GS program key fields based on the current state.
4004 */
4005 static void
4006 iris_populate_gs_key(const struct iris_context *ice,
4007 const struct shader_info *info,
4008 gl_shader_stage last_stage,
4009 struct brw_gs_prog_key *key)
4010 {
4011 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4012
4013 if (info->clip_distance_array_size == 0 &&
4014 (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4015 last_stage == MESA_SHADER_GEOMETRY)
4016 key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4017 }
4018
4019 /**
4020 * Populate FS program key fields based on the current state.
4021 */
4022 static void
4023 iris_populate_fs_key(const struct iris_context *ice,
4024 const struct shader_info *info,
4025 struct brw_wm_prog_key *key)
4026 {
4027 struct iris_screen *screen = (void *) ice->ctx.screen;
4028 const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
4029 const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa;
4030 const struct iris_rasterizer_state *rast = ice->state.cso_rast;
4031 const struct iris_blend_state *blend = ice->state.cso_blend;
4032
4033 key->nr_color_regions = fb->nr_cbufs;
4034
4035 key->clamp_fragment_color = rast->clamp_fragment_color;
4036
4037 key->alpha_to_coverage = blend->alpha_to_coverage;
4038
4039 key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha.enabled;
4040
4041 key->flat_shade = rast->flatshade &&
4042 (info->inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1));
4043
4044 key->persample_interp = rast->force_persample_interp;
4045 key->multisample_fbo = rast->multisample && fb->samples > 1;
4046
4047 key->coherent_fb_fetch = GEN_GEN >= 9;
4048
4049 key->force_dual_color_blend =
4050 screen->driconf.dual_color_blend_by_location &&
4051 (blend->blend_enables & 1) && blend->dual_color_blending;
4052
4053 /* TODO: Respect glHint for key->high_quality_derivatives */
4054 }
4055
4056 static void
4057 iris_populate_cs_key(const struct iris_context *ice,
4058 struct brw_cs_prog_key *key)
4059 {
4060 }
4061
4062 static uint64_t
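/* The Kernel Start Pointer for a shader variant: the offset of its
 * assembly within the instruction memory zone, relative to the base
 * address programmed in STATE_BASE_ADDRESS.
 */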
4063 KSP(const struct iris_compiled_shader *shader)
4064 {
4065 struct iris_resource *res = (void *) shader->assembly.res;
4066 return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
4067 }
4068
4069 /* Gen11 workaround table #2056 WABTPPrefetchDisable suggests disabling
4070  * prefetching of binding tables in A0 and B0 steppings.  XXX: Revisit
4071 * this WA on C0 stepping.
4072 *
4073 * TODO: Fill out SamplerCount for prefetching?
4074 */
4075
4076 #define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage) \
4077 pkt.KernelStartPointer = KSP(shader); \
4078 pkt.BindingTableEntryCount = GEN_GEN == 11 ? 0 : \
4079 shader->bt.size_bytes / 4; \
4080 pkt.FloatingPointMode = prog_data->use_alt_mode; \
4081 \
4082 pkt.DispatchGRFStartRegisterForURBData = \
4083 prog_data->dispatch_grf_start_reg; \
4084 pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length; \
4085 pkt.prefix##URBEntryReadOffset = 0; \
4086 \
4087 pkt.StatisticsEnable = true; \
4088 pkt.Enable = true; \
4089 \
4090 if (prog_data->total_scratch) { \
4091 struct iris_bo *bo = \
4092 iris_get_scratch_space(ice, prog_data->total_scratch, stage); \
4093 uint32_t scratch_addr = bo->gtt_offset; \
4094 pkt.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11; \
4095 pkt.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr); \
4096 }
4097
4098 /**
4099 * Encode most of 3DSTATE_VS based on the compiled shader.
4100 */
4101 static void
4102 iris_store_vs_state(struct iris_context *ice,
4103 const struct gen_device_info *devinfo,
4104 struct iris_compiled_shader *shader)
4105 {
4106 struct brw_stage_prog_data *prog_data = shader->prog_data;
4107 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4108
4109 iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
4110 INIT_THREAD_DISPATCH_FIELDS(vs, Vertex, MESA_SHADER_VERTEX);
4111 vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
4112 vs.SIMD8DispatchEnable = true;
4113 vs.UserClipDistanceCullTestEnableBitmask =
4114 vue_prog_data->cull_distance_mask;
4115 }
4116 }
4117
4118 /**
4119 * Encode most of 3DSTATE_HS based on the compiled shader.
4120 */
4121 static void
4122 iris_store_tcs_state(struct iris_context *ice,
4123 const struct gen_device_info *devinfo,
4124 struct iris_compiled_shader *shader)
4125 {
4126 struct brw_stage_prog_data *prog_data = shader->prog_data;
4127 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4128 struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;
4129
4130 iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
4131 INIT_THREAD_DISPATCH_FIELDS(hs, Vertex, MESA_SHADER_TESS_CTRL);
4132
4133 hs.InstanceCount = tcs_prog_data->instances - 1;
4134 hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
4135 hs.IncludeVertexHandles = true;
4136
4137 #if GEN_GEN >= 9
4138 hs.DispatchMode = vue_prog_data->dispatch_mode;
4139 hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
4140 #endif
4141 }
4142 }
4143
4144 /**
4145 * Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
4146 */
4147 static void
4148 iris_store_tes_state(struct iris_context *ice,
4149 const struct gen_device_info *devinfo,
4150 struct iris_compiled_shader *shader)
4151 {
4152 struct brw_stage_prog_data *prog_data = shader->prog_data;
4153 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4154 struct brw_tes_prog_data *tes_prog_data = (void *) prog_data;
4155
4156 uint32_t *te_state = (void *) shader->derived_data;
4157 uint32_t *ds_state = te_state + GENX(3DSTATE_TE_length);
4158
4159 iris_pack_command(GENX(3DSTATE_TE), te_state, te) {
4160 te.Partitioning = tes_prog_data->partitioning;
4161 te.OutputTopology = tes_prog_data->output_topology;
4162 te.TEDomain = tes_prog_data->domain;
4163 te.TEEnable = true;
4164 te.MaximumTessellationFactorOdd = 63.0;
4165 te.MaximumTessellationFactorNotOdd = 64.0;
4166 }
4167
4168 iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
4169 INIT_THREAD_DISPATCH_FIELDS(ds, Patch, MESA_SHADER_TESS_EVAL);
4170
4171 ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
4172 ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
4173 ds.ComputeWCoordinateEnable =
4174 tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;
4175
4176 ds.UserClipDistanceCullTestEnableBitmask =
4177 vue_prog_data->cull_distance_mask;
4178 }
4180 }
4181
4182 /**
4183 * Encode most of 3DSTATE_GS based on the compiled shader.
4184 */
4185 static void
4186 iris_store_gs_state(struct iris_context *ice,
4187 const struct gen_device_info *devinfo,
4188 struct iris_compiled_shader *shader)
4189 {
4190 struct brw_stage_prog_data *prog_data = shader->prog_data;
4191 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4192 struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;
4193
4194 iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
4195 INIT_THREAD_DISPATCH_FIELDS(gs, Vertex, MESA_SHADER_GEOMETRY);
4196
4197 gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
4198 gs.OutputTopology = gs_prog_data->output_topology;
4199 gs.ControlDataHeaderSize =
4200 gs_prog_data->control_data_header_size_hwords;
4201 gs.InstanceControl = gs_prog_data->invocations - 1;
4202 gs.DispatchMode = DISPATCH_MODE_SIMD8;
4203 gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
4204 gs.ControlDataFormat = gs_prog_data->control_data_format;
4205 gs.ReorderMode = TRAILING;
4206 gs.ExpectedVertexCount = gs_prog_data->vertices_in;
4207 gs.MaximumNumberofThreads =
4208 GEN_GEN == 8 ? (devinfo->max_gs_threads / 2 - 1)
4209 : (devinfo->max_gs_threads - 1);
4210
4211 if (gs_prog_data->static_vertex_count != -1) {
4212 gs.StaticOutput = true;
4213 gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
4214 }
4215 gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;
4216
4217 gs.UserClipDistanceCullTestEnableBitmask =
4218 vue_prog_data->cull_distance_mask;
4219
4220 const int urb_entry_write_offset = 1;
4221 const uint32_t urb_entry_output_length =
4222 DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
4223 urb_entry_write_offset;
4224
4225 gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
4226 gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
4227 }
4228 }
4229
4230 /**
4231 * Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
4232 */
4233 static void
4234 iris_store_fs_state(struct iris_context *ice,
4235 const struct gen_device_info *devinfo,
4236 struct iris_compiled_shader *shader)
4237 {
4238 struct brw_stage_prog_data *prog_data = shader->prog_data;
4239 struct brw_wm_prog_data *wm_prog_data = (void *) shader->prog_data;
4240
4241 uint32_t *ps_state = (void *) shader->derived_data;
4242 uint32_t *psx_state = ps_state + GENX(3DSTATE_PS_length);
4243
4244 iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
4245 ps.VectorMaskEnable = true;
4246 // XXX: WABTPPrefetchDisable, see above, drop at C0
4247 ps.BindingTableEntryCount = GEN_GEN == 11 ? 0 :
4248 shader->bt.size_bytes / 4;
4249 ps.FloatingPointMode = prog_data->use_alt_mode;
4250 ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);
4251
4252 ps.PushConstantEnable = prog_data->ubo_ranges[0].length > 0;
4253
4254 /* From the documentation for this packet:
4255 * "If the PS kernel does not need the Position XY Offsets to
4256 * compute a Position Value, then this field should be programmed
4257 * to POSOFFSET_NONE."
4258 *
4259 * "SW Recommendation: If the PS kernel needs the Position Offsets
4260 * to compute a Position XY value, this field should match Position
4261 * ZW Interpolation Mode to ensure a consistent position.xyzw
4262 * computation."
4263 *
4264        * We only require XY sample offsets, so this recommendation doesn't
4265        * look useful at the moment; we may need it in the future.
4266 */
4267 ps.PositionXYOffsetSelect =
4268 wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;
4269
4270 if (prog_data->total_scratch) {
4271 struct iris_bo *bo =
4272 iris_get_scratch_space(ice, prog_data->total_scratch,
4273 MESA_SHADER_FRAGMENT);
4274 uint32_t scratch_addr = bo->gtt_offset;
4275 ps.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
4276 ps.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr);
4277 }
4278 }
4279
4280 iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
4281 psx.PixelShaderValid = true;
4282 psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
4283 psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
4284 psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
4285 psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
4286 psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
4287 psx.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;
4288 psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
4289
4290 #if GEN_GEN >= 9
4291 psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
4292 psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
4293 #endif
4294 }
4295 }
4296
4297 /**
4298  * Encode INTERFACE_DESCRIPTOR_DATA for a compute shader, based on the
4299  * compiled program.  This is the compute counterpart of the
4300  * iris_store_xs_state() functions above.
4301  */
4302 static void
4303 iris_store_cs_state(struct iris_context *ice,
4304 const struct gen_device_info *devinfo,
4305 struct iris_compiled_shader *shader)
4306 {
4307 struct brw_stage_prog_data *prog_data = shader->prog_data;
4308 struct brw_cs_prog_data *cs_prog_data = (void *) shader->prog_data;
4309 void *map = shader->derived_data;
4310
4311 iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
4312 desc.KernelStartPointer = KSP(shader);
4313 desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
4314 desc.NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads;
4315 desc.SharedLocalMemorySize =
4316 encode_slm_size(GEN_GEN, prog_data->total_shared);
4317 desc.BarrierEnable = cs_prog_data->uses_barrier;
4318 desc.CrossThreadConstantDataReadLength =
4319 cs_prog_data->push.cross_thread.regs;
4320 }
4321 }
4322
/**
 * Compute the size of the derived data (shader command packets).
 *
 * This must match the data written by the iris_store_xs_state() functions.
 */
4323 static unsigned
4324 iris_derived_program_state_size(enum iris_program_cache_id cache_id)
4325 {
4326 assert(cache_id <= IRIS_CACHE_BLORP);
4327
4328 static const unsigned dwords[] = {
4329 [IRIS_CACHE_VS] = GENX(3DSTATE_VS_length),
4330 [IRIS_CACHE_TCS] = GENX(3DSTATE_HS_length),
4331 [IRIS_CACHE_TES] = GENX(3DSTATE_TE_length) + GENX(3DSTATE_DS_length),
4332 [IRIS_CACHE_GS] = GENX(3DSTATE_GS_length),
4333 [IRIS_CACHE_FS] =
4334 GENX(3DSTATE_PS_length) + GENX(3DSTATE_PS_EXTRA_length),
4335 [IRIS_CACHE_CS] = GENX(INTERFACE_DESCRIPTOR_DATA_length),
4336 [IRIS_CACHE_BLORP] = 0,
4337 };
4338
4339 return sizeof(uint32_t) * dwords[cache_id];
4340 }
4341
4342 /**
4343 * Create any state packets corresponding to the given shader stage
4344 * (i.e. 3DSTATE_VS) and save them as "derived data" in the shader variant.
4345 * This means that we can look up a program in the in-memory cache and
4346 * get most of the state packet without having to reconstruct it.
4347 */
4348 static void
4349 iris_store_derived_program_state(struct iris_context *ice,
4350 enum iris_program_cache_id cache_id,
4351 struct iris_compiled_shader *shader)
4352 {
4353 struct iris_screen *screen = (void *) ice->ctx.screen;
4354 const struct gen_device_info *devinfo = &screen->devinfo;
4355
4356 switch (cache_id) {
4357 case IRIS_CACHE_VS:
4358 iris_store_vs_state(ice, devinfo, shader);
4359 break;
4360 case IRIS_CACHE_TCS:
4361 iris_store_tcs_state(ice, devinfo, shader);
4362 break;
4363 case IRIS_CACHE_TES:
4364 iris_store_tes_state(ice, devinfo, shader);
4365 break;
4366 case IRIS_CACHE_GS:
4367 iris_store_gs_state(ice, devinfo, shader);
4368 break;
4369 case IRIS_CACHE_FS:
4370 iris_store_fs_state(ice, devinfo, shader);
4371 break;
4372 case IRIS_CACHE_CS:
4373       iris_store_cs_state(ice, devinfo, shader);
           break;
4374 case IRIS_CACHE_BLORP:
4375 break;
4376 default:
4377 break;
4378 }
4379 }
4380
4381 /* ------------------------------------------------------------------- */
4382
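/* _3DCommandSubOpcode values for the 3DSTATE_CONSTANT_XS packets, indexed
 * by shader stage.  Compute has no such packet, hence the 0.
 */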
4383 static const uint32_t push_constant_opcodes[] = {
4384 [MESA_SHADER_VERTEX] = 21,
4385 [MESA_SHADER_TESS_CTRL] = 25, /* HS */
4386 [MESA_SHADER_TESS_EVAL] = 26, /* DS */
4387 [MESA_SHADER_GEOMETRY] = 22,
4388 [MESA_SHADER_FRAGMENT] = 23,
4389 [MESA_SHADER_COMPUTE] = 0,
4390 };
4391
4392 static uint32_t
4393 use_null_surface(struct iris_batch *batch, struct iris_context *ice)
4394 {
4395 struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);
4396
4397 iris_use_pinned_bo(batch, state_bo, false);
4398
4399 return ice->state.unbound_tex.offset;
4400 }
4401
4402 static uint32_t
4403 use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
4404 {
4405 /* If set_framebuffer_state() was never called, fall back to 1x1x1 */
4406 if (!ice->state.null_fb.res)
4407 return use_null_surface(batch, ice);
4408
4409 struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);
4410
4411 iris_use_pinned_bo(batch, state_bo, false);
4412
4413 return ice->state.null_fb.offset;
4414 }
4415
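/* A resource gets one SURFACE_STATE per supported aux mode, packed
 * contiguously in ascending isl_aux_usage order.  The offset to the state
 * for a given usage is the number of lower-valued modes present in
 * aux_modes, times the per-state stride.  For example (hypothetical),
 * with aux_modes = NONE | CCS_E and aux_usage = ISL_AUX_USAGE_CCS_E, only
 * the NONE bit lies below, giving 1 * SURFACE_STATE_ALIGNMENT.
 */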
4416 static uint32_t
4417 surf_state_offset_for_aux(struct iris_resource *res,
4418 unsigned aux_modes,
4419 enum isl_aux_usage aux_usage)
4420 {
4421 return SURFACE_STATE_ALIGNMENT *
4422 util_bitcount(aux_modes & ((1 << aux_usage) - 1));
4423 }
4424
4425 #if GEN_GEN == 9
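/* Patch the packed clear color dwords inside already-uploaded
 * SURFACE_STATEs with PIPE_CONTROL immediate writes, avoiding a full
 * re-upload of the surface state on every clear color change.
 */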
4426 static void
4427 surf_state_update_clear_value(struct iris_batch *batch,
4428 struct iris_resource *res,
4429 struct iris_state_ref *state,
4430 unsigned aux_modes,
4431 enum isl_aux_usage aux_usage)
4432 {
4433 struct isl_device *isl_dev = &batch->screen->isl_dev;
4434 struct iris_bo *state_bo = iris_resource_bo(state->res);
4435 uint64_t real_offset = state->offset + IRIS_MEMZONE_BINDER_START;
4436 uint32_t offset_into_bo = real_offset - state_bo->gtt_offset;
4437 uint32_t clear_offset = offset_into_bo +
4438 isl_dev->ss.clear_value_offset +
4439 surf_state_offset_for_aux(res, aux_modes, aux_usage);
4440 uint32_t *color = res->aux.clear_color.u32;
4441
4442 assert(isl_dev->ss.clear_value_size == 16);
4443
4444 if (aux_usage == ISL_AUX_USAGE_HIZ) {
4445 iris_emit_pipe_control_write(batch, "update fast clear value (Z)",
4446 PIPE_CONTROL_WRITE_IMMEDIATE,
4447 state_bo, clear_offset, color[0]);
4448 } else {
4449 iris_emit_pipe_control_write(batch, "update fast clear color (RG__)",
4450 PIPE_CONTROL_WRITE_IMMEDIATE,
4451 state_bo, clear_offset,
4452 (uint64_t) color[0] |
4453 (uint64_t) color[1] << 32);
4454 iris_emit_pipe_control_write(batch, "update fast clear color (__BA)",
4455 PIPE_CONTROL_WRITE_IMMEDIATE,
4456 state_bo, clear_offset + 8,
4457 (uint64_t) color[2] |
4458 (uint64_t) color[3] << 32);
4459 }
4460
4461 iris_emit_pipe_control_flush(batch,
4462 "update fast clear: state cache invalidate",
4463 PIPE_CONTROL_FLUSH_ENABLE |
4464 PIPE_CONTROL_STATE_CACHE_INVALIDATE);
4465 }
4466 #endif
4467
4468 static void
4469 update_clear_value(struct iris_context *ice,
4470 struct iris_batch *batch,
4471 struct iris_resource *res,
4472 struct iris_state_ref *state,
4473 unsigned all_aux_modes,
4474 struct isl_view *view)
4475 {
4476 UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
4477 UNUSED unsigned aux_modes = all_aux_modes;
4478
4479 /* We only need to update the clear color in the surface state for gen8 and
4480 * gen9. Newer gens can read it directly from the clear color state buffer.
4481 */
4482 #if GEN_GEN == 9
4483 /* Skip updating the ISL_AUX_USAGE_NONE surface state */
4484 aux_modes &= ~(1 << ISL_AUX_USAGE_NONE);
4485
4486 while (aux_modes) {
4487 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
4488
4489 surf_state_update_clear_value(batch, res, state, all_aux_modes,
4490 aux_usage);
4491 }
4492 #elif GEN_GEN == 8
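   /* Gen8 has no separate clear color buffer for the hardware to read, so
    * the color is baked into each SURFACE_STATE; throw away the old states
    * and fill fresh ones with the new clear color.
    */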
4493 pipe_resource_reference(&state->res, NULL);
4494
4495 void *map = alloc_surface_states(ice->state.surface_uploader,
4496 state, all_aux_modes);
4497 while (aux_modes) {
4498 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
4499 fill_surface_state(isl_dev, map, res, &res->surf, view, aux_usage, 0, 0);
4500 map += SURFACE_STATE_ALIGNMENT;
4501 }
4502 #endif
4503 }
4504
4505 /**
4506 * Add a surface to the validation list, as well as the buffer containing
4507 * the corresponding SURFACE_STATE.
4508 *
4509 * Returns the binding table entry (offset to SURFACE_STATE).
4510 */
4511 static uint32_t
4512 use_surface(struct iris_context *ice,
4513 struct iris_batch *batch,
4514 struct pipe_surface *p_surf,
4515 bool writeable,
4516 enum isl_aux_usage aux_usage,
4517 bool is_read_surface)
4518 {
4519 struct iris_surface *surf = (void *) p_surf;
4520 struct iris_resource *res = (void *) p_surf->texture;
4521 uint32_t offset = 0;
4522
4523 iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture), writeable);
4524 if (GEN_GEN == 8 && is_read_surface) {
4525 iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state_read.res), false);
4526 } else {
4527 iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.res), false);
4528 }
4529
4530 if (res->aux.bo) {
4531 iris_use_pinned_bo(batch, res->aux.bo, writeable);
4532 if (res->aux.clear_color_bo)
4533 iris_use_pinned_bo(batch, res->aux.clear_color_bo, false);
4534
4535 if (memcmp(&res->aux.clear_color, &surf->clear_color,
4536 sizeof(surf->clear_color)) != 0) {
4537 update_clear_value(ice, batch, res, &surf->surface_state,
4538 res->aux.possible_usages, &surf->view);
4539 if (GEN_GEN == 8) {
4540 update_clear_value(ice, batch, res, &surf->surface_state_read,
4541 res->aux.possible_usages, &surf->read_view);
4542 }
4543 surf->clear_color = res->aux.clear_color;
4544 }
4545 }
4546
4547 offset = (GEN_GEN == 8 && is_read_surface) ? surf->surface_state_read.offset
4548 : surf->surface_state.offset;
4549
4550 return offset +
4551 surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
4552 }
4553
4554 static uint32_t
4555 use_sampler_view(struct iris_context *ice,
4556 struct iris_batch *batch,
4557 struct iris_sampler_view *isv)
4558 {
4559 // XXX: ASTC hacks
4560 enum isl_aux_usage aux_usage =
4561 iris_resource_texture_aux_usage(ice, isv->res, isv->view.format, 0);
4562
4563 iris_use_pinned_bo(batch, isv->res->bo, false);
4564 iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.res), false);
4565
4566 if (isv->res->aux.bo) {
4567 iris_use_pinned_bo(batch, isv->res->aux.bo, false);
4568 if (isv->res->aux.clear_color_bo)
4569 iris_use_pinned_bo(batch, isv->res->aux.clear_color_bo, false);
4570 if (memcmp(&isv->res->aux.clear_color, &isv->clear_color,
4571 sizeof(isv->clear_color)) != 0) {
4572 update_clear_value(ice, batch, isv->res, &isv->surface_state,
4573 isv->res->aux.sampler_usages, &isv->view);
4574 isv->clear_color = isv->res->aux.clear_color;
4575 }
4576 }
4577
4578 return isv->surface_state.offset +
4579 surf_state_offset_for_aux(isv->res, isv->res->aux.sampler_usages,
4580 aux_usage);
4581 }
4582
4583 static uint32_t
4584 use_ubo_ssbo(struct iris_batch *batch,
4585 struct iris_context *ice,
4586 struct pipe_shader_buffer *buf,
4587 struct iris_state_ref *surf_state,
4588 bool writable)
4589 {
4590 if (!buf->buffer || !surf_state->res)
4591 return use_null_surface(batch, ice);
4592
4593 iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable);
4594 iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false);
4595
4596 return surf_state->offset;
4597 }
4598
4599 static uint32_t
4600 use_image(struct iris_batch *batch, struct iris_context *ice,
4601 struct iris_shader_state *shs, int i)
4602 {
4603 struct iris_image_view *iv = &shs->image[i];
4604 struct iris_resource *res = (void *) iv->base.resource;
4605
4606 if (!res)
4607 return use_null_surface(batch, ice);
4608
4609 bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE;
4610
4611 iris_use_pinned_bo(batch, res->bo, write);
4612 iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.res), false);
4613
4614 if (res->aux.bo)
4615 iris_use_pinned_bo(batch, res->aux.bo, write);
4616
4617 return iv->surface_state.offset;
4618 }
4619
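/* Helpers for iris_populate_binding_table() below: push_bt_entry()
 * converts an absolute SURFACE_STATE address into a binder-relative
 * binding table entry (unless we're only pinning BOs), and bt_assert()
 * checks that entries land at the offsets the compiled binding table
 * layout expects.
 */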
4620 #define push_bt_entry(addr) \
4621 assert(addr >= binder_addr); \
4622 assert(s < shader->bt.size_bytes / sizeof(uint32_t)); \
4623 if (!pin_only) bt_map[s++] = (addr) - binder_addr;
4624
4625 #define bt_assert(section) \
4626 if (!pin_only && shader->bt.used_mask[section] != 0) \
4627 assert(shader->bt.offsets[section] == s);
4628
4629 /**
4630 * Populate the binding table for a given shader stage.
4631 *
4632 * This fills out the table of pointers to surfaces required by the shader,
4633 * and also adds those buffers to the validation list so the kernel can make
4634 * resident before running our batch.
4635 */
4636 static void
4637 iris_populate_binding_table(struct iris_context *ice,
4638 struct iris_batch *batch,
4639 gl_shader_stage stage,
4640 bool pin_only)
4641 {
4642 const struct iris_binder *binder = &ice->state.binder;
4643 struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
4644 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
4645 if (!shader)
4646 return;
4647
4648 struct iris_binding_table *bt = &shader->bt;
4649 UNUSED struct brw_stage_prog_data *prog_data = shader->prog_data;
4650 struct iris_shader_state *shs = &ice->state.shaders[stage];
4651 uint32_t binder_addr = binder->bo->gtt_offset;
4652
4653 uint32_t *bt_map = binder->map + binder->bt_offset[stage];
4654 int s = 0;
4655
4656 const struct shader_info *info = iris_get_shader_info(ice, stage);
4657 if (!info) {
4658 /* TCS passthrough doesn't need a binding table. */
4659 assert(stage == MESA_SHADER_TESS_CTRL);
4660 return;
4661 }
4662
4663 if (stage == MESA_SHADER_COMPUTE &&
4664 shader->bt.used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS]) {
4665 /* surface for gl_NumWorkGroups */
4666 struct iris_state_ref *grid_data = &ice->state.grid_size;
4667 struct iris_state_ref *grid_state = &ice->state.grid_surf_state;
4668 iris_use_pinned_bo(batch, iris_resource_bo(grid_data->res), false);
4669 iris_use_pinned_bo(batch, iris_resource_bo(grid_state->res), false);
4670 push_bt_entry(grid_state->offset);
4671 }
4672
4673 if (stage == MESA_SHADER_FRAGMENT) {
4674 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4675 /* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
4676 if (cso_fb->nr_cbufs) {
4677 for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
4678 uint32_t addr;
4679 if (cso_fb->cbufs[i]) {
4680 addr = use_surface(ice, batch, cso_fb->cbufs[i], true,
4681 ice->state.draw_aux_usage[i], false);
4682 } else {
4683 addr = use_null_fb_surface(batch, ice);
4684 }
4685 push_bt_entry(addr);
4686 }
4687 } else if (GEN_GEN < 11) {
4688 uint32_t addr = use_null_fb_surface(batch, ice);
4689 push_bt_entry(addr);
4690 }
4691 }
4692
4693 #define foreach_surface_used(index, group) \
4694 bt_assert(group); \
4695 for (int index = 0; index < bt->sizes[group]; index++) \
4696 if (iris_group_index_to_bti(bt, group, index) != \
4697 IRIS_SURFACE_NOT_USED)
4698
4699 foreach_surface_used(i, IRIS_SURFACE_GROUP_RENDER_TARGET_READ) {
4700 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4701 uint32_t addr;
4702 if (cso_fb->cbufs[i]) {
4703 addr = use_surface(ice, batch, cso_fb->cbufs[i],
4704 true, ice->state.draw_aux_usage[i], true);
4705 push_bt_entry(addr);
4706 }
4707 }
4708
4709 foreach_surface_used(i, IRIS_SURFACE_GROUP_TEXTURE) {
4710 struct iris_sampler_view *view = shs->textures[i];
4711 uint32_t addr = view ? use_sampler_view(ice, batch, view)
4712 : use_null_surface(batch, ice);
4713 push_bt_entry(addr);
4714 }
4715
4716 foreach_surface_used(i, IRIS_SURFACE_GROUP_IMAGE) {
4717 uint32_t addr = use_image(batch, ice, shs, i);
4718 push_bt_entry(addr);
4719 }
4720
4721 foreach_surface_used(i, IRIS_SURFACE_GROUP_UBO) {
4722 uint32_t addr;
4723
4724 if (i == bt->sizes[IRIS_SURFACE_GROUP_UBO] - 1) {
4725 if (ish->const_data) {
4726 iris_use_pinned_bo(batch, iris_resource_bo(ish->const_data), false);
4727 iris_use_pinned_bo(batch, iris_resource_bo(ish->const_data_state.res),
4728 false);
4729 addr = ish->const_data_state.offset;
4730 } else {
4731 /* This can only happen with INTEL_DISABLE_COMPACT_BINDING_TABLE=1. */
4732 addr = use_null_surface(batch, ice);
4733 }
4734 } else {
4735 addr = use_ubo_ssbo(batch, ice, &shs->constbuf[i],
4736 &shs->constbuf_surf_state[i], false);
4737 }
4738
4739 push_bt_entry(addr);
4740 }
4741
4742 foreach_surface_used(i, IRIS_SURFACE_GROUP_SSBO) {
4743 uint32_t addr =
4744 use_ubo_ssbo(batch, ice, &shs->ssbo[i], &shs->ssbo_surf_state[i],
4745 shs->writable_ssbos & (1u << i));
4746 push_bt_entry(addr);
4747 }
4748
4749 #if 0
4750 /* XXX: YUV surfaces not implemented yet */
4751 bt_assert(plane_start[1], ...);
4752 bt_assert(plane_start[2], ...);
4753 #endif
4754 }
4755
4756 static void
4757 iris_use_optional_res(struct iris_batch *batch,
4758 struct pipe_resource *res,
4759 bool writeable)
4760 {
4761 if (res) {
4762 struct iris_bo *bo = iris_resource_bo(res);
4763 iris_use_pinned_bo(batch, bo, writeable);
4764 }
4765 }
4766
4767 static void
4768 pin_depth_and_stencil_buffers(struct iris_batch *batch,
4769 struct pipe_surface *zsbuf,
4770 struct iris_depth_stencil_alpha_state *cso_zsa)
4771 {
4772 if (!zsbuf)
4773 return;
4774
4775 struct iris_resource *zres, *sres;
4776 iris_get_depth_stencil_resources(zsbuf->texture, &zres, &sres);
4777
4778 if (zres) {
4779 iris_use_pinned_bo(batch, zres->bo, cso_zsa->depth_writes_enabled);
4780 if (zres->aux.bo) {
4781 iris_use_pinned_bo(batch, zres->aux.bo,
4782 cso_zsa->depth_writes_enabled);
4783 }
4784 }
4785
4786 if (sres) {
4787 iris_use_pinned_bo(batch, sres->bo, cso_zsa->stencil_writes_enabled);
4788 }
4789 }
4790
4791 /* ------------------------------------------------------------------- */
4792
4793 /**
4794 * Pin any BOs which were installed by a previous batch, and restored
4795 * via the hardware logical context mechanism.
4796 *
4797 * We don't need to re-emit all state every batch - the hardware context
4798 * mechanism will save and restore it for us. This includes pointers to
4799 * various BOs...which won't exist unless we ask the kernel to pin them
4800 * by adding them to the validation list.
4801 *
4802 * We can skip buffers if we've re-emitted those packets, as we're
4803 * overwriting those stale pointers with new ones, and don't actually
4804 * refer to the old BOs.
4805 */
4806 static void
4807 iris_restore_render_saved_bos(struct iris_context *ice,
4808 struct iris_batch *batch,
4809 const struct pipe_draw_info *draw)
4810 {
4811 struct iris_genx_state *genx = ice->state.genx;
4812
4813 const uint64_t clean = ~ice->state.dirty;
4814
4815 if (clean & IRIS_DIRTY_CC_VIEWPORT) {
4816 iris_use_optional_res(batch, ice->state.last_res.cc_vp, false);
4817 }
4818
4819 if (clean & IRIS_DIRTY_SF_CL_VIEWPORT) {
4820 iris_use_optional_res(batch, ice->state.last_res.sf_cl_vp, false);
4821 }
4822
4823 if (clean & IRIS_DIRTY_BLEND_STATE) {
4824 iris_use_optional_res(batch, ice->state.last_res.blend, false);
4825 }
4826
4827 if (clean & IRIS_DIRTY_COLOR_CALC_STATE) {
4828 iris_use_optional_res(batch, ice->state.last_res.color_calc, false);
4829 }
4830
4831 if (clean & IRIS_DIRTY_SCISSOR_RECT) {
4832 iris_use_optional_res(batch, ice->state.last_res.scissor, false);
4833 }
4834
4835 if (ice->state.streamout_active && (clean & IRIS_DIRTY_SO_BUFFERS)) {
4836 for (int i = 0; i < 4; i++) {
4837 struct iris_stream_output_target *tgt =
4838 (void *) ice->state.so_target[i];
4839 if (tgt) {
4840 iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
4841 true);
4842 iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
4843 true);
4844 }
4845 }
4846 }
4847
4848 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
4849 if (!(clean & (IRIS_DIRTY_CONSTANTS_VS << stage)))
4850 continue;
4851
4852 struct iris_shader_state *shs = &ice->state.shaders[stage];
4853 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
4854
4855 if (!shader)
4856 continue;
4857
4858 struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
4859
4860 for (int i = 0; i < 4; i++) {
4861 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
4862
4863 if (range->length == 0)
4864 continue;
4865
4866 /* Range block is a binding table index, map back to UBO index. */
4867 unsigned block_index = iris_bti_to_group_index(
4868 &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
4869 assert(block_index != IRIS_SURFACE_NOT_USED);
4870
4871 struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
4872 struct iris_resource *res = (void *) cbuf->buffer;
4873
4874 if (res)
4875 iris_use_pinned_bo(batch, res->bo, false);
4876 else
4877 iris_use_pinned_bo(batch, batch->screen->workaround_bo, false);
4878 }
4879 }
4880
4881 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
4882 if (clean & (IRIS_DIRTY_BINDINGS_VS << stage)) {
4883 /* Re-pin any buffers referred to by the binding table. */
4884 iris_populate_binding_table(ice, batch, stage, true);
4885 }
4886 }
4887
4888 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
4889 struct iris_shader_state *shs = &ice->state.shaders[stage];
4890 struct pipe_resource *res = shs->sampler_table.res;
4891 if (res)
4892 iris_use_pinned_bo(batch, iris_resource_bo(res), false);
4893 }
4894
4895 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
4896 if (clean & (IRIS_DIRTY_VS << stage)) {
4897 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
4898
4899 if (shader) {
4900 struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
4901 iris_use_pinned_bo(batch, bo, false);
4902
4903 struct brw_stage_prog_data *prog_data = shader->prog_data;
4904
4905 if (prog_data->total_scratch > 0) {
4906 struct iris_bo *bo =
4907 iris_get_scratch_space(ice, prog_data->total_scratch, stage);
4908 iris_use_pinned_bo(batch, bo, true);
4909 }
4910 }
4911 }
4912 }
4913
4914 if ((clean & IRIS_DIRTY_DEPTH_BUFFER) &&
4915 (clean & IRIS_DIRTY_WM_DEPTH_STENCIL)) {
4916 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4917 pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
4918 }
4919
4920 iris_use_optional_res(batch, ice->state.last_res.index_buffer, false);
4921
4922 if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
4923 uint64_t bound = ice->state.bound_vertex_buffers;
4924 while (bound) {
4925 const int i = u_bit_scan64(&bound);
4926 struct pipe_resource *res = genx->vertex_buffers[i].resource;
4927 iris_use_pinned_bo(batch, iris_resource_bo(res), false);
4928 }
4929 }
4930 }
4931
4932 static void
4933 iris_restore_compute_saved_bos(struct iris_context *ice,
4934 struct iris_batch *batch,
4935 const struct pipe_grid_info *grid)
4936 {
4937 const uint64_t clean = ~ice->state.dirty;
4938
4939 const int stage = MESA_SHADER_COMPUTE;
4940 struct iris_shader_state *shs = &ice->state.shaders[stage];
4941
4942 if (clean & IRIS_DIRTY_BINDINGS_CS) {
4943 /* Re-pin any buffers referred to by the binding table. */
4944 iris_populate_binding_table(ice, batch, stage, true);
4945 }
4946
4947 struct pipe_resource *sampler_res = shs->sampler_table.res;
4948 if (sampler_res)
4949 iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false);
4950
4951 if ((clean & IRIS_DIRTY_SAMPLER_STATES_CS) &&
4952 (clean & IRIS_DIRTY_BINDINGS_CS) &&
4953 (clean & IRIS_DIRTY_CONSTANTS_CS) &&
4954 (clean & IRIS_DIRTY_CS)) {
4955 iris_use_optional_res(batch, ice->state.last_res.cs_desc, false);
4956 }
4957
4958 if (clean & IRIS_DIRTY_CS) {
4959 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
4960
4961 if (shader) {
4962 struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
4963 iris_use_pinned_bo(batch, bo, false);
4964
4965 struct iris_bo *curbe_bo =
4966 iris_resource_bo(ice->state.last_res.cs_thread_ids);
4967 iris_use_pinned_bo(batch, curbe_bo, false);
4968
4969 struct brw_stage_prog_data *prog_data = shader->prog_data;
4970
4971 if (prog_data->total_scratch > 0) {
4972 struct iris_bo *bo =
4973 iris_get_scratch_space(ice, prog_data->total_scratch, stage);
4974 iris_use_pinned_bo(batch, bo, true);
4975 }
4976 }
4977 }
4978 }
4979
4980 /**
4981 * Possibly emit STATE_BASE_ADDRESS to update Surface State Base Address.
4982 */
4983 static void
4984 iris_update_surface_base_address(struct iris_batch *batch,
4985 struct iris_binder *binder)
4986 {
4987 if (batch->last_surface_base_address == binder->bo->gtt_offset)
4988 return;
4989
4990 flush_before_state_base_change(batch);
4991
4992 iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
4993 sba.SurfaceStateBaseAddressModifyEnable = true;
4994 sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);
4995
4996 /* The hardware appears to pay attention to the MOCS fields even
4997 * if you don't set the "Address Modify Enable" bit for the base.
4998 */
4999 sba.GeneralStateMOCS = MOCS_WB;
5000 sba.StatelessDataPortAccessMOCS = MOCS_WB;
5001 sba.DynamicStateMOCS = MOCS_WB;
5002 sba.IndirectObjectMOCS = MOCS_WB;
5003 sba.InstructionMOCS = MOCS_WB;
5004 sba.SurfaceStateMOCS = MOCS_WB;
5005 #if GEN_GEN >= 9
5006 sba.BindlessSurfaceStateMOCS = MOCS_WB;
5007 #endif
5008 }
5009
5010 flush_after_state_base_change(batch);
5011
5012 batch->last_surface_base_address = binder->bo->gtt_offset;
5013 }
5014
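/**
 * Compute the depth range for CC_VIEWPORT.
 *
 * Window-space positions use a fixed [0, 1] range.  Otherwise this defers
 * to gallium's util_viewport_zmin_zmax(), which (roughly) yields
 * translate[2] -/+ scale[2] for the [-1, 1] NDC convention, or
 * [translate[2], translate[2] + scale[2]] when halfz is set.  E.g. with
 * scale[2] = 0.5 and translate[2] = 0.5, halfz = false gives [0, 1].
 */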
5015 static inline void
5016 iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
5017 bool window_space_position, float *zmin, float *zmax)
5018 {
5019 if (window_space_position) {
5020 *zmin = 0.f;
5021 *zmax = 1.f;
5022 return;
5023 }
5024 util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
5025 }
5026
5027 #if GEN_GEN >= 12
5028 void
5029 genX(emit_aux_map_state)(struct iris_batch *batch)
5030 {
5031 struct iris_screen *screen = batch->screen;
5032 void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
5033 if (!aux_map_ctx)
5034 return;
5035 uint32_t aux_map_state_num = gen_aux_map_get_state_num(aux_map_ctx);
5036 if (batch->last_aux_map_state != aux_map_state_num) {
5037       /* If the aux-map state number increased, we need to rewrite the
5038        * register. Rewriting the register both sets the aux-map
5039        * translation table address and invalidates any previously
5040        * cached translations.
5041        */
5042 uint64_t base_addr = gen_aux_map_get_base(aux_map_ctx);
5043 assert(base_addr != 0 && ALIGN(base_addr, 32 * 1024) == base_addr);
5044 iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num),
5045 base_addr);
5046 batch->last_aux_map_state = aux_map_state_num;
5047 }
5048 }
5049 #endif
5050
5051 static void
5052 iris_upload_dirty_render_state(struct iris_context *ice,
5053 struct iris_batch *batch,
5054 const struct pipe_draw_info *draw)
5055 {
5056 const uint64_t dirty = ice->state.dirty;
5057
5058 if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER))
5059 return;
5060
5061 struct iris_genx_state *genx = ice->state.genx;
5062 struct iris_binder *binder = &ice->state.binder;
5063 struct brw_wm_prog_data *wm_prog_data = (void *)
5064 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
5065
5066 if (dirty & IRIS_DIRTY_CC_VIEWPORT) {
5067 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
5068 uint32_t cc_vp_address;
5069
5070 /* XXX: could avoid streaming for depth_clip [0,1] case. */
5071 uint32_t *cc_vp_map =
5072 stream_state(batch, ice->state.dynamic_uploader,
5073 &ice->state.last_res.cc_vp,
5074 4 * ice->state.num_viewports *
5075 GENX(CC_VIEWPORT_length), 32, &cc_vp_address);
5076 for (int i = 0; i < ice->state.num_viewports; i++) {
5077 float zmin, zmax;
5078 iris_viewport_zmin_zmax(&ice->state.viewports[i], cso_rast->clip_halfz,
5079 ice->state.window_space_position,
5080 &zmin, &zmax);
5081 if (cso_rast->depth_clip_near)
5082 zmin = 0.0;
5083 if (cso_rast->depth_clip_far)
5084 zmax = 1.0;
5085
5086 iris_pack_state(GENX(CC_VIEWPORT), cc_vp_map, ccv) {
5087 ccv.MinimumDepth = zmin;
5088 ccv.MaximumDepth = zmax;
5089 }
5090
5091 cc_vp_map += GENX(CC_VIEWPORT_length);
5092 }
5093
5094 iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
5095 ptr.CCViewportPointer = cc_vp_address;
5096 }
5097 }
5098
5099 if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
5100 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5101 uint32_t sf_cl_vp_address;
5102 uint32_t *vp_map =
5103 stream_state(batch, ice->state.dynamic_uploader,
5104 &ice->state.last_res.sf_cl_vp,
5105 4 * ice->state.num_viewports *
5106 GENX(SF_CLIP_VIEWPORT_length), 64, &sf_cl_vp_address);
5107
5108 for (unsigned i = 0; i < ice->state.num_viewports; i++) {
5109 const struct pipe_viewport_state *state = &ice->state.viewports[i];
5110 float gb_xmin, gb_xmax, gb_ymin, gb_ymax;
5111
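         /* viewport_extent() evaluates the viewport transform at NDC -1/+1,
          * i.e. translate[axis] + sign * scale[axis].  For a full-screen
          * 1920x1080 viewport (scale = (960, 540), translate = (960, 540))
          * that gives x in [0, 1920] and y in [0, 1080], which the packing
          * below clamps to XMinViewPort = 0, XMaxViewPort = 1919, etc.
          */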
5112 float vp_xmin = viewport_extent(state, 0, -1.0f);
5113 float vp_xmax = viewport_extent(state, 0, 1.0f);
5114 float vp_ymin = viewport_extent(state, 1, -1.0f);
5115 float vp_ymax = viewport_extent(state, 1, 1.0f);
5116
5117 gen_calculate_guardband_size(cso_fb->width, cso_fb->height,
5118 state->scale[0], state->scale[1],
5119 state->translate[0], state->translate[1],
5120 &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);
5121
5122 iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
5123 vp.ViewportMatrixElementm00 = state->scale[0];
5124 vp.ViewportMatrixElementm11 = state->scale[1];
5125 vp.ViewportMatrixElementm22 = state->scale[2];
5126 vp.ViewportMatrixElementm30 = state->translate[0];
5127 vp.ViewportMatrixElementm31 = state->translate[1];
5128 vp.ViewportMatrixElementm32 = state->translate[2];
5129 vp.XMinClipGuardband = gb_xmin;
5130 vp.XMaxClipGuardband = gb_xmax;
5131 vp.YMinClipGuardband = gb_ymin;
5132 vp.YMaxClipGuardband = gb_ymax;
5133 vp.XMinViewPort = MAX2(vp_xmin, 0);
5134 vp.XMaxViewPort = MIN2(vp_xmax, cso_fb->width) - 1;
5135 vp.YMinViewPort = MAX2(vp_ymin, 0);
5136 vp.YMaxViewPort = MIN2(vp_ymax, cso_fb->height) - 1;
5137 }
5138
5139 vp_map += GENX(SF_CLIP_VIEWPORT_length);
5140 }
5141
5142 iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
5143 ptr.SFClipViewportPointer = sf_cl_vp_address;
5144 }
5145 }
5146
5147 if (dirty & IRIS_DIRTY_URB) {
5148 unsigned size[4];
5149
5150 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
5151 if (!ice->shaders.prog[i]) {
5152 size[i] = 1;
5153 } else {
5154 struct brw_vue_prog_data *vue_prog_data =
5155 (void *) ice->shaders.prog[i]->prog_data;
5156 size[i] = vue_prog_data->urb_entry_size;
5157 }
5158 assert(size[i] != 0);
5159 }
5160
5161 genX(emit_urb_setup)(ice, batch, size,
5162 ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
5163 ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL);
5164 }
5165
5166 if (dirty & IRIS_DIRTY_BLEND_STATE) {
5167 struct iris_blend_state *cso_blend = ice->state.cso_blend;
5168 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5169 struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
5170 const int header_dwords = GENX(BLEND_STATE_length);
5171
5172 /* Always write at least one BLEND_STATE - the final RT message will
5173 * reference BLEND_STATE[0] even if there aren't color writes. There
5174 * may still be alpha testing, computed depth, and so on.
5175 */
5176 const int rt_dwords =
5177 MAX2(cso_fb->nr_cbufs, 1) * GENX(BLEND_STATE_ENTRY_length);
5178
5179 uint32_t blend_offset;
5180 uint32_t *blend_map =
5181 stream_state(batch, ice->state.dynamic_uploader,
5182 &ice->state.last_res.blend,
5183 4 * (header_dwords + rt_dwords), 64, &blend_offset);
5184
5185 uint32_t blend_state_header;
5186 iris_pack_state(GENX(BLEND_STATE), &blend_state_header, bs) {
5187 bs.AlphaTestEnable = cso_zsa->alpha.enabled;
5188 bs.AlphaTestFunction = translate_compare_func(cso_zsa->alpha.func);
5189 }
5190
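   /* Merge by OR'ing DWords, per the scheme described at the top of this
    * file: DWord 0 combines the streamed alpha-test header with the CSO's
    * static DWord 0, while the per-RT BLEND_STATE_ENTRY DWords are copied
    * from the CSO unchanged.
    */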
5191 blend_map[0] = blend_state_header | cso_blend->blend_state[0];
5192 memcpy(&blend_map[1], &cso_blend->blend_state[1], 4 * rt_dwords);
5193
5194 iris_emit_cmd(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
5195 ptr.BlendStatePointer = blend_offset;
5196 ptr.BlendStatePointerValid = true;
5197 }
5198 }
5199
5200 if (dirty & IRIS_DIRTY_COLOR_CALC_STATE) {
5201 struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
5202 #if GEN_GEN == 8
5203 struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
5204 #endif
5205 uint32_t cc_offset;
5206 void *cc_map =
5207 stream_state(batch, ice->state.dynamic_uploader,
5208 &ice->state.last_res.color_calc,
5209 sizeof(uint32_t) * GENX(COLOR_CALC_STATE_length),
5210 64, &cc_offset);
5211 iris_pack_state(GENX(COLOR_CALC_STATE), cc_map, cc) {
5212 cc.AlphaTestFormat = ALPHATEST_FLOAT32;
5213 cc.AlphaReferenceValueAsFLOAT32 = cso->alpha.ref_value;
5214 cc.BlendConstantColorRed = ice->state.blend_color.color[0];
5215 cc.BlendConstantColorGreen = ice->state.blend_color.color[1];
5216 cc.BlendConstantColorBlue = ice->state.blend_color.color[2];
5217 cc.BlendConstantColorAlpha = ice->state.blend_color.color[3];
5218 #if GEN_GEN == 8
5219 cc.StencilReferenceValue = p_stencil_refs->ref_value[0];
5220 cc.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
5221 #endif
5222 }
5223 iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
5224 ptr.ColorCalcStatePointer = cc_offset;
5225 ptr.ColorCalcStatePointerValid = true;
5226 }
5227 }
5228
5229 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5230 if (!(dirty & (IRIS_DIRTY_CONSTANTS_VS << stage)))
5231 continue;
5232
5233 struct iris_shader_state *shs = &ice->state.shaders[stage];
5234 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5235
5236 if (!shader)
5237 continue;
5238
5239 if (shs->sysvals_need_upload)
5240 upload_sysvals(ice, stage);
5241
5242 struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5243
5244 iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
5245 pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
5246 if (prog_data) {
5247 /* The Skylake PRM contains the following restriction:
5248 *
5249 * "The driver must ensure The following case does not occur
5250 * without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
5251 * buffer 3 read length equal to zero committed followed by a
5252 * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to
5253 * zero committed."
5254 *
5255 * To avoid this, we program the buffers in the highest slots.
5256 * This way, slot 0 is only used if slot 3 is also used.
5257 */
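         /* For example, if only ubo_ranges[0] and ubo_ranges[1] have a
          * nonzero length, the loop below programs them into slots 3 and 2;
          * slots 1 and 0 keep a zero read length, which satisfies the
          * restriction above.
          */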
5258 int n = 3;
5259
5260 for (int i = 3; i >= 0; i--) {
5261 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
5262
5263 if (range->length == 0)
5264 continue;
5265
5266 /* Range block is a binding table index, map back to UBO index. */
5267 unsigned block_index = iris_bti_to_group_index(
5268 &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
5269 assert(block_index != IRIS_SURFACE_NOT_USED);
5270
5271 struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
5272 struct iris_resource *res = (void *) cbuf->buffer;
5273
5274 assert(cbuf->buffer_offset % 32 == 0);
5275
5276 pkt.ConstantBody.ReadLength[n] = range->length;
5277 pkt.ConstantBody.Buffer[n] =
5278 res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
5279 : ro_bo(batch->screen->workaround_bo, 0);
5280 n--;
5281 }
5282 }
5283 }
5284 }
5285
5286 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5287 /* Gen9 requires 3DSTATE_BINDING_TABLE_POINTERS_XS to be re-emitted
5288 * in order to commit constants. TODO: Investigate "Disable Gather
5289 * at Set Shader" to go back to legacy mode...
5290 */
5291 if (dirty & ((IRIS_DIRTY_BINDINGS_VS |
5292 (GEN_GEN == 9 ? IRIS_DIRTY_CONSTANTS_VS : 0)) << stage)) {
5293 iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
5294 ptr._3DCommandSubOpcode = 38 + stage;
5295 ptr.PointertoVSBindingTable = binder->bt_offset[stage];
5296 }
5297 }
5298 }
5299
5300 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5301 if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
5302 iris_populate_binding_table(ice, batch, stage, false);
5303 }
5304 }
5305
5306 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5307 if (!(dirty & (IRIS_DIRTY_SAMPLER_STATES_VS << stage)) ||
5308 !ice->shaders.prog[stage])
5309 continue;
5310
5311 iris_upload_sampler_states(ice, stage);
5312
5313 struct iris_shader_state *shs = &ice->state.shaders[stage];
5314 struct pipe_resource *res = shs->sampler_table.res;
5315 if (res)
5316 iris_use_pinned_bo(batch, iris_resource_bo(res), false);
5317
5318 iris_emit_cmd(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
5319 ptr._3DCommandSubOpcode = 43 + stage;
5320 ptr.PointertoVSSamplerState = shs->sampler_table.offset;
5321 }
5322 }
5323
5324 if (ice->state.need_border_colors)
5325 iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false);
5326
5327 if (dirty & IRIS_DIRTY_MULTISAMPLE) {
5328 iris_emit_cmd(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
5329 ms.PixelLocation =
5330 ice->state.cso_rast->half_pixel_center ? CENTER : UL_CORNER;
5331 if (ice->state.framebuffer.samples > 0)
5332 ms.NumberofMultisamples = ffs(ice->state.framebuffer.samples) - 1;
5333 }
5334 }
5335
5336 if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
5337 iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
5338 ms.SampleMask = ice->state.sample_mask;
5339 }
5340 }
5341
5342 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5343 if (!(dirty & (IRIS_DIRTY_VS << stage)))
5344 continue;
5345
5346 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5347
5348 if (shader) {
5349 struct brw_stage_prog_data *prog_data = shader->prog_data;
5350 struct iris_resource *cache = (void *) shader->assembly.res;
5351 iris_use_pinned_bo(batch, cache->bo, false);
5352
5353 if (prog_data->total_scratch > 0) {
5354 struct iris_bo *bo =
5355 iris_get_scratch_space(ice, prog_data->total_scratch, stage);
5356 iris_use_pinned_bo(batch, bo, true);
5357 }
5358
5359 if (stage == MESA_SHADER_FRAGMENT) {
5360 UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast;
5361 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5362
5363 uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0};
5364 iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
5365 ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
5366 ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
5367 ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
5368
5369 /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say:
5370 *
5371 * "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16,
5372 * SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch
5373 * mode."
5374 *
5375 * 16x MSAA only exists on Gen9+, so we can skip this on Gen8.
5376 */
5377 if (GEN_GEN >= 9 && cso_fb->samples == 16 &&
5378 !wm_prog_data->persample_dispatch) {
5379 assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
5380 ps._32PixelDispatchEnable = false;
5381 }
5382
5383 ps.DispatchGRFStartRegisterForConstantSetupData0 =
5384 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
5385 ps.DispatchGRFStartRegisterForConstantSetupData1 =
5386 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
5387 ps.DispatchGRFStartRegisterForConstantSetupData2 =
5388 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
5389
5390 ps.KernelStartPointer0 = KSP(shader) +
5391 brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
5392 ps.KernelStartPointer1 = KSP(shader) +
5393 brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
5394 ps.KernelStartPointer2 = KSP(shader) +
5395 brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
5396 }
5397
5398 uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
5399 iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
5400 #if GEN_GEN >= 9
5401 if (!wm_prog_data->uses_sample_mask)
5402 psx.InputCoverageMaskState = ICMS_NONE;
5403 else if (wm_prog_data->post_depth_coverage)
5404 psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
5405 else if (wm_prog_data->inner_coverage &&
5406 cso->conservative_rasterization)
5407 psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
5408 else
5409 psx.InputCoverageMaskState = ICMS_NORMAL;
5410 #else
5411 psx.PixelShaderUsesInputCoverageMask =
5412 wm_prog_data->uses_sample_mask;
5413 #endif
5414 }
5415
5416 uint32_t *shader_ps = (uint32_t *) shader->derived_data;
5417 uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length);
5418 iris_emit_merge(batch, shader_ps, ps_state,
5419 GENX(3DSTATE_PS_length));
5420 iris_emit_merge(batch, shader_psx, psx_state,
5421 GENX(3DSTATE_PS_EXTRA_length));
5422 } else {
5423 iris_batch_emit(batch, shader->derived_data,
5424 iris_derived_program_state_size(stage));
5425 }
5426 } else {
5427 if (stage == MESA_SHADER_TESS_EVAL) {
5428 iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
5429 iris_emit_cmd(batch, GENX(3DSTATE_TE), te);
5430 iris_emit_cmd(batch, GENX(3DSTATE_DS), ds);
5431 } else if (stage == MESA_SHADER_GEOMETRY) {
5432 iris_emit_cmd(batch, GENX(3DSTATE_GS), gs);
5433 }
5434 }
5435 }
5436
5437 if (ice->state.streamout_active) {
5438 if (dirty & IRIS_DIRTY_SO_BUFFERS) {
5439 iris_batch_emit(batch, genx->so_buffers,
5440 4 * 4 * GENX(3DSTATE_SO_BUFFER_length));
5441 for (int i = 0; i < 4; i++) {
5442 struct iris_stream_output_target *tgt =
5443 (void *) ice->state.so_target[i];
5444 if (tgt) {
5445 tgt->zeroed = true;
5446 iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
5447 true);
5448 iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
5449 true);
5450 }
5451 }
5452 }
5453
5454 if ((dirty & IRIS_DIRTY_SO_DECL_LIST) && ice->state.streamout) {
5455 uint32_t *decl_list =
5456 ice->state.streamout + GENX(3DSTATE_STREAMOUT_length);
5457 iris_batch_emit(batch, decl_list, 4 * ((decl_list[0] & 0xff) + 2));
5458 }
5459
5460 if (dirty & IRIS_DIRTY_STREAMOUT) {
5461 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
5462
5463 uint32_t dynamic_sol[GENX(3DSTATE_STREAMOUT_length)];
5464 iris_pack_command(GENX(3DSTATE_STREAMOUT), dynamic_sol, sol) {
5465 sol.SOFunctionEnable = true;
5466 sol.SOStatisticsEnable = true;
5467
5468 sol.RenderingDisable = cso_rast->rasterizer_discard &&
5469 !ice->state.prims_generated_query_active;
5470 sol.ReorderMode = cso_rast->flatshade_first ? LEADING : TRAILING;
5471 }
5472
5473 assert(ice->state.streamout);
5474
5475 iris_emit_merge(batch, ice->state.streamout, dynamic_sol,
5476 GENX(3DSTATE_STREAMOUT_length));
5477 }
5478 } else {
5479 if (dirty & IRIS_DIRTY_STREAMOUT) {
5480 iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
5481 }
5482 }
5483
5484 if (dirty & IRIS_DIRTY_CLIP) {
5485 struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
5486 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5487
5488 bool gs_or_tes = ice->shaders.prog[MESA_SHADER_GEOMETRY] ||
5489 ice->shaders.prog[MESA_SHADER_TESS_EVAL];
5490 bool points_or_lines = cso_rast->fill_mode_point_or_line ||
5491 (gs_or_tes ? ice->shaders.output_topology_is_points_or_lines
5492 : ice->state.prim_is_points_or_lines);
5493
5494 uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
5495 iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
5496 cl.StatisticsEnable = ice->state.statistics_counters_enabled;
5497 if (cso_rast->rasterizer_discard)
5498 cl.ClipMode = CLIPMODE_REJECT_ALL;
5499 else if (ice->state.window_space_position)
5500 cl.ClipMode = CLIPMODE_ACCEPT_ALL;
5501 else
5502 cl.ClipMode = CLIPMODE_NORMAL;
5503
5504 cl.PerspectiveDivideDisable = ice->state.window_space_position;
5505 cl.ViewportXYClipTestEnable = !points_or_lines;
5506
5507 if (wm_prog_data->barycentric_interp_modes &
5508 BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
5509 cl.NonPerspectiveBarycentricEnable = true;
5510
5511 cl.ForceZeroRTAIndexEnable = cso_fb->layers == 0;
5512 cl.MaximumVPIndex = ice->state.num_viewports - 1;
5513 }
5514 iris_emit_merge(batch, cso_rast->clip, dynamic_clip,
5515 ARRAY_SIZE(cso_rast->clip));
5516 }
5517
5518 if (dirty & IRIS_DIRTY_RASTER) {
5519 struct iris_rasterizer_state *cso = ice->state.cso_rast;
5520 iris_batch_emit(batch, cso->raster, sizeof(cso->raster));
5521
5522 uint32_t dynamic_sf[GENX(3DSTATE_SF_length)];
5523 iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) {
5524 sf.ViewportTransformEnable = !ice->state.window_space_position;
5525 }
5526 iris_emit_merge(batch, cso->sf, dynamic_sf,
5527 ARRAY_SIZE(dynamic_sf));
5528 }
5529
5530 if (dirty & IRIS_DIRTY_WM) {
5531 struct iris_rasterizer_state *cso = ice->state.cso_rast;
5532 uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];
5533
5534 iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
5535 wm.StatisticsEnable = ice->state.statistics_counters_enabled;
5536
5537 wm.BarycentricInterpolationMode =
5538 wm_prog_data->barycentric_interp_modes;
5539
5540 if (wm_prog_data->early_fragment_tests)
5541 wm.EarlyDepthStencilControl = EDSC_PREPS;
5542 else if (wm_prog_data->has_side_effects)
5543 wm.EarlyDepthStencilControl = EDSC_PSEXEC;
5544
5545 /* We could skip this bit if color writes are enabled. */
5546 if (wm_prog_data->has_side_effects || wm_prog_data->uses_kill)
5547 wm.ForceThreadDispatchEnable = ForceON;
5548 }
5549 iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
5550 }
5551
5552 if (dirty & IRIS_DIRTY_SBE) {
5553 iris_emit_sbe(batch, ice);
5554 }
5555
5556 if (dirty & IRIS_DIRTY_PS_BLEND) {
5557 struct iris_blend_state *cso_blend = ice->state.cso_blend;
5558 struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
5559 const struct shader_info *fs_info =
5560 iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
5561
5562 uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
5563 iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
5564 pb.HasWriteableRT = has_writeable_rt(cso_blend, fs_info);
5565 pb.AlphaTestEnable = cso_zsa->alpha.enabled;
5566
5567 /* The dual source blending docs caution against using SRC1 factors
5568 * when the shader doesn't use a dual source render target write.
5569 * Empirically, this can lead to GPU hangs, and the results are
5570 * undefined anyway, so simply disable blending to avoid the hang.
5571 */
5572 pb.ColorBufferBlendEnable = (cso_blend->blend_enables & 1) &&
5573 (!cso_blend->dual_color_blending || wm_prog_data->dual_src_blend);
5574 }
5575
5576 iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
5577 ARRAY_SIZE(cso_blend->ps_blend));
5578 }
5579
5580 if (dirty & IRIS_DIRTY_WM_DEPTH_STENCIL) {
5581 struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
5582 #if GEN_GEN >= 9
5583 struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
5584 uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
5585 iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
5586 wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
5587 wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
5588 }
5589 iris_emit_merge(batch, cso->wmds, stencil_refs, ARRAY_SIZE(cso->wmds));
5590 #else
5591 iris_batch_emit(batch, cso->wmds, sizeof(cso->wmds));
5592 #endif
5593
5594 #if GEN_GEN >= 12
5595 iris_batch_emit(batch, cso->depth_bounds, sizeof(cso->depth_bounds));
5596 #endif
5597 }
5598
5599 if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
5600 uint32_t scissor_offset =
5601 emit_state(batch, ice->state.dynamic_uploader,
5602 &ice->state.last_res.scissor,
5603 ice->state.scissors,
5604 sizeof(struct pipe_scissor_state) *
5605 ice->state.num_viewports, 32);
5606
5607 iris_emit_cmd(batch, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
5608 ptr.ScissorRectPointer = scissor_offset;
5609 }
5610 }
5611
5612 if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
5613 struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
5614
5615       /* Do not emit the clear params yet. We need to update the clear value
5616        * first.
5617        */
5618 uint32_t clear_length = GENX(3DSTATE_CLEAR_PARAMS_length) * 4;
5619 uint32_t cso_z_size = sizeof(cso_z->packets) - clear_length;
5620 iris_batch_emit(batch, cso_z->packets, cso_z_size);
5621
5622 union isl_color_value clear_value = { .f32 = { 0, } };
5623
5624 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5625 if (cso_fb->zsbuf) {
5626 struct iris_resource *zres, *sres;
5627 iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
5628 &zres, &sres);
5629 if (zres && zres->aux.bo)
5630 clear_value = iris_resource_get_clear_color(zres, NULL, NULL);
5631 }
5632
5633 uint32_t clear_params[GENX(3DSTATE_CLEAR_PARAMS_length)];
5634 iris_pack_command(GENX(3DSTATE_CLEAR_PARAMS), clear_params, clear) {
5635 clear.DepthClearValueValid = true;
5636 clear.DepthClearValue = clear_value.f32[0];
5637 }
5638 iris_batch_emit(batch, clear_params, clear_length);
5639 }
5640
5641 if (dirty & (IRIS_DIRTY_DEPTH_BUFFER | IRIS_DIRTY_WM_DEPTH_STENCIL)) {
5642       /* Re-pin when the buffers change, and also when the write-enable state changes. */
5643 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5644 pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
5645 }
5646
5647 if (dirty & IRIS_DIRTY_POLYGON_STIPPLE) {
5648 iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
5649 for (int i = 0; i < 32; i++) {
5650 poly.PatternRow[i] = ice->state.poly_stipple.stipple[i];
5651 }
5652 }
5653 }
5654
5655 if (dirty & IRIS_DIRTY_LINE_STIPPLE) {
5656 struct iris_rasterizer_state *cso = ice->state.cso_rast;
5657 iris_batch_emit(batch, cso->line_stipple, sizeof(cso->line_stipple));
5658 }
5659
5660 if (dirty & IRIS_DIRTY_VF_TOPOLOGY) {
5661 iris_emit_cmd(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
5662 topo.PrimitiveTopologyType =
5663 translate_prim_type(draw->mode, draw->vertices_per_patch);
5664 }
5665 }
5666
5667 if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
5668 int count = util_bitcount64(ice->state.bound_vertex_buffers);
5669 int dynamic_bound = ice->state.bound_vertex_buffers;
5670
5671 if (ice->state.vs_uses_draw_params) {
5672 assert(ice->draw.draw_params.res);
5673
5674 struct iris_vertex_buffer_state *state =
5675 &(ice->state.genx->vertex_buffers[count]);
5676 pipe_resource_reference(&state->resource, ice->draw.draw_params.res);
5677 struct iris_resource *res = (void *) state->resource;
5678
5679 iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
5680 vb.VertexBufferIndex = count;
5681 vb.AddressModifyEnable = true;
5682 vb.BufferPitch = 0;
5683 vb.BufferSize = res->bo->size - ice->draw.draw_params.offset;
5684 vb.BufferStartingAddress =
5685 ro_bo(NULL, res->bo->gtt_offset +
5686 (int) ice->draw.draw_params.offset);
5687 vb.MOCS = mocs(res->bo);
5688 }
5689 dynamic_bound |= 1ull << count;
5690 count++;
5691 }
5692
5693 if (ice->state.vs_uses_derived_draw_params) {
5694 struct iris_vertex_buffer_state *state =
5695 &(ice->state.genx->vertex_buffers[count]);
5696 pipe_resource_reference(&state->resource,
5697 ice->draw.derived_draw_params.res);
5698 struct iris_resource *res = (void *) ice->draw.derived_draw_params.res;
5699
5700 iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
5701 vb.VertexBufferIndex = count;
5702 vb.AddressModifyEnable = true;
5703 vb.BufferPitch = 0;
5704 vb.BufferSize =
5705 res->bo->size - ice->draw.derived_draw_params.offset;
5706 vb.BufferStartingAddress =
5707 ro_bo(NULL, res->bo->gtt_offset +
5708 (int) ice->draw.derived_draw_params.offset);
5709 vb.MOCS = mocs(res->bo);
5710 }
5711 dynamic_bound |= 1ull << count;
5712 count++;
5713 }
5714
5715 if (count) {
5716 /* The VF cache designers cut corners, and made the cache key's
5717 * <VertexBufferIndex, Memory Address> tuple only consider the bottom
5718 * 32 bits of the address. If you have two vertex buffers which get
5719 * placed exactly 4 GiB apart and use them in back-to-back draw calls,
5720 * you can get collisions (even within a single batch).
5721 *
5722           * So, we need to do a VF cache invalidate if the buffer for a VB
5723           * slot changes its [48:32] address bits from the previous time.
5724 */
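      /* For example (hypothetical addresses): VBs at 0x100001000 and
       * 0x200001000 alias in the cache because only the low 32 bits are
       * keyed; gtt_offset >> 32 yields 0x1 vs 0x2, so the high-bits check
       * below notices the change and invalidates the VF cache.
       */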
5725 unsigned flush_flags = 0;
5726
5727 uint64_t bound = dynamic_bound;
5728 while (bound) {
5729 const int i = u_bit_scan64(&bound);
5730 uint16_t high_bits = 0;
5731
5732 struct iris_resource *res =
5733 (void *) genx->vertex_buffers[i].resource;
5734 if (res) {
5735 iris_use_pinned_bo(batch, res->bo, false);
5736
5737 high_bits = res->bo->gtt_offset >> 32ull;
5738 if (high_bits != ice->state.last_vbo_high_bits[i]) {
5739 flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE |
5740 PIPE_CONTROL_CS_STALL;
5741 ice->state.last_vbo_high_bits[i] = high_bits;
5742 }
5743 }
5744 }
5745
5746 if (flush_flags) {
5747 iris_emit_pipe_control_flush(batch,
5748 "workaround: VF cache 32-bit key [VB]",
5749 flush_flags);
5750 }
5751
5752 const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
5753
5754 uint32_t *map =
5755 iris_get_command_space(batch, 4 * (1 + vb_dwords * count));
5756 _iris_pack_command(batch, GENX(3DSTATE_VERTEX_BUFFERS), map, vb) {
5757 vb.DWordLength = (vb_dwords * count + 1) - 2;
5758 }
5759 map += 1;
5760
5761 bound = dynamic_bound;
5762 while (bound) {
5763 const int i = u_bit_scan64(&bound);
5764 memcpy(map, genx->vertex_buffers[i].state,
5765 sizeof(uint32_t) * vb_dwords);
5766 map += vb_dwords;
5767 }
5768 }
5769 }
5770
5771 if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) {
5772 struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
5773 const unsigned entries = MAX2(cso->count, 1);
5774 if (!(ice->state.vs_needs_sgvs_element ||
5775 ice->state.vs_uses_derived_draw_params ||
5776 ice->state.vs_needs_edge_flag)) {
5777 iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) *
5778 (1 + entries * GENX(VERTEX_ELEMENT_STATE_length)));
5779 } else {
5780 uint32_t dynamic_ves[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
5781 const unsigned dyn_count = cso->count +
5782 ice->state.vs_needs_sgvs_element +
5783 ice->state.vs_uses_derived_draw_params;
5784
5785 iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS),
5786 &dynamic_ves, ve) {
5787 ve.DWordLength =
5788 1 + GENX(VERTEX_ELEMENT_STATE_length) * dyn_count - 2;
5789 }
5790 memcpy(&dynamic_ves[1], &cso->vertex_elements[1],
5791 (cso->count - ice->state.vs_needs_edge_flag) *
5792 GENX(VERTEX_ELEMENT_STATE_length) * sizeof(uint32_t));
5793 uint32_t *ve_pack_dest =
5794 &dynamic_ves[1 + (cso->count - ice->state.vs_needs_edge_flag) *
5795 GENX(VERTEX_ELEMENT_STATE_length)];
5796
5797 if (ice->state.vs_needs_sgvs_element) {
5798 uint32_t base_ctrl = ice->state.vs_uses_draw_params ?
5799 VFCOMP_STORE_SRC : VFCOMP_STORE_0;
5800 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
5801 ve.Valid = true;
5802 ve.VertexBufferIndex =
5803 util_bitcount64(ice->state.bound_vertex_buffers);
5804 ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
5805 ve.Component0Control = base_ctrl;
5806 ve.Component1Control = base_ctrl;
5807 ve.Component2Control = VFCOMP_STORE_0;
5808 ve.Component3Control = VFCOMP_STORE_0;
5809 }
5810 ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
5811 }
5812 if (ice->state.vs_uses_derived_draw_params) {
5813 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
5814 ve.Valid = true;
5815 ve.VertexBufferIndex =
5816 util_bitcount64(ice->state.bound_vertex_buffers) +
5817 ice->state.vs_uses_draw_params;
5818 ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
5819 ve.Component0Control = VFCOMP_STORE_SRC;
5820 ve.Component1Control = VFCOMP_STORE_SRC;
5821 ve.Component2Control = VFCOMP_STORE_0;
5822 ve.Component3Control = VFCOMP_STORE_0;
5823 }
5824 ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
5825 }
5826 if (ice->state.vs_needs_edge_flag) {
5827 for (int i = 0; i < GENX(VERTEX_ELEMENT_STATE_length); i++)
5828 ve_pack_dest[i] = cso->edgeflag_ve[i];
5829 }
5830
5831 iris_batch_emit(batch, &dynamic_ves, sizeof(uint32_t) *
5832 (1 + dyn_count * GENX(VERTEX_ELEMENT_STATE_length)));
5833 }
5834
5835 if (!ice->state.vs_needs_edge_flag) {
5836 iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) *
5837 entries * GENX(3DSTATE_VF_INSTANCING_length));
5838 } else {
5839 assert(cso->count > 0);
5840 const unsigned edgeflag_index = cso->count - 1;
5841 uint32_t dynamic_vfi[33 * GENX(3DSTATE_VF_INSTANCING_length)];
5842 memcpy(&dynamic_vfi[0], cso->vf_instancing, edgeflag_index *
5843 GENX(3DSTATE_VF_INSTANCING_length) * sizeof(uint32_t));
5844
5845 uint32_t *vfi_pack_dest = &dynamic_vfi[0] +
5846 edgeflag_index * GENX(3DSTATE_VF_INSTANCING_length);
5847 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
5848 vi.VertexElementIndex = edgeflag_index +
5849 ice->state.vs_needs_sgvs_element +
5850 ice->state.vs_uses_derived_draw_params;
5851 }
5852 for (int i = 0; i < GENX(3DSTATE_VF_INSTANCING_length); i++)
5853 vfi_pack_dest[i] |= cso->edgeflag_vfi[i];
5854
5855 iris_batch_emit(batch, &dynamic_vfi[0], sizeof(uint32_t) *
5856 entries * GENX(3DSTATE_VF_INSTANCING_length));
5857 }
5858 }
5859
5860 if (dirty & IRIS_DIRTY_VF_SGVS) {
5861 const struct brw_vs_prog_data *vs_prog_data = (void *)
5862 ice->shaders.prog[MESA_SHADER_VERTEX]->prog_data;
5863 struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
5864
5865 iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgv) {
5866 if (vs_prog_data->uses_vertexid) {
5867 sgv.VertexIDEnable = true;
5868 sgv.VertexIDComponentNumber = 2;
5869 sgv.VertexIDElementOffset =
5870 cso->count - ice->state.vs_needs_edge_flag;
5871 }
5872
5873 if (vs_prog_data->uses_instanceid) {
5874 sgv.InstanceIDEnable = true;
5875 sgv.InstanceIDComponentNumber = 3;
5876 sgv.InstanceIDElementOffset =
5877 cso->count - ice->state.vs_needs_edge_flag;
5878 }
5879 }
5880 }
5881
5882 if (dirty & IRIS_DIRTY_VF) {
5883 iris_emit_cmd(batch, GENX(3DSTATE_VF), vf) {
5884 if (draw->primitive_restart) {
5885 vf.IndexedDrawCutIndexEnable = true;
5886 vf.CutIndex = draw->restart_index;
5887 }
5888 }
5889 }
5890
5891 if (dirty & IRIS_DIRTY_VF_STATISTICS) {
5892 iris_emit_cmd(batch, GENX(3DSTATE_VF_STATISTICS), vf) {
5893 vf.StatisticsEnable = true;
5894 }
5895 }
5896
5897 #if GEN_GEN == 8
5898 if (dirty & IRIS_DIRTY_PMA_FIX) {
5899 bool enable = want_pma_fix(ice);
5900 genX(update_pma_fix)(ice, batch, enable);
5901 }
5902 #endif
5903
5904 if (ice->state.current_hash_scale != 1)
5905 genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1);
5906
5907 #if GEN_GEN >= 12
5908 genX(emit_aux_map_state)(batch);
5909 #endif
5910 }
5911
5912 static void
5913 iris_upload_render_state(struct iris_context *ice,
5914 struct iris_batch *batch,
5915 const struct pipe_draw_info *draw)
5916 {
5917 bool use_predicate = ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT;
5918
5919 /* Always pin the binder. If we're emitting new binding table pointers,
5920 * we need it. If not, we're probably inheriting old tables via the
5921 * context, and need it anyway. Since true zero-bindings cases are
5922 * practically non-existent, just pin it and avoid last_res tracking.
5923 */
5924 iris_use_pinned_bo(batch, ice->state.binder.bo, false);
5925
5926 if (!batch->contains_draw) {
5927 iris_restore_render_saved_bos(ice, batch, draw);
5928 batch->contains_draw = true;
5929 }
5930
5931 iris_upload_dirty_render_state(ice, batch, draw);
5932
5933 if (draw->index_size > 0) {
5934 unsigned offset;
5935
5936 if (draw->has_user_indices) {
5937 u_upload_data(ice->ctx.stream_uploader, 0,
5938 draw->count * draw->index_size, 4, draw->index.user,
5939 &offset, &ice->state.last_res.index_buffer);
5940 } else {
5941 struct iris_resource *res = (void *) draw->index.resource;
5942 res->bind_history |= PIPE_BIND_INDEX_BUFFER;
5943
5944 pipe_resource_reference(&ice->state.last_res.index_buffer,
5945 draw->index.resource);
5946 offset = 0;
5947 }
5948
5949 struct iris_genx_state *genx = ice->state.genx;
5950 struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
5951
5952 uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)];
5953 iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) {
5954 ib.IndexFormat = draw->index_size >> 1;
5955 ib.MOCS = mocs(bo);
5956 ib.BufferSize = bo->size - offset;
5957 ib.BufferStartingAddress = ro_bo(NULL, bo->gtt_offset + offset);
5958 }
5959
5960 if (memcmp(genx->last_index_buffer, ib_packet, sizeof(ib_packet)) != 0) {
5961 memcpy(genx->last_index_buffer, ib_packet, sizeof(ib_packet));
5962 iris_batch_emit(batch, ib_packet, sizeof(ib_packet));
5963 iris_use_pinned_bo(batch, bo, false);
5964 }
5965
5966       /* The VF cache key only uses 32 bits; see the vertex buffer comment above. */
5967 uint16_t high_bits = bo->gtt_offset >> 32ull;
5968 if (high_bits != ice->state.last_index_bo_high_bits) {
5969 iris_emit_pipe_control_flush(batch,
5970 "workaround: VF cache 32-bit key [IB]",
5971 PIPE_CONTROL_VF_CACHE_INVALIDATE |
5972 PIPE_CONTROL_CS_STALL);
5973 ice->state.last_index_bo_high_bits = high_bits;
5974 }
5975 }
5976
5977 #define _3DPRIM_END_OFFSET 0x2420
5978 #define _3DPRIM_START_VERTEX 0x2430
5979 #define _3DPRIM_VERTEX_COUNT 0x2434
5980 #define _3DPRIM_INSTANCE_COUNT 0x2438
5981 #define _3DPRIM_START_INSTANCE 0x243C
5982 #define _3DPRIM_BASE_VERTEX 0x2440
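/* These are the MMIO offsets of the draw-parameter registers consumed by
 * 3DPRIMITIVE when IndirectParameterEnable is set; the MI_LOAD_REGISTER_*
 * commands below fill them from the indirect buffer (or with constants).
 */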
5983
5984 if (draw->indirect) {
5985 if (draw->indirect->indirect_draw_count) {
5986 use_predicate = true;
5987
5988 struct iris_bo *draw_count_bo =
5989 iris_resource_bo(draw->indirect->indirect_draw_count);
5990 unsigned draw_count_offset =
5991 draw->indirect->indirect_draw_count_offset;
5992
5993 iris_emit_pipe_control_flush(batch,
5994 "ensure indirect draw buffer is flushed",
5995 PIPE_CONTROL_FLUSH_ENABLE);
5996
5997 if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
5998 struct gen_mi_builder b;
5999 gen_mi_builder_init(&b, batch);
6000
6001 /* comparison = draw id < draw count */
6002 struct gen_mi_value comparison =
6003 gen_mi_ult(&b, gen_mi_imm(draw->drawid),
6004 gen_mi_mem32(ro_bo(draw_count_bo,
6005 draw_count_offset)));
6006
6007 /* predicate = comparison & conditional rendering predicate */
6008 gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_RESULT),
6009 gen_mi_iand(&b, comparison,
6010 gen_mi_reg32(CS_GPR(15))));
6011 } else {
6012 uint32_t mi_predicate;
6013
6014             /* Upload the id of the current draw to MI_PREDICATE_SRC1. */
6015 iris_load_register_imm64(batch, MI_PREDICATE_SRC1, draw->drawid);
6016 /* Upload the current draw count from the draw parameters buffer
6017 * to MI_PREDICATE_SRC0.
6018 */
6019 iris_load_register_mem32(batch, MI_PREDICATE_SRC0,
6020 draw_count_bo, draw_count_offset);
6021 /* Zero the top 32-bits of MI_PREDICATE_SRC0 */
6022 iris_load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0);
6023
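            /* For the first draw (drawid == 0), LOADINV + SET computes
             * predicate = !(draw_count == 0), i.e. render if and only if
             * the indirect draw count is nonzero.
             */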
6024 if (draw->drawid == 0) {
6025 mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
6026 MI_PREDICATE_COMBINEOP_SET |
6027 MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
6028 } else {
6029 /* While draw_index < draw_count the predicate's result will be
6030 * (draw_index == draw_count) ^ TRUE = TRUE
6031 * When draw_index == draw_count the result is
6032 * (TRUE) ^ TRUE = FALSE
6033 * After this all results will be:
6034 * (FALSE) ^ FALSE = FALSE
6035 */
6036 mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOAD |
6037 MI_PREDICATE_COMBINEOP_XOR |
6038 MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
6039 }
6040 iris_batch_emit(batch, &mi_predicate, sizeof(uint32_t));
6041 }
6042 }
6043 struct iris_bo *bo = iris_resource_bo(draw->indirect->buffer);
6044 assert(bo);
6045
6046 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6047 lrm.RegisterAddress = _3DPRIM_VERTEX_COUNT;
6048 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 0);
6049 }
6050 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6051 lrm.RegisterAddress = _3DPRIM_INSTANCE_COUNT;
6052 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 4);
6053 }
6054 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6055 lrm.RegisterAddress = _3DPRIM_START_VERTEX;
6056 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 8);
6057 }
6058 if (draw->index_size) {
6059 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6060 lrm.RegisterAddress = _3DPRIM_BASE_VERTEX;
6061 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
6062 }
6063 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6064 lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
6065 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 16);
6066 }
6067 } else {
6068 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6069 lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
6070 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
6071 }
6072 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
6073 lri.RegisterOffset = _3DPRIM_BASE_VERTEX;
6074 lri.DataDWord = 0;
6075 }
6076 }
6077 } else if (draw->count_from_stream_output) {
6078 struct iris_stream_output_target *so =
6079 (void *) draw->count_from_stream_output;
6080
6081 /* XXX: Replace with actual cache tracking */
6082 iris_emit_pipe_control_flush(batch,
6083 "draw count from stream output stall",
6084 PIPE_CONTROL_CS_STALL);
6085
6086 struct gen_mi_builder b;
6087 gen_mi_builder_init(&b, batch);
6088
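      /* The SO write offset is a running byte count; subtracting the
       * initial buffer_offset and dividing by the per-vertex stride
       * recovers the vertex count, e.g. (assuming a 16-byte stride)
       * offset 256 with base 0 -> 16 vertices.
       */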
6089 struct iris_address addr =
6090 ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
6091 struct gen_mi_value offset =
6092 gen_mi_iadd_imm(&b, gen_mi_mem32(addr), -so->base.buffer_offset);
6093
6094 gen_mi_store(&b, gen_mi_reg32(_3DPRIM_VERTEX_COUNT),
6095 gen_mi_udiv32_imm(&b, offset, so->stride));
6096
6097 _iris_emit_lri(batch, _3DPRIM_START_VERTEX, 0);
6098 _iris_emit_lri(batch, _3DPRIM_BASE_VERTEX, 0);
6099 _iris_emit_lri(batch, _3DPRIM_START_INSTANCE, 0);
6100 _iris_emit_lri(batch, _3DPRIM_INSTANCE_COUNT, draw->instance_count);
6101 }
6102
6103 iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
6104 prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;
6105 prim.PredicateEnable = use_predicate;
6106
6107 if (draw->indirect || draw->count_from_stream_output) {
6108 prim.IndirectParameterEnable = true;
6109 } else {
6110 prim.StartInstanceLocation = draw->start_instance;
6111 prim.InstanceCount = draw->instance_count;
6112 prim.VertexCountPerInstance = draw->count;
6113
6114 prim.StartVertexLocation = draw->start;
6115
6116 if (draw->index_size) {
6117 prim.BaseVertexLocation += draw->index_bias;
6118 } else {
6119 prim.StartVertexLocation += draw->index_bias;
6120 }
6121 }
6122 }
6123 }
6124
6125 static void
6126 iris_upload_compute_state(struct iris_context *ice,
6127 struct iris_batch *batch,
6128 const struct pipe_grid_info *grid)
6129 {
6130 const uint64_t dirty = ice->state.dirty;
6131 struct iris_screen *screen = batch->screen;
6132 const struct gen_device_info *devinfo = &screen->devinfo;
6133 struct iris_binder *binder = &ice->state.binder;
6134 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
6135 struct iris_compiled_shader *shader =
6136 ice->shaders.prog[MESA_SHADER_COMPUTE];
6137 struct brw_stage_prog_data *prog_data = shader->prog_data;
6138 struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
6139
6140 /* Always pin the binder. If we're emitting new binding table pointers,
6141 * we need it. If not, we're probably inheriting old tables via the
6142 * context, and need it anyway. Since true zero-bindings cases are
6143 * practically non-existent, just pin it and avoid last_res tracking.
6144 */
6145 iris_use_pinned_bo(batch, ice->state.binder.bo, false);
6146
6147 if ((dirty & IRIS_DIRTY_CONSTANTS_CS) && shs->sysvals_need_upload)
6148 upload_sysvals(ice, MESA_SHADER_COMPUTE);
6149
6150 if (dirty & IRIS_DIRTY_BINDINGS_CS)
6151 iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);
6152
6153 if (dirty & IRIS_DIRTY_SAMPLER_STATES_CS)
6154 iris_upload_sampler_states(ice, MESA_SHADER_COMPUTE);
6155
6156 iris_use_optional_res(batch, shs->sampler_table.res, false);
6157 iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false);
6158
6159 if (ice->state.need_border_colors)
6160 iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false);
6161
6162 #if GEN_GEN >= 12
6163 genX(emit_aux_map_state)(batch);
6164 #endif
6165
6166 if (dirty & IRIS_DIRTY_CS) {
6167 /* The MEDIA_VFE_STATE documentation for Gen8+ says:
6168 *
6169 * "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
6170 * the only bits that are changed are scoreboard related: Scoreboard
6171 * Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta. For
6172 * these scoreboard related states, a MEDIA_STATE_FLUSH is
6173 * sufficient."
6174 */
6175 iris_emit_pipe_control_flush(batch,
6176 "workaround: stall before MEDIA_VFE_STATE",
6177 PIPE_CONTROL_CS_STALL);
6178
6179 iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
6180 if (prog_data->total_scratch) {
6181 struct iris_bo *bo =
6182 iris_get_scratch_space(ice, prog_data->total_scratch,
6183 MESA_SHADER_COMPUTE);
6184 vfe.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
6185 vfe.ScratchSpaceBasePointer = rw_bo(bo, 0);
6186 }
6187
6188 vfe.MaximumNumberofThreads =
6189 devinfo->max_cs_threads * screen->subslice_total - 1;
6190 #if GEN_GEN < 11
6191 vfe.ResetGatewayTimer =
6192 Resettingrelativetimerandlatchingtheglobaltimestamp;
6193 #endif
6194 #if GEN_GEN == 8
6195 vfe.BypassGatewayControl = true;
6196 #endif
6197 vfe.NumberofURBEntries = 2;
6198 vfe.URBEntryAllocationSize = 2;
6199
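      /* CURBE space is counted in 256-bit (32-byte) registers: per-thread
       * push constants are replicated for each HW thread, cross-thread
       * constants appear once, and the total is padded to an even register
       * count.  E.g. 1 per-thread reg x 8 threads + 0 cross-thread regs
       * -> ALIGN(8, 2) = 8 registers.
       */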
6200 vfe.CURBEAllocationSize =
6201 ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
6202 cs_prog_data->push.cross_thread.regs, 2);
6203 }
6204 }
6205
6206 /* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */
6207 if (dirty & IRIS_DIRTY_CS) {
6208 uint32_t curbe_data_offset = 0;
6209 assert(cs_prog_data->push.cross_thread.dwords == 0 &&
6210 cs_prog_data->push.per_thread.dwords == 1 &&
6211 cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
6212 uint32_t *curbe_data_map =
6213 stream_state(batch, ice->state.dynamic_uploader,
6214 &ice->state.last_res.cs_thread_ids,
6215 ALIGN(cs_prog_data->push.total.size, 64), 64,
6216 &curbe_data_offset);
6217 assert(curbe_data_map);
6218 memset(curbe_data_map, 0x5a, ALIGN(cs_prog_data->push.total.size, 64));
6219 iris_fill_cs_push_const_buffer(cs_prog_data, curbe_data_map);
6220
6221 iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
6222 curbe.CURBETotalDataLength =
6223 ALIGN(cs_prog_data->push.total.size, 64);
6224 curbe.CURBEDataStartAddress = curbe_data_offset;
6225 }
6226 }
6227
6228 if (dirty & (IRIS_DIRTY_SAMPLER_STATES_CS |
6229 IRIS_DIRTY_BINDINGS_CS |
6230 IRIS_DIRTY_CONSTANTS_CS |
6231 IRIS_DIRTY_CS)) {
6232 uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
6233
6234 iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
6235 idd.SamplerStatePointer = shs->sampler_table.offset;
6236 idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
6237 }
6238
6239 for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
6240 desc[i] |= ((uint32_t *) shader->derived_data)[i];
6241
6242 iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
6243 load.InterfaceDescriptorTotalLength =
6244 GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
6245 load.InterfaceDescriptorDataStartAddress =
6246 emit_state(batch, ice->state.dynamic_uploader,
6247 &ice->state.last_res.cs_desc, desc, sizeof(desc), 64);
6248 }
6249 }
6250
6251 uint32_t group_size = grid->block[0] * grid->block[1] * grid->block[2];
6252 uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);
6253 uint32_t right_mask;
6254
6255 if (remainder > 0)
6256 right_mask = ~0u >> (32 - remainder);
6257 else
6258 right_mask = ~0u >> (32 - cs_prog_data->simd_size);
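   /* For example, SIMD16 with a 24-invocation workgroup: remainder =
    * 24 & 15 = 8, so right_mask = 0x000000ff and the final partial group
    * executes with only its low 8 channels enabled.  When group_size is
    * a multiple of the SIMD width, the full per-group mask is used.
    */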
6259
6260 #define GPGPU_DISPATCHDIMX 0x2500
6261 #define GPGPU_DISPATCHDIMY 0x2504
6262 #define GPGPU_DISPATCHDIMZ 0x2508
6263
6264 if (grid->indirect) {
6265 struct iris_state_ref *grid_size = &ice->state.grid_size;
6266 struct iris_bo *bo = iris_resource_bo(grid_size->res);
6267 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6268 lrm.RegisterAddress = GPGPU_DISPATCHDIMX;
6269 lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 0);
6270 }
6271 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6272 lrm.RegisterAddress = GPGPU_DISPATCHDIMY;
6273 lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 4);
6274 }
6275 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6276 lrm.RegisterAddress = GPGPU_DISPATCHDIMZ;
6277 lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 8);
6278 }
6279 }
6280
6281 iris_emit_cmd(batch, GENX(GPGPU_WALKER), ggw) {
6282 ggw.IndirectParameterEnable = grid->indirect != NULL;
6283 ggw.SIMDSize = cs_prog_data->simd_size / 16;
6284 ggw.ThreadDepthCounterMaximum = 0;
6285 ggw.ThreadHeightCounterMaximum = 0;
6286 ggw.ThreadWidthCounterMaximum = cs_prog_data->threads - 1;
6287 ggw.ThreadGroupIDXDimension = grid->grid[0];
6288 ggw.ThreadGroupIDYDimension = grid->grid[1];
6289 ggw.ThreadGroupIDZDimension = grid->grid[2];
6290 ggw.RightExecutionMask = right_mask;
6291 ggw.BottomExecutionMask = 0xffffffff;
6292 }
6293
6294 iris_emit_cmd(batch, GENX(MEDIA_STATE_FLUSH), msf);
6295
6296 if (!batch->contains_draw) {
6297 iris_restore_compute_saved_bos(ice, batch, grid);
6298 batch->contains_draw = true;
6299 }
6300 }
6301
6302 /**
6303 * State module teardown.
6304 */
6305 static void
6306 iris_destroy_state(struct iris_context *ice)
6307 {
6308 struct iris_genx_state *genx = ice->state.genx;
6309
6310 pipe_resource_reference(&ice->draw.draw_params.res, NULL);
6311 pipe_resource_reference(&ice->draw.derived_draw_params.res, NULL);
6312
6313 /* Loop over all VBOs, including ones for draw parameters */
6314 for (unsigned i = 0; i < ARRAY_SIZE(genx->vertex_buffers); i++) {
6315 pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL);
6316 }
6317
6318 free(ice->state.genx);
6319
6320 for (int i = 0; i < 4; i++) {
6321 pipe_so_target_reference(&ice->state.so_target[i], NULL);
6322 }
6323
6324 for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
6325 pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
6326 }
6327 pipe_surface_reference(&ice->state.framebuffer.zsbuf, NULL);
6328
6329 for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
6330 struct iris_shader_state *shs = &ice->state.shaders[stage];
6331 pipe_resource_reference(&shs->sampler_table.res, NULL);
6332 for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
6333 pipe_resource_reference(&shs->constbuf[i].buffer, NULL);
6334 pipe_resource_reference(&shs->constbuf_surf_state[i].res, NULL);
6335 }
6336 for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
6337 pipe_resource_reference(&shs->image[i].base.resource, NULL);
6338 pipe_resource_reference(&shs->image[i].surface_state.res, NULL);
6339 }
6340 for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
6341 pipe_resource_reference(&shs->ssbo[i].buffer, NULL);
6342 pipe_resource_reference(&shs->ssbo_surf_state[i].res, NULL);
6343 }
6344 for (int i = 0; i < IRIS_MAX_TEXTURE_SAMPLERS; i++) {
6345 pipe_sampler_view_reference((struct pipe_sampler_view **)
6346 &shs->textures[i], NULL);
6347 }
6348 }
6349
6350 pipe_resource_reference(&ice->state.grid_size.res, NULL);
6351 pipe_resource_reference(&ice->state.grid_surf_state.res, NULL);
6352
6353 pipe_resource_reference(&ice->state.null_fb.res, NULL);
6354 pipe_resource_reference(&ice->state.unbound_tex.res, NULL);
6355
6356 pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
6357 pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
6358 pipe_resource_reference(&ice->state.last_res.color_calc, NULL);
6359 pipe_resource_reference(&ice->state.last_res.scissor, NULL);
6360 pipe_resource_reference(&ice->state.last_res.blend, NULL);
6361 pipe_resource_reference(&ice->state.last_res.index_buffer, NULL);
6362 pipe_resource_reference(&ice->state.last_res.cs_thread_ids, NULL);
6363 pipe_resource_reference(&ice->state.last_res.cs_desc, NULL);
6364 }
6365
6366 /* ------------------------------------------------------------------- */
6367
6368 static void
6369 iris_rebind_buffer(struct iris_context *ice,
6370 struct iris_resource *res,
6371 uint64_t old_address)
6372 {
6373 struct pipe_context *ctx = &ice->ctx;
6374 struct iris_screen *screen = (void *) ctx->screen;
6375 struct iris_genx_state *genx = ice->state.genx;
6376
6377 assert(res->base.target == PIPE_BUFFER);
6378
6379 /* Buffers can't be framebuffer attachments, nor display related,
6380 * and we don't have upstream Clover support.
6381 */
6382 assert(!(res->bind_history & (PIPE_BIND_DEPTH_STENCIL |
6383 PIPE_BIND_RENDER_TARGET |
6384 PIPE_BIND_BLENDABLE |
6385 PIPE_BIND_DISPLAY_TARGET |
6386 PIPE_BIND_CURSOR |
6387 PIPE_BIND_COMPUTE_RESOURCE |
6388 PIPE_BIND_GLOBAL)));
6389
6390 if (res->bind_history & PIPE_BIND_VERTEX_BUFFER) {
6391 uint64_t bound_vbs = ice->state.bound_vertex_buffers;
6392 while (bound_vbs) {
6393 const int i = u_bit_scan64(&bound_vbs);
6394 struct iris_vertex_buffer_state *state = &genx->vertex_buffers[i];
6395
6396 /* Update the CPU struct */
6397 STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32);
6398 STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64);
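/* The packed address occupies DWords 1-2 (verified by the asserts
 * above), so we can read and patch it in place as a uint64_t.
 */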
6399 uint64_t *addr = (uint64_t *) &state->state[1];
6400
6401 if (*addr == old_address + state->offset) {
6402 *addr = res->bo->gtt_offset + state->offset;
6403 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
6404 }
6405 }
6406 }
6407
6408 /* We don't need to handle PIPE_BIND_INDEX_BUFFER here: we re-emit
6409 * the 3DSTATE_INDEX_BUFFER packet whenever the address changes.
6410 *
6411 * There is also no need to handle these:
6412 * - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw)
6413 * - PIPE_BIND_QUERY_BUFFER (no persistent state references)
6414 */
6415
6416 if (res->bind_history & PIPE_BIND_STREAM_OUTPUT) {
6417 /* XXX: be careful about resetting vs appending... */
6418 assert(false);
6419 }
6420
6421 for (int s = MESA_SHADER_VERTEX; s < MESA_SHADER_STAGES; s++) {
6422 struct iris_shader_state *shs = &ice->state.shaders[s];
6423 enum pipe_shader_type p_stage = stage_to_pipe(s);
6424
6425 if (!(res->bind_stages & (1 << s)))
6426 continue;
6427
6428 if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
6429 /* Skip constant buffer 0; it's for regular uniforms, not UBOs */
6430 uint32_t bound_cbufs = shs->bound_cbufs & ~1u;
6431 while (bound_cbufs) {
6432 const int i = u_bit_scan(&bound_cbufs);
6433 struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
6434 struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
6435
6436 if (res->bo == iris_resource_bo(cbuf->buffer)) {
6437 pipe_resource_reference(&surf_state->res, NULL);
6438 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << s;
6439 }
6440 }
6441 }
6442
6443 if (res->bind_history & PIPE_BIND_SHADER_BUFFER) {
6444 uint32_t bound_ssbos = shs->bound_ssbos;
6445 while (bound_ssbos) {
6446 const int i = u_bit_scan(&bound_ssbos);
6447 struct pipe_shader_buffer *ssbo = &shs->ssbo[i];
6448
6449 if (res->bo == iris_resource_bo(ssbo->buffer)) {
6450 struct pipe_shader_buffer buf = {
6451 .buffer = &res->base,
6452 .buffer_offset = ssbo->buffer_offset,
6453 .buffer_size = ssbo->buffer_size,
6454 };
6455 iris_set_shader_buffers(ctx, p_stage, i, 1, &buf,
6456 (shs->writable_ssbos >> i) & 1);
6457 }
6458 }
6459 }
6460
6461 if (res->bind_history & PIPE_BIND_SAMPLER_VIEW) {
6462 uint32_t bound_sampler_views = shs->bound_sampler_views;
6463 while (bound_sampler_views) {
6464 const int i = u_bit_scan(&bound_sampler_views);
6465 struct iris_sampler_view *isv = shs->textures[i];
6466
6467 if (res->bo == iris_resource_bo(isv->base.texture)) {
6468 void *map = alloc_surface_states(ice->state.surface_uploader,
6469 &isv->surface_state,
6470 isv->res->aux.sampler_usages);
6471 assert(map);
6472 fill_buffer_surface_state(&screen->isl_dev, isv->res, map,
6473 isv->view.format, isv->view.swizzle,
6474 isv->base.u.buf.offset,
6475 isv->base.u.buf.size);
6476 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << s;
6477 }
6478 }
6479 }
6480
6481 if (res->bind_history & PIPE_BIND_SHADER_IMAGE) {
6482 uint32_t bound_image_views = shs->bound_image_views;
6483 while (bound_image_views) {
6484 const int i = u_bit_scan(&bound_image_views);
6485 struct iris_image_view *iv = &shs->image[i];
6486
6487 if (res->bo == iris_resource_bo(iv->base.resource)) {
6488 iris_set_shader_images(ctx, p_stage, i, 1, &iv->base);
6489 }
6490 }
6491 }
6492 }
6493 }
6494
6495 /* ------------------------------------------------------------------- */
6496
6497 static unsigned
6498 flags_to_post_sync_op(uint32_t flags)
6499 {
6500 if (flags & PIPE_CONTROL_WRITE_IMMEDIATE)
6501 return WriteImmediateData;
6502
6503 if (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT)
6504 return WritePSDepthCount;
6505
6506 if (flags & PIPE_CONTROL_WRITE_TIMESTAMP)
6507 return WriteTimestamp;
6508
6509 return 0;
6510 }
6511
6512 /**
6513 * Do the given flags have a Post Sync or LRI Post Sync operation?
6514 */
6515 static enum pipe_control_flags
6516 get_post_sync_flags(enum pipe_control_flags flags)
6517 {
6518 flags &= PIPE_CONTROL_WRITE_IMMEDIATE |
6519 PIPE_CONTROL_WRITE_DEPTH_COUNT |
6520 PIPE_CONTROL_WRITE_TIMESTAMP |
6521 PIPE_CONTROL_LRI_POST_SYNC_OP;
6522
6523 /* Only one "Post Sync Op" is allowed, and it's mutually exclusive with
6524 * "LRI Post Sync Operation". So more than one bit set would be illegal.
6525 */
6526 assert(util_bitcount(flags) <= 1);
6527
6528 return flags;
6529 }
6530
6531 #define IS_COMPUTE_PIPELINE(batch) ((batch)->name == IRIS_BATCH_COMPUTE)
6532
6533 /**
6534 * Emit a series of PIPE_CONTROL commands, taking into account any
6535 * workarounds necessary to actually accomplish the caller's request.
6536 *
6537 * Unless otherwise noted, spec quotations in this function come from:
6538 *
6539 * Synchronization of the 3D Pipeline > PIPE_CONTROL Command > Programming
6540 * Restrictions for PIPE_CONTROL.
6541 *
6542 * You should not use this function directly. Use the helpers in
6543 * iris_pipe_control.c instead, which may split the pipe control further.
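*
* As a purely illustrative sketch (not the actual helper code), such a
* helper might ultimately boil down to a call like:
*
*    iris_emit_raw_pipe_control(batch, "example: RT flush + CS stall",
*                               PIPE_CONTROL_RENDER_TARGET_FLUSH |
*                               PIPE_CONTROL_CS_STALL,
*                               NULL, 0, 0);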
6544 */
6545 static void
6546 iris_emit_raw_pipe_control(struct iris_batch *batch,
6547 const char *reason,
6548 uint32_t flags,
6549 struct iris_bo *bo,
6550 uint32_t offset,
6551 uint64_t imm)
6552 {
6553 UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
6554 enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
6555 enum pipe_control_flags non_lri_post_sync_flags =
6556 post_sync_flags & ~PIPE_CONTROL_LRI_POST_SYNC_OP;
6557
6558 /* Recursive PIPE_CONTROL workarounds --------------------------------
6559 * (http://knowyourmeme.com/memes/xzibit-yo-dawg)
6560 *
6561 * We do these first because we want to look at the original operation,
6562 * rather than any workarounds we set.
6563 */
6564 if (GEN_GEN == 9 && (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
6565 /* The PIPE_CONTROL "VF Cache Invalidation Enable" bit description
6566 * lists several workarounds:
6567 *
6568 * "Project: SKL, KBL, BXT
6569 *
6570 * If the VF Cache Invalidation Enable is set to a 1 in a
6571 * PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields
6572 * set to 0, with the VF Cache Invalidation Enable set to 0
6573 * needs to be sent prior to the PIPE_CONTROL with VF Cache
6574 * Invalidation Enable set to a 1."
6575 */
6576 iris_emit_raw_pipe_control(batch,
6577 "workaround: recursive VF cache invalidate",
6578 0, NULL, 0, 0);
6579 }
6580
6581 if (GEN_GEN == 9 && IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
6582 /* Project: SKL / Argument: LRI Post Sync Operation [23]
6583 *
6584 * "PIPECONTROL command with “Command Streamer Stall Enable” must be
6585 * programmed prior to programming a PIPECONTROL command with "LRI
6586 * Post Sync Operation" in GPGPU mode of operation (i.e when
6587 * PIPELINE_SELECT command is set to GPGPU mode of operation)."
6588 *
6589 * The same text exists a few rows below for Post Sync Op.
6590 */
6591 iris_emit_raw_pipe_control(batch,
6592 "workaround: CS stall before gpgpu post-sync",
6593 PIPE_CONTROL_CS_STALL, bo, offset, imm);
6594 }
6595
6596 if (GEN_GEN == 10 && (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH)) {
6597 /* Cannonlake:
6598 * "Before sending a PIPE_CONTROL command with bit 12 set, SW must issue
6599 * another PIPE_CONTROL with Render Target Cache Flush Enable (bit 12)
6600 * = 0 and Pipe Control Flush Enable (bit 7) = 1"
6601 */
6602 iris_emit_raw_pipe_control(batch,
6603 "workaround: PC flush before RT flush",
6604 PIPE_CONTROL_FLUSH_ENABLE, bo, offset, imm);
6605 }
6606
6607 /* "Flush Types" workarounds ---------------------------------------------
6608 * We do these now because they may add post-sync operations or CS stalls.
6609 */
6610
6611 if (GEN_GEN < 11 && (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
6612 /* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
6613 *
6614 * "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
6615 * 'Write PS Depth Count' or 'Write Timestamp'."
6616 */
6617 if (!bo) {
6618 flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
6619 post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
6620 non_lri_post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
6621 bo = batch->screen->workaround_bo;
6622 }
6623 }
6624
6625 /* #1130 from Gen10 workarounds page:
6626 *
6627 * "Enable Depth Stall on every Post Sync Op if Render target Cache
6628 * Flush is not enabled in same PIPE CONTROL and Enable Pixel score
6629 * board stall if Render target cache flush is enabled."
6630 *
6631 * Applicable to CNL B0 and C0 steppings only.
6632 *
6633 * The wording here is unclear, and this workaround doesn't look anything
6634 * like the internal bug report recommendations, but leave it be for now...
6635 */
6636 if (GEN_GEN == 10) {
6637 if (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) {
6638 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
6639 } else if (flags & non_lri_post_sync_flags) {
6640 flags |= PIPE_CONTROL_DEPTH_STALL;
6641 }
6642 }
6643
6644 if (flags & PIPE_CONTROL_DEPTH_STALL) {
6645 /* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
6646 *
6647 * "This bit must be DISABLED for operations other than writing
6648 * PS_DEPTH_COUNT."
6649 *
6650 * This seems like nonsense. An Ivybridge workaround requires us to
6651 * emit a PIPE_CONTROL with a depth stall and write immediate post-sync
6652 * operation. Gen8+ requires us to emit depth stalls and depth cache
6653 * flushes together. So, it's hard to imagine this means anything other
6654 * than "we originally intended this to be used for PS_DEPTH_COUNT".
6655 *
6656 * We ignore the supposed restriction and do nothing.
6657 */
6658 }
6659
6660 if (flags & (PIPE_CONTROL_RENDER_TARGET_FLUSH |
6661 PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
6662 /* From the PIPE_CONTROL instruction table, bit 12 and bit 1:
6663 *
6664 * "This bit must be DISABLED for End-of-pipe (Read) fences,
6665 * PS_DEPTH_COUNT or TIMESTAMP queries."
6666 *
6667 * TODO: Implement end-of-pipe checking.
6668 */
6669 assert(!(post_sync_flags & (PIPE_CONTROL_WRITE_DEPTH_COUNT |
6670 PIPE_CONTROL_WRITE_TIMESTAMP)));
6671 }
6672
6673 if (GEN_GEN < 11 && (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
6674 /* From the PIPE_CONTROL instruction table, bit 1:
6675 *
6676 * "This bit is ignored if Depth Stall Enable is set.
6677 * Further, the render cache is not flushed even if Write Cache
6678 * Flush Enable bit is set."
6679 *
6680 * We assert that the caller doesn't do this combination, to try and
6681 * prevent mistakes. It shouldn't hurt the GPU, though.
6682 *
6683 * We skip this check on Gen11+ as the "Stall at Pixel Scoreboard"
6684 * and "Render Target Flush" combo is explicitly required for BTI
6685 * update workarounds.
6686 */
6687 assert(!(flags & (PIPE_CONTROL_DEPTH_STALL |
6688 PIPE_CONTROL_RENDER_TARGET_FLUSH)));
6689 }
6690
6691 /* PIPE_CONTROL page workarounds ------------------------------------- */
6692
6693 if (GEN_GEN <= 8 && (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE)) {
6694 /* From the PIPE_CONTROL page itself:
6695 *
6696 * "IVB, HSW, BDW
6697 * Restriction: Pipe_control with CS-stall bit set must be issued
6698 * before a pipe-control command that has the State Cache
6699 * Invalidate bit set."
6700 */
6701 flags |= PIPE_CONTROL_CS_STALL;
6702 }
6703
6704 if (flags & PIPE_CONTROL_FLUSH_LLC) {
6705 /* From the PIPE_CONTROL instruction table, bit 26 (Flush LLC):
6706 *
6707 * "Project: ALL
6708 * SW must always program Post-Sync Operation to "Write Immediate
6709 * Data" when Flush LLC is set."
6710 *
6711 * For now, we just require the caller to do it.
6712 */
6713 assert(flags & PIPE_CONTROL_WRITE_IMMEDIATE);
6714 }
6715
6716 /* "Post-Sync Operation" workarounds -------------------------------- */
6717
6718 /* Project: All / Argument: Global Snapshot Count Reset [19]
6719 *
6720 * "This bit must not be exercised on any product.
6721 * Requires stall bit ([20] of DW1) set."
6722 *
6723 * We don't use this, so we just assert that it isn't used. The
6724 * PIPE_CONTROL instruction page indicates that they intended this
6725 * as a debug feature and don't think it is useful in production,
6726 * but it may actually be usable, should we ever want to.
6727 */
6728 assert((flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) == 0);
6729
6730 if (flags & (PIPE_CONTROL_MEDIA_STATE_CLEAR |
6731 PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE)) {
6732 /* Project: All / Arguments:
6733 *
6734 * - Generic Media State Clear [16]
6735 * - Indirect State Pointers Disable [16]
6736 *
6737 * "Requires stall bit ([20] of DW1) set."
6738 *
6739 * Also, the PIPE_CONTROL instruction table, bit 16 (Generic Media
6740 * State Clear) says:
6741 *
6742 * "PIPECONTROL command with “Command Streamer Stall Enable” must be
6743 * programmed prior to programming a PIPECONTROL command with "Media
6744 * State Clear" set in GPGPU mode of operation"
6745 *
6746 * This is a subset of the earlier rule, so there's nothing to do.
6747 */
6748 flags |= PIPE_CONTROL_CS_STALL;
6749 }
6750
6751 if (flags & PIPE_CONTROL_STORE_DATA_INDEX) {
6752 /* Project: All / Argument: Store Data Index
6753 *
6754 * "Post-Sync Operation ([15:14] of DW1) must be set to something other
6755 * than '0'."
6756 *
6757 * For now, we just assert that the caller does this. We might want to
6758 * automatically add a write to the workaround BO...
6759 */
6760 assert(non_lri_post_sync_flags != 0);
6761 }
6762
6763 if (flags & PIPE_CONTROL_SYNC_GFDT) {
6764 /* Project: All / Argument: Sync GFDT
6765 *
6766 * "Post-Sync Operation ([15:14] of DW1) must be set to something other
6767 * than '0' or 0x2520[13] must be set."
6768 *
6769 * For now, we just assert that the caller does this.
6770 */
6771 assert(non_lri_post_sync_flags != 0);
6772 }
6773
6774 if (flags & PIPE_CONTROL_TLB_INVALIDATE) {
6775 /* Project: IVB+ / Argument: TLB inv
6776 *
6777 * "Requires stall bit ([20] of DW1) set."
6778 *
6779 * Also, from the PIPE_CONTROL instruction table:
6780 *
6781 * "Project: SKL+
6782 * Post Sync Operation or CS stall must be set to ensure a TLB
6783 * invalidation occurs. Otherwise no cycle will occur to the TLB
6784 * cache to invalidate."
6785 *
6786 * The CS stall we set below satisfies both of these rules, so nothing more is needed.
6787 */
6788 flags |= PIPE_CONTROL_CS_STALL;
6789 }
6790
6791 if (GEN_GEN == 9 && devinfo->gt == 4) {
6792 /* TODO: The big Skylake GT4 post sync op workaround */
6793 }
6794
6795 /* "GPGPU specific workarounds" (both post-sync and flush) ------------ */
6796
6797 if (IS_COMPUTE_PIPELINE(batch)) {
6798 if (GEN_GEN >= 9 && (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE)) {
6799 /* Project: SKL+ / Argument: Tex Invalidate
6800 * "Requires stall bit ([20] of DW) set for all GPGPU Workloads."
6801 */
6802 flags |= PIPE_CONTROL_CS_STALL;
6803 }
6804
6805 if (GEN_GEN == 8 && (post_sync_flags ||
6806 (flags & (PIPE_CONTROL_NOTIFY_ENABLE |
6807 PIPE_CONTROL_DEPTH_STALL |
6808 PIPE_CONTROL_RENDER_TARGET_FLUSH |
6809 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
6810 PIPE_CONTROL_DATA_CACHE_FLUSH)))) {
6811 /* Project: BDW / Arguments:
6812 *
6813 * - LRI Post Sync Operation [23]
6814 * - Post Sync Op [15:14]
6815 * - Notify En [8]
6816 * - Depth Stall [13]
6817 * - Render Target Cache Flush [12]
6818 * - Depth Cache Flush [0]
6819 * - DC Flush Enable [5]
6820 *
6821 * "Requires stall bit ([20] of DW) set for all GPGPU and Media
6822 * Workloads."
6823 */
6824 flags |= PIPE_CONTROL_CS_STALL;
6825
6826 /* Also, from the PIPE_CONTROL instruction table, bit 20:
6827 *
6828 * "Project: BDW
6829 * This bit must be always set when PIPE_CONTROL command is
6830 * programmed by GPGPU and MEDIA workloads, except for the cases
6831 * when only Read Only Cache Invalidation bits are set (State
6832 * Cache Invalidation Enable, Instruction cache Invalidation
6833 * Enable, Texture Cache Invalidation Enable, Constant Cache
6834 * Invalidation Enable). This is to WA the FFDOP CG issue; this WA
6835 * need not be implemented when FF_DOP_CG is disabled via "Fixed
6836 * Function DOP Clock Gate Disable" bit in RC_PSMI_CTRL register."
6837 *
6838 * It sounds like we could avoid CS stalls in some cases, but we
6839 * don't currently bother. This list isn't exactly the list above,
6840 * either...
6841 */
6842 }
6843 }
6844
6845 /* "Stall" workarounds ----------------------------------------------
6846 * These have to come after the earlier ones because we may have added
6847 * some additional CS stalls above.
6848 */
6849
6850 if (GEN_GEN < 9 && (flags & PIPE_CONTROL_CS_STALL)) {
6851 /* Project: PRE-SKL, VLV, CHV
6852 *
6853 * "[All Stepping][All SKUs]:
6854 *
6855 * One of the following must also be set:
6856 *
6857 * - Render Target Cache Flush Enable ([12] of DW1)
6858 * - Depth Cache Flush Enable ([0] of DW1)
6859 * - Stall at Pixel Scoreboard ([1] of DW1)
6860 * - Depth Stall ([13] of DW1)
6861 * - Post-Sync Operation ([15:14] of DW1)
6862 * - DC Flush Enable ([5] of DW1)"
6863 *
6864 * If we don't already have one of those bits set, we choose to add
6865 * "Stall at Pixel Scoreboard". Some of the other bits require a
6866 * CS stall as a workaround (see above), which would send us into
6867 * an infinite recursion of PIPE_CONTROLs. "Stall at Pixel Scoreboard"
6868 * appears to be safe, so we choose that.
6869 */
6870 const uint32_t wa_bits = PIPE_CONTROL_RENDER_TARGET_FLUSH |
6871 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
6872 PIPE_CONTROL_WRITE_IMMEDIATE |
6873 PIPE_CONTROL_WRITE_DEPTH_COUNT |
6874 PIPE_CONTROL_WRITE_TIMESTAMP |
6875 PIPE_CONTROL_STALL_AT_SCOREBOARD |
6876 PIPE_CONTROL_DEPTH_STALL |
6877 PIPE_CONTROL_DATA_CACHE_FLUSH;
6878 if (!(flags & wa_bits))
6879 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
6880 }
6881
6882 /* Emit --------------------------------------------------------------- */
6883
6884 if (INTEL_DEBUG & DEBUG_PIPE_CONTROL) {
6885 fprintf(stderr,
6886 " PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
6887 (flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
6888 (flags & PIPE_CONTROL_CS_STALL) ? "CS " : "",
6889 (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD) ? "Scoreboard " : "",
6890 (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) ? "VF " : "",
6891 (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ? "RT " : "",
6892 (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE) ? "Const " : "",
6893 (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) ? "TC " : "",
6894 (flags & PIPE_CONTROL_DATA_CACHE_FLUSH) ? "DC " : "",
6895 (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH) ? "ZFlush " : "",
6896 (flags & PIPE_CONTROL_DEPTH_STALL) ? "ZStall " : "",
6897 (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE) ? "State " : "",
6898 (flags & PIPE_CONTROL_TLB_INVALIDATE) ? "TLB " : "",
6899 (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE) ? "Inst " : "",
6900 (flags & PIPE_CONTROL_MEDIA_STATE_CLEAR) ? "MediaClear " : "",
6901 (flags & PIPE_CONTROL_NOTIFY_ENABLE) ? "Notify " : "",
6902 (flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) ?
6903 "SnapRes" : "",
6904 (flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE) ?
6905 "ISPDis" : "",
6906 (flags & PIPE_CONTROL_WRITE_IMMEDIATE) ? "WriteImm " : "",
6907 (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT) ? "WriteZCount " : "",
6908 (flags & PIPE_CONTROL_WRITE_TIMESTAMP) ? "WriteTimestamp " : "",
6909 imm, reason);
6910 }
6911
6912 iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
6913 pc.LRIPostSyncOperation = NoLRIOperation;
6914 pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
6915 pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
6916 pc.StoreDataIndex = 0;
6917 pc.CommandStreamerStallEnable = flags & PIPE_CONTROL_CS_STALL;
6918 pc.GlobalSnapshotCountReset =
6919 flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET;
6920 pc.TLBInvalidate = flags & PIPE_CONTROL_TLB_INVALIDATE;
6921 pc.GenericMediaStateClear = flags & PIPE_CONTROL_MEDIA_STATE_CLEAR;
6922 pc.StallAtPixelScoreboard = flags & PIPE_CONTROL_STALL_AT_SCOREBOARD;
6923 pc.RenderTargetCacheFlushEnable =
6924 flags & PIPE_CONTROL_RENDER_TARGET_FLUSH;
6925 pc.DepthCacheFlushEnable = flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH;
6926 pc.StateCacheInvalidationEnable =
6927 flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE;
6928 pc.VFCacheInvalidationEnable = flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
6929 pc.ConstantCacheInvalidationEnable =
6930 flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE;
6931 pc.PostSyncOperation = flags_to_post_sync_op(flags);
6932 pc.DepthStallEnable = flags & PIPE_CONTROL_DEPTH_STALL;
6933 pc.InstructionCacheInvalidateEnable =
6934 flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE;
6935 pc.NotifyEnable = flags & PIPE_CONTROL_NOTIFY_ENABLE;
6936 pc.IndirectStatePointersDisable =
6937 flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE;
6938 pc.TextureCacheInvalidationEnable =
6939 flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
6940 pc.Address = rw_bo(bo, offset);
6941 pc.ImmediateData = imm;
6942 }
6943 }
6944
6945 void
6946 genX(emit_urb_setup)(struct iris_context *ice,
6947 struct iris_batch *batch,
6948 const unsigned size[4],
6949 bool tess_present, bool gs_present)
6950 {
6951 const struct gen_device_info *devinfo = &batch->screen->devinfo;
6952 const unsigned push_size_kB = 32;
6953 unsigned entries[4];
6954 unsigned start[4];
6955
6956 ice->shaders.last_vs_entry_size = size[MESA_SHADER_VERTEX];
6957
6958 gen_get_urb_config(devinfo, 1024 * push_size_kB,
6959 1024 * ice->shaders.urb_size,
6960 tess_present, gs_present,
6961 size, entries, start);
6962
6963 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
6964 iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
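/* 3DSTATE_URB_VS, _HS, _DS, and _GS have consecutive sub-opcodes, so
 * we can emit all four by bumping the sub-opcode in the VS template.
 */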
6965 urb._3DCommandSubOpcode += i;
6966 urb.VSURBStartingAddress = start[i];
6967 urb.VSURBEntryAllocationSize = size[i] - 1;
6968 urb.VSNumberofURBEntries = entries[i];
6969 }
6970 }
6971 }
6972
6973 #if GEN_GEN == 9
6974 /**
6975 * Preemption on Gen9 has to be enabled or disabled in various cases.
6976 *
6977 * See these workarounds for preemption:
6978 * - WaDisableMidObjectPreemptionForGSLineStripAdj
6979 * - WaDisableMidObjectPreemptionForTrifanOrPolygon
6980 * - WaDisableMidObjectPreemptionForLineLoop
6981 * - WA#0798
6982 *
6983 * We don't put this in the vtable because it's only used on Gen9.
6984 */
6985 void
6986 gen9_toggle_preemption(struct iris_context *ice,
6987 struct iris_batch *batch,
6988 const struct pipe_draw_info *draw)
6989 {
6990 struct iris_genx_state *genx = ice->state.genx;
6991 bool object_preemption = true;
6992
6993 /* WaDisableMidObjectPreemptionForGSLineStripAdj
6994 *
6995 * "WA: Disable mid-draw preemption when draw-call is a linestrip_adj
6996 * and GS is enabled."
6997 */
6998 if (draw->mode == PIPE_PRIM_LINE_STRIP_ADJACENCY &&
6999 ice->shaders.prog[MESA_SHADER_GEOMETRY])
7000 object_preemption = false;
7001
7002 /* WaDisableMidObjectPreemptionForTrifanOrPolygon
7003 *
7004 * "TriFan miscompare in Execlist Preemption test. Cut index that is
7005 * on a previous context. End the previous, then resume another context
7006 * with a tri-fan or polygon, and the vertex count is corrupted. If we
7007 * preempt again we will cause corruption.
7008 *
7009 * WA: Disable mid-draw preemption when draw-call has a tri-fan."
7010 */
7011 if (draw->mode == PIPE_PRIM_TRIANGLE_FAN)
7012 object_preemption = false;
7013
7014 /* WaDisableMidObjectPreemptionForLineLoop
7015 *
7016 * "VF Stats Counters Missing a vertex when preemption enabled.
7017 *
7018 * WA: Disable mid-draw preemption when the draw uses a lineloop
7019 * topology."
7020 */
7021 if (draw->mode == PIPE_PRIM_LINE_LOOP)
7022 object_preemption = false;
7023
7024 /* WA#0798
7025 *
7026 * "VF is corrupting GAFS data when preempted on an instance boundary
7027 * and replayed with instancing enabled.
7028 *
7029 * WA: Disable preemption when using instancing."
7030 */
7031 if (draw->instance_count > 1)
7032 object_preemption = false;
7033
7034 if (genx->object_preemption != object_preemption) {
7035 iris_enable_obj_preemption(batch, object_preemption);
7036 genx->object_preemption = object_preemption;
7037 }
7038 }
7039 #endif
7040
7041 static void
7042 iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch)
7043 {
7044 struct iris_genx_state *genx = ice->state.genx;
7045
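/* Zeroing the cached 3DSTATE_INDEX_BUFFER packet forces it to be
 * re-emitted on the next indexed draw, rather than assumed to still
 * be in effect in the new batch.
 */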
7046 memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer));
7047 }
7048
7049 static void
7050 iris_emit_mi_report_perf_count(struct iris_batch *batch,
7051 struct iris_bo *bo,
7052 uint32_t offset_in_bytes,
7053 uint32_t report_id)
7054 {
7055 iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
7056 mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes);
7057 mi_rpc.ReportID = report_id;
7058 }
7059 }
7060
7061 /**
7062 * Update the pixel hashing modes that determine the balancing of PS threads
7063 * across subslices and slices.
7064 *
7065 * \param width Width bound of the rendering area (already scaled down if \p
7066 * scale is greater than 1).
7067 * \param height Height bound of the rendering area (already scaled down if \p
7068 * scale is greater than 1).
7069 * \param scale The number of framebuffer samples that could potentially be
7070 * affected by an individual channel of the PS thread. This is
7071 * typically one for single-sampled rendering, but for operations
7072 * like CCS resolves and fast clears a single PS invocation may
7073 * update a huge number of pixels, in which case a finer
7074 * balancing is desirable in order to maximally utilize the
7075 * bandwidth available. UINT_MAX can be used as shorthand for
7076 * "finest hashing mode available".
7077 */
7078 void
7079 genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
7080 unsigned width, unsigned height, unsigned scale)
7081 {
7082 #if GEN_GEN == 9
7083 const struct gen_device_info *devinfo = &batch->screen->devinfo;
7084 const unsigned slice_hashing[] = {
7085 /* Because all Gen9 platforms with more than one slice require
7086 * three-way subslice hashing, a single "normal" 16x16 slice hashing
7087 * block is guaranteed to suffer from substantial imbalance, with one
7088 * subslice receiving twice as much work as the other two in the
7089 * slice.
7090 *
7091 * The performance impact of that would be particularly severe when
7092 * three-way hashing is also in use for slice balancing (which is the
7093 * case for all Gen9 GT4 platforms), because one of the slices
7094 * receives one of every three 16x16 blocks in either direction, which
7095 * is roughly the periodicity of the underlying subslice imbalance
7096 * pattern ("roughly" because in reality the hardware's
7097 * implementation of three-way hashing doesn't do exact modulo 3
7098 * arithmetic, which somewhat decreases the magnitude of this effect
7099 * in practice). This leads to a systematic subslice imbalance
7100 * within that slice regardless of the size of the primitive. The
7101 * 32x32 hashing mode guarantees that the subslice imbalance within a
7102 * single slice hashing block is minimal, largely eliminating this
7103 * effect.
7104 */
7105 _32x32,
7106 /* Finest slice hashing mode available. */
7107 NORMAL
7108 };
7109 const unsigned subslice_hashing[] = {
7110 /* 16x16 would provide a slight cache locality benefit especially
7111 * visible in the sampler L1 cache efficiency of low-bandwidth
7112 * non-LLC platforms, but it comes at the cost of greater subslice
7113 * imbalance for primitives of dimensions approximately intermediate
7114 * between 16x4 and 16x16.
7115 */
7116 _16x4,
7117 /* Finest subslice hashing mode available. */
7118 _8x4
7119 };
7120 /* Dimensions of the smallest hashing block of a given hashing mode. If
7121 * the rendering area is smaller than this there can't possibly be any
7122 * benefit from switching to this mode, so we optimize out the
7123 * transition.
7124 */
7125 const unsigned min_size[][2] = {
7126 { 16, 4 },
7127 { 8, 4 }
7128 };
7129 const unsigned idx = scale > 1;
7130
7131 if (width > min_size[idx][0] || height > min_size[idx][1]) {
7132 uint32_t gt_mode;
7133
7134 iris_pack_state(GENX(GT_MODE), &gt_mode, reg) {
7135 reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
7136 reg.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0);
7137 reg.SubsliceHashing = subslice_hashing[idx];
7138 reg.SubsliceHashingMask = -1;
7139 }
7140
7141 iris_emit_raw_pipe_control(batch,
7142 "workaround: CS stall before GT_MODE LRI",
7143 PIPE_CONTROL_STALL_AT_SCOREBOARD |
7144 PIPE_CONTROL_CS_STALL,
7145 NULL, 0, 0);
7146
7147 iris_emit_lri(batch, GT_MODE, gt_mode);
7148
7149 ice->state.current_hash_scale = scale;
7150 }
7151 #endif
7152 }
7153
7154 void
7155 genX(init_state)(struct iris_context *ice)
7156 {
7157 struct pipe_context *ctx = &ice->ctx;
7158 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
7159
7160 ctx->create_blend_state = iris_create_blend_state;
7161 ctx->create_depth_stencil_alpha_state = iris_create_zsa_state;
7162 ctx->create_rasterizer_state = iris_create_rasterizer_state;
7163 ctx->create_sampler_state = iris_create_sampler_state;
7164 ctx->create_sampler_view = iris_create_sampler_view;
7165 ctx->create_surface = iris_create_surface;
7166 ctx->create_vertex_elements_state = iris_create_vertex_elements;
7167 ctx->bind_blend_state = iris_bind_blend_state;
7168 ctx->bind_depth_stencil_alpha_state = iris_bind_zsa_state;
7169 ctx->bind_sampler_states = iris_bind_sampler_states;
7170 ctx->bind_rasterizer_state = iris_bind_rasterizer_state;
7171 ctx->bind_vertex_elements_state = iris_bind_vertex_elements_state;
7172 ctx->delete_blend_state = iris_delete_state;
7173 ctx->delete_depth_stencil_alpha_state = iris_delete_state;
7174 ctx->delete_rasterizer_state = iris_delete_state;
7175 ctx->delete_sampler_state = iris_delete_state;
7176 ctx->delete_vertex_elements_state = iris_delete_state;
7177 ctx->set_blend_color = iris_set_blend_color;
7178 ctx->set_clip_state = iris_set_clip_state;
7179 ctx->set_constant_buffer = iris_set_constant_buffer;
7180 ctx->set_shader_buffers = iris_set_shader_buffers;
7181 ctx->set_shader_images = iris_set_shader_images;
7182 ctx->set_sampler_views = iris_set_sampler_views;
7183 ctx->set_tess_state = iris_set_tess_state;
7184 ctx->set_framebuffer_state = iris_set_framebuffer_state;
7185 ctx->set_polygon_stipple = iris_set_polygon_stipple;
7186 ctx->set_sample_mask = iris_set_sample_mask;
7187 ctx->set_scissor_states = iris_set_scissor_states;
7188 ctx->set_stencil_ref = iris_set_stencil_ref;
7189 ctx->set_vertex_buffers = iris_set_vertex_buffers;
7190 ctx->set_viewport_states = iris_set_viewport_states;
7191 ctx->sampler_view_destroy = iris_sampler_view_destroy;
7192 ctx->surface_destroy = iris_surface_destroy;
7193 ctx->draw_vbo = iris_draw_vbo;
7194 ctx->launch_grid = iris_launch_grid;
7195 ctx->create_stream_output_target = iris_create_stream_output_target;
7196 ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
7197 ctx->set_stream_output_targets = iris_set_stream_output_targets;
7198
7199 ice->vtbl.destroy_state = iris_destroy_state;
7200 ice->vtbl.init_render_context = iris_init_render_context;
7201 ice->vtbl.init_compute_context = iris_init_compute_context;
7202 ice->vtbl.upload_render_state = iris_upload_render_state;
7203 ice->vtbl.update_surface_base_address = iris_update_surface_base_address;
7204 ice->vtbl.upload_compute_state = iris_upload_compute_state;
7205 ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
7206 ice->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
7207 ice->vtbl.rebind_buffer = iris_rebind_buffer;
7208 ice->vtbl.load_register_reg32 = iris_load_register_reg32;
7209 ice->vtbl.load_register_reg64 = iris_load_register_reg64;
7210 ice->vtbl.load_register_imm32 = iris_load_register_imm32;
7211 ice->vtbl.load_register_imm64 = iris_load_register_imm64;
7212 ice->vtbl.load_register_mem32 = iris_load_register_mem32;
7213 ice->vtbl.load_register_mem64 = iris_load_register_mem64;
7214 ice->vtbl.store_register_mem32 = iris_store_register_mem32;
7215 ice->vtbl.store_register_mem64 = iris_store_register_mem64;
7216 ice->vtbl.store_data_imm32 = iris_store_data_imm32;
7217 ice->vtbl.store_data_imm64 = iris_store_data_imm64;
7218 ice->vtbl.copy_mem_mem = iris_copy_mem_mem;
7219 ice->vtbl.derived_program_state_size = iris_derived_program_state_size;
7220 ice->vtbl.store_derived_program_state = iris_store_derived_program_state;
7221 ice->vtbl.create_so_decl_list = iris_create_so_decl_list;
7222 ice->vtbl.populate_vs_key = iris_populate_vs_key;
7223 ice->vtbl.populate_tcs_key = iris_populate_tcs_key;
7224 ice->vtbl.populate_tes_key = iris_populate_tes_key;
7225 ice->vtbl.populate_gs_key = iris_populate_gs_key;
7226 ice->vtbl.populate_fs_key = iris_populate_fs_key;
7227 ice->vtbl.populate_cs_key = iris_populate_cs_key;
7228 ice->vtbl.mocs = mocs;
7229 ice->vtbl.lost_genx_state = iris_lost_genx_state;
7230
7231 ice->state.dirty = ~0ull;
7232
7233 ice->state.statistics_counters_enabled = true;
7234
7235 ice->state.sample_mask = 0xffff;
7236 ice->state.num_viewports = 1;
7237 ice->state.prim_mode = PIPE_PRIM_MAX;
7238 ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
7239 ice->draw.derived_params.drawid = -1;
7240
7241 /* Make a 1x1x1 null surface for unbound textures */
7242 void *null_surf_map =
7243 upload_state(ice->state.surface_uploader, &ice->state.unbound_tex,
7244 4 * GENX(RENDER_SURFACE_STATE_length), 64);
7245 isl_null_fill_state(&screen->isl_dev, null_surf_map, isl_extent3d(1, 1, 1));
7246 ice->state.unbound_tex.offset +=
7247 iris_bo_offset_from_base_address(iris_resource_bo(ice->state.unbound_tex.res));
7248
7249 /* Default all scissor rectangles to be empty regions. */
7250 for (int i = 0; i < IRIS_MAX_VIEWPORTS; i++) {
7251 ice->state.scissors[i] = (struct pipe_scissor_state) {
7252 .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
7253 };
7254 }
7255 }