/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_state.c
 *
 * ============================= GENXML CODE =============================
 *              [This file is compiled once per generation.]
 * =======================================================================
 *
 * This is the main state upload code.
 *
 * OpenGL involves frequently mutating context state, which is mirrored in
 * core Mesa by highly mutable data structures.  However, most applications
 * typically draw the same things over and over - from frame to frame, most
 * of the same objects are still visible and need to be redrawn.  So, rather
 * than inventing new state all the time, applications usually mutate to swap
 * between known states that we've seen before.
 *
 * Gallium isolates us from this mutation by tracking API state, and
 * distilling it into a set of Constant State Objects, or CSOs.  Large,
 * complex, or typically reusable state can be created once, then reused
 * multiple times.  Drivers can create and store their own associated data.
 * This create/bind model corresponds to the pipe->create_*_state() and
 * pipe->bind_*_state() driver hooks.
 *
 * Some state is cheap to create, or expected to be highly dynamic.  Rather
 * than creating and caching piles of CSOs for these, Gallium simply streams
 * them out, via the pipe->set_*_state() driver hooks.
 *
 * To reduce draw time overhead, we try to compute as much state at create
 * time as possible.  Wherever possible, we translate the Gallium pipe state
 * to 3DSTATE commands, and store those commands in the CSO.  At draw time,
 * we can simply memcpy them into a batch buffer.
 *
 * No hardware matches the abstraction perfectly, so some commands require
 * information from multiple CSOs.  In this case, we can store two copies
 * of the packet (one in each CSO), and simply | together their DWords at
 * draw time.  Sometimes the second set is trivial (one or two fields), so
 * we simply pack it at draw time.
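 *
 * (As a sketch, that merge amounts to nothing more than:
 *
 *    for (int i = 0; i < num_dwords; i++)
 *       out[i] = cso_a_packet[i] | cso_b_packet[i];
 *
 * where the names here are illustrative, not actual helpers.)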
 *
 * There are two main components in the file below.  First, the CSO hooks
 * create/bind/track state.  The second are the draw-time upload functions,
 * iris_upload_render_state() and iris_upload_compute_state(), which read
 * the context state and emit the commands into the actual batch.
 */

#include <stdio.h>
#include <errno.h>

#if HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#ifdef DEBUG
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#endif
#else
#define VG(x)
#endif

#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_dual_blend.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "util/u_viewport.h"
#include "util/u_memory.h"
#include "drm-uapi/i915_drm.h"
#include "nir.h"
#include "intel/compiler/brw_compiler.h"
#include "intel/common/gen_aux_map.h"
#include "intel/common/gen_l3_config.h"
#include "intel/common/gen_sample_positions.h"
#include "iris_batch.h"
#include "iris_context.h"
#include "iris_defines.h"
#include "iris_pipe.h"
#include "iris_resource.h"

#include "iris_genx_macros.h"
#include "intel/common/gen_guardband.h"

/**
 * Statically assert that PIPE_* enums match the hardware packets.
 * (As long as they match, we don't need to translate them.)
 */
UNUSED static void pipe_asserts()
{
#define PIPE_ASSERT(x) STATIC_ASSERT((int)x)

   /* pipe_logicop happens to match the hardware. */
   PIPE_ASSERT(PIPE_LOGICOP_CLEAR == LOGICOP_CLEAR);
   PIPE_ASSERT(PIPE_LOGICOP_NOR == LOGICOP_NOR);
   PIPE_ASSERT(PIPE_LOGICOP_AND_INVERTED == LOGICOP_AND_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_COPY_INVERTED == LOGICOP_COPY_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_AND_REVERSE == LOGICOP_AND_REVERSE);
   PIPE_ASSERT(PIPE_LOGICOP_INVERT == LOGICOP_INVERT);
   PIPE_ASSERT(PIPE_LOGICOP_XOR == LOGICOP_XOR);
   PIPE_ASSERT(PIPE_LOGICOP_NAND == LOGICOP_NAND);
   PIPE_ASSERT(PIPE_LOGICOP_AND == LOGICOP_AND);
   PIPE_ASSERT(PIPE_LOGICOP_EQUIV == LOGICOP_EQUIV);
   PIPE_ASSERT(PIPE_LOGICOP_NOOP == LOGICOP_NOOP);
   PIPE_ASSERT(PIPE_LOGICOP_OR_INVERTED == LOGICOP_OR_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_COPY == LOGICOP_COPY);
   PIPE_ASSERT(PIPE_LOGICOP_OR_REVERSE == LOGICOP_OR_REVERSE);
   PIPE_ASSERT(PIPE_LOGICOP_OR == LOGICOP_OR);
   PIPE_ASSERT(PIPE_LOGICOP_SET == LOGICOP_SET);

   /* pipe_blendfactor happens to match the hardware. */
   PIPE_ASSERT(PIPE_BLENDFACTOR_ONE == BLENDFACTOR_ONE);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_COLOR == BLENDFACTOR_SRC_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA == BLENDFACTOR_SRC_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_ALPHA == BLENDFACTOR_DST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_COLOR == BLENDFACTOR_DST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE == BLENDFACTOR_SRC_ALPHA_SATURATE);
   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_COLOR == BLENDFACTOR_CONST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_ALPHA == BLENDFACTOR_CONST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_COLOR == BLENDFACTOR_SRC1_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_ALPHA == BLENDFACTOR_SRC1_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_ZERO == BLENDFACTOR_ZERO);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_COLOR == BLENDFACTOR_INV_SRC_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_ALPHA == BLENDFACTOR_INV_SRC_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_ALPHA == BLENDFACTOR_INV_DST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_COLOR == BLENDFACTOR_INV_DST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_COLOR == BLENDFACTOR_INV_CONST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_ALPHA == BLENDFACTOR_INV_CONST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_COLOR == BLENDFACTOR_INV_SRC1_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_ALPHA == BLENDFACTOR_INV_SRC1_ALPHA);

   /* pipe_blend_func happens to match the hardware. */
   PIPE_ASSERT(PIPE_BLEND_ADD == BLENDFUNCTION_ADD);
   PIPE_ASSERT(PIPE_BLEND_SUBTRACT == BLENDFUNCTION_SUBTRACT);
   PIPE_ASSERT(PIPE_BLEND_REVERSE_SUBTRACT == BLENDFUNCTION_REVERSE_SUBTRACT);
   PIPE_ASSERT(PIPE_BLEND_MIN == BLENDFUNCTION_MIN);
   PIPE_ASSERT(PIPE_BLEND_MAX == BLENDFUNCTION_MAX);

   /* pipe_stencil_op happens to match the hardware. */
   PIPE_ASSERT(PIPE_STENCIL_OP_KEEP == STENCILOP_KEEP);
   PIPE_ASSERT(PIPE_STENCIL_OP_ZERO == STENCILOP_ZERO);
   PIPE_ASSERT(PIPE_STENCIL_OP_REPLACE == STENCILOP_REPLACE);
   PIPE_ASSERT(PIPE_STENCIL_OP_INCR == STENCILOP_INCRSAT);
   PIPE_ASSERT(PIPE_STENCIL_OP_DECR == STENCILOP_DECRSAT);
   PIPE_ASSERT(PIPE_STENCIL_OP_INCR_WRAP == STENCILOP_INCR);
   PIPE_ASSERT(PIPE_STENCIL_OP_DECR_WRAP == STENCILOP_DECR);
   PIPE_ASSERT(PIPE_STENCIL_OP_INVERT == STENCILOP_INVERT);

   /* pipe_sprite_coord_mode happens to match 3DSTATE_SBE */
   PIPE_ASSERT(PIPE_SPRITE_COORD_UPPER_LEFT == UPPERLEFT);
   PIPE_ASSERT(PIPE_SPRITE_COORD_LOWER_LEFT == LOWERLEFT);
#undef PIPE_ASSERT
}

static unsigned
translate_prim_type(enum pipe_prim_type prim, uint8_t verts_per_patch)
{
   static const unsigned map[] = {
      [PIPE_PRIM_POINTS] = _3DPRIM_POINTLIST,
      [PIPE_PRIM_LINES] = _3DPRIM_LINELIST,
      [PIPE_PRIM_LINE_LOOP] = _3DPRIM_LINELOOP,
      [PIPE_PRIM_LINE_STRIP] = _3DPRIM_LINESTRIP,
      [PIPE_PRIM_TRIANGLES] = _3DPRIM_TRILIST,
      [PIPE_PRIM_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
      [PIPE_PRIM_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
      [PIPE_PRIM_QUADS] = _3DPRIM_QUADLIST,
      [PIPE_PRIM_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
      [PIPE_PRIM_POLYGON] = _3DPRIM_POLYGON,
      [PIPE_PRIM_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
      [PIPE_PRIM_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
      [PIPE_PRIM_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
      [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
      [PIPE_PRIM_PATCHES] = _3DPRIM_PATCHLIST_1 - 1,
   };

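   /* The _3DPRIM_PATCHLIST_n topologies are numbered consecutively, so
    * adding the patch vertex count to (_3DPRIM_PATCHLIST_1 - 1) yields
    * the matching _3DPRIM_PATCHLIST_<n> value.
    */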
   return map[prim] + (prim == PIPE_PRIM_PATCHES ? verts_per_patch : 0);
}

static unsigned
translate_compare_func(enum pipe_compare_func pipe_func)
{
   static const unsigned map[] = {
      [PIPE_FUNC_NEVER] = COMPAREFUNCTION_NEVER,
      [PIPE_FUNC_LESS] = COMPAREFUNCTION_LESS,
      [PIPE_FUNC_EQUAL] = COMPAREFUNCTION_EQUAL,
      [PIPE_FUNC_LEQUAL] = COMPAREFUNCTION_LEQUAL,
      [PIPE_FUNC_GREATER] = COMPAREFUNCTION_GREATER,
      [PIPE_FUNC_NOTEQUAL] = COMPAREFUNCTION_NOTEQUAL,
      [PIPE_FUNC_GEQUAL] = COMPAREFUNCTION_GEQUAL,
      [PIPE_FUNC_ALWAYS] = COMPAREFUNCTION_ALWAYS,
   };
   return map[pipe_func];
}

static unsigned
translate_shadow_func(enum pipe_compare_func pipe_func)
{
   /* Gallium specifies the result of shadow comparisons as:
    *
    *    1 if ref <op> texel,
    *    0 otherwise.
    *
    * The hardware does:
    *
    *    0 if texel <op> ref,
    *    1 otherwise.
    *
    * So we need to flip the operator and also negate.
    */
   static const unsigned map[] = {
      [PIPE_FUNC_NEVER] = PREFILTEROPALWAYS,
      [PIPE_FUNC_LESS] = PREFILTEROPLEQUAL,
      [PIPE_FUNC_EQUAL] = PREFILTEROPNOTEQUAL,
      [PIPE_FUNC_LEQUAL] = PREFILTEROPLESS,
      [PIPE_FUNC_GREATER] = PREFILTEROPGEQUAL,
      [PIPE_FUNC_NOTEQUAL] = PREFILTEROPEQUAL,
      [PIPE_FUNC_GEQUAL] = PREFILTEROPGREATER,
      [PIPE_FUNC_ALWAYS] = PREFILTEROPNEVER,
   };
   return map[pipe_func];
}

static unsigned
translate_cull_mode(unsigned pipe_face)
{
   static const unsigned map[4] = {
      [PIPE_FACE_NONE] = CULLMODE_NONE,
      [PIPE_FACE_FRONT] = CULLMODE_FRONT,
      [PIPE_FACE_BACK] = CULLMODE_BACK,
      [PIPE_FACE_FRONT_AND_BACK] = CULLMODE_BOTH,
   };
   return map[pipe_face];
}

static unsigned
translate_fill_mode(unsigned pipe_polymode)
{
   static const unsigned map[4] = {
      [PIPE_POLYGON_MODE_FILL] = FILL_MODE_SOLID,
      [PIPE_POLYGON_MODE_LINE] = FILL_MODE_WIREFRAME,
      [PIPE_POLYGON_MODE_POINT] = FILL_MODE_POINT,
      [PIPE_POLYGON_MODE_FILL_RECTANGLE] = FILL_MODE_SOLID,
   };
   return map[pipe_polymode];
}

static unsigned
translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
{
   static const unsigned map[] = {
      [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
      [PIPE_TEX_MIPFILTER_LINEAR] = MIPFILTER_LINEAR,
      [PIPE_TEX_MIPFILTER_NONE] = MIPFILTER_NONE,
   };
   return map[pipe_mip];
}

static uint32_t
translate_wrap(unsigned pipe_wrap)
{
   static const unsigned map[] = {
      [PIPE_TEX_WRAP_REPEAT] = TCM_WRAP,
      [PIPE_TEX_WRAP_CLAMP] = TCM_HALF_BORDER,
      [PIPE_TEX_WRAP_CLAMP_TO_EDGE] = TCM_CLAMP,
      [PIPE_TEX_WRAP_CLAMP_TO_BORDER] = TCM_CLAMP_BORDER,
      [PIPE_TEX_WRAP_MIRROR_REPEAT] = TCM_MIRROR,
      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,

      /* These are unsupported. */
      [PIPE_TEX_WRAP_MIRROR_CLAMP] = -1,
      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
   };
   return map[pipe_wrap];
}

/**
 * Allocate space for some indirect state.
 *
 * Return a pointer to the map (to fill it out) and a state ref (for
 * referring to the state in GPU commands).
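 *
 * A typical call site might look like (a sketch; "ref" stands for any
 * iris_state_ref and the size is illustrative):
 *
 *    uint32_t *map = upload_state(uploader, &ref,
 *                                 4 * GENX(CC_VIEWPORT_length), 32);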
 */
static void *
upload_state(struct u_upload_mgr *uploader,
             struct iris_state_ref *ref,
             unsigned size,
             unsigned alignment)
{
   void *p = NULL;
   u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
   return p;
}

/**
 * Stream out temporary/short-lived state.
 *
 * This allocates space, pins the BO, and includes the BO address in the
 * returned offset (which works because all state lives in 32-bit memory
 * zones).
 */
static uint32_t *
stream_state(struct iris_batch *batch,
             struct u_upload_mgr *uploader,
             struct pipe_resource **out_res,
             unsigned size,
             unsigned alignment,
             uint32_t *out_offset)
{
   void *ptr = NULL;

   u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);

   struct iris_bo *bo = iris_resource_bo(*out_res);
   iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

   iris_record_state_size(batch->state_sizes,
                          bo->gtt_offset + *out_offset, size);

   *out_offset += iris_bo_offset_from_base_address(bo);

   return ptr;
}

/**
 * stream_state() + memcpy.
 */
static uint32_t
emit_state(struct iris_batch *batch,
           struct u_upload_mgr *uploader,
           struct pipe_resource **out_res,
           const void *data,
           unsigned size,
           unsigned alignment)
{
   unsigned offset = 0;
   uint32_t *map =
      stream_state(batch, uploader, out_res, size, alignment, &offset);

   if (map)
      memcpy(map, data, size);

   return offset;
}

/**
 * Did field 'x' change between 'old_cso' and 'new_cso'?
 *
 * (If so, we may want to set some dirty flags.)
 */
#define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
#define cso_changed_memcmp(x) \
   (!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
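
/* For example, a bind hook can do:
 *
 *    if (cso_changed(alpha.enabled))
 *       ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;
 *
 * assuming old_cso/new_cso locals of the CSO type are in scope.
 */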

static void
flush_before_state_base_change(struct iris_batch *batch)
{
   const struct gen_device_info *devinfo = &batch->screen->devinfo;

   /* Flush before emitting STATE_BASE_ADDRESS.
    *
    * This isn't documented anywhere in the PRM.  However, it seems to be
    * necessary prior to changing the surface state base address.  We've
    * seen issues in Vulkan where we get GPU hangs when using multi-level
    * command buffers which clear depth, reset state base address, and then
    * go render stuff.
    *
    * Normally, in GL, we would trust the kernel to do sufficient stalls
    * and flushes prior to executing our batch.  However, it doesn't seem
    * as if the kernel's flushing is always sufficient and we don't want to
    * rely on it.
    *
    * We make this an end-of-pipe sync instead of a normal flush because we
    * do not know the current status of the GPU.  On Haswell at least,
    * having a fast-clear operation in flight at the same time as a normal
    * rendering operation can cause hangs.  Since the kernel's flushing is
    * insufficient, we need to ensure that any rendering operations from
    * other processes are definitely complete before we try to do our own
    * rendering.  It's a bit of a big hammer but it appears to work.
    */
   iris_emit_end_of_pipe_sync(batch,
                              "change STATE_BASE_ADDRESS (flushes)",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH |
                              PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                              PIPE_CONTROL_DATA_CACHE_FLUSH |
                              /* GEN:BUG:1606662791:
                               *
                               *   Software must program PIPE_CONTROL command
                               *   with "HDC Pipeline Flush" prior to
                               *   programming of the below two non-pipeline
                               *   state :
                               *      * STATE_BASE_ADDRESS
                               *      * 3DSTATE_BINDING_TABLE_POOL_ALLOC
                               */
                              ((GEN_GEN == 12 && devinfo->revision == 0 /* A0 */ ?
                                PIPE_CONTROL_FLUSH_HDC : 0)));
}

static void
flush_after_state_base_change(struct iris_batch *batch)
{
   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables.  From the Broadwell PRM,
    * Shared Function > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software. It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    [...]
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * Broadwell PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever in
    * regards to surface state and binding tables.  Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX: As far as we have been able to determine through
    * experimentation, flushing the texture cache appears to be
    * sufficient.  The theory here is that all of the sampling/rendering
    * units cache the binding table in the texture cache.  However, we have
    * yet to be able to actually confirm this.
    */
   iris_emit_end_of_pipe_sync(batch,
                              "change STATE_BASE_ADDRESS (invalidates)",
                              PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                              PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                              PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}

static void
_iris_emit_lri(struct iris_batch *batch, uint32_t reg, uint32_t val)
{
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset = reg;
      lri.DataDWord = val;
   }
}
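
/* Syntactic sugar: iris_emit_lri(batch, CACHE_MODE_1, val) resolves the
 * register name to its per-generation GENX(CACHE_MODE_1_num) MMIO offset.
 */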
#define iris_emit_lri(b, r, v) _iris_emit_lri(b, GENX(r##_num), v)

static void
_iris_emit_lrr(struct iris_batch *batch, uint32_t dst, uint32_t src)
{
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
      lrr.SourceRegisterAddress = src;
      lrr.DestinationRegisterAddress = dst;
   }
}

static void
iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
                         uint32_t src)
{
   _iris_emit_lrr(batch, dst, src);
}

static void
iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
                         uint32_t src)
{
   _iris_emit_lrr(batch, dst, src);
   _iris_emit_lrr(batch, dst + 4, src + 4);
}

static void
iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
                         uint32_t val)
{
   _iris_emit_lri(batch, reg, val);
}

static void
iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
                         uint64_t val)
{
   _iris_emit_lri(batch, reg + 0, val & 0xffffffff);
   _iris_emit_lri(batch, reg + 4, val >> 32);
}

/**
 * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
 */
static void
iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
                         struct iris_bo *bo, uint32_t offset)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = reg;
      lrm.MemoryAddress = ro_bo(bo, offset);
   }
   iris_batch_sync_region_end(batch);
}

/**
 * Load a 64-bit value from a buffer into a MMIO register via
 * two MI_LOAD_REGISTER_MEM commands.
 */
static void
iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
                         struct iris_bo *bo, uint32_t offset)
{
   iris_load_register_mem32(batch, reg + 0, bo, offset + 0);
   iris_load_register_mem32(batch, reg + 4, bo, offset + 4);
}

static void
iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
                          struct iris_bo *bo, uint32_t offset,
                          bool predicated)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
      srm.RegisterAddress = reg;
      srm.MemoryAddress = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      srm.PredicateEnable = predicated;
   }
   iris_batch_sync_region_end(batch);
}

static void
iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
                          struct iris_bo *bo, uint32_t offset,
                          bool predicated)
{
   iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated);
   iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated);
}

static void
iris_store_data_imm32(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint32_t imm)
{
   iris_batch_sync_region_start(batch);
   iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) {
      sdi.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      sdi.ImmediateData = imm;
   }
   iris_batch_sync_region_end(batch);
}

static void
iris_store_data_imm64(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint64_t imm)
{
   /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of
    * 2 in genxml but it's actually variable length and we need 5 DWords.
    */
   void *map = iris_get_command_space(batch, 4 * 5);
   iris_batch_sync_region_start(batch);
   _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) {
      sdi.DWordLength = 5 - 2;
      sdi.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
      sdi.ImmediateData = imm;
   }
   iris_batch_sync_region_end(batch);
}

static void
iris_copy_mem_mem(struct iris_batch *batch,
                  struct iris_bo *dst_bo, uint32_t dst_offset,
                  struct iris_bo *src_bo, uint32_t src_offset,
                  unsigned bytes)
{
   /* MI_COPY_MEM_MEM operates on DWords. */
   assert(bytes % 4 == 0);
   assert(dst_offset % 4 == 0);
   assert(src_offset % 4 == 0);
   iris_batch_sync_region_start(batch);

   for (unsigned i = 0; i < bytes; i += 4) {
      iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
         cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i,
                                             IRIS_DOMAIN_OTHER_WRITE);
         cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
      }
   }

   iris_batch_sync_region_end(batch);
}

static void
emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
{
#if GEN_GEN >= 8 && GEN_GEN < 10
   /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
    *
    *    Software must clear the COLOR_CALC_STATE Valid field in
    *    3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
    *    with Pipeline Select set to GPGPU.
    *
    * The internal hardware docs recommend the same workaround for Gen9
    * hardware too.
    */
   if (pipeline == GPGPU)
      iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
#endif

   /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
    * PIPELINE_SELECT [DevBWR+]":
    *
    *    "Project: DEVSNB+
    *
    *     Software must ensure all the write caches are flushed through a
    *     stalling PIPE_CONTROL command followed by another PIPE_CONTROL
    *     command to invalidate read only caches prior to programming
    *     MI_PIPELINE_SELECT command to change the Pipeline Select Mode."
    */
   iris_emit_pipe_control_flush(batch,
                                "workaround: PIPELINE_SELECT flushes (1/2)",
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_DATA_CACHE_FLUSH |
                                PIPE_CONTROL_CS_STALL);

   iris_emit_pipe_control_flush(batch,
                                "workaround: PIPELINE_SELECT flushes (2/2)",
                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                PIPE_CONTROL_INSTRUCTION_INVALIDATE);

   iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
#if GEN_GEN >= 9
      sel.MaskBits = 3;
#endif
      sel.PipelineSelection = pipeline;
   }
}

UNUSED static void
init_glk_barrier_mode(struct iris_batch *batch, uint32_t value)
{
#if GEN_GEN == 9
   /* Project: DevGLK
    *
    *    "This chicken bit works around a hardware issue with barrier
    *     logic encountered when switching between GPGPU and 3D pipelines.
    *     To workaround the issue, this mode bit should be set after a
    *     pipeline is selected."
    */
   uint32_t reg_val;
   iris_pack_state(GENX(SLICE_COMMON_ECO_CHICKEN1), &reg_val, reg) {
      reg.GLKBarrierMode = value;
      reg.GLKBarrierModeMask = 1;
   }
   iris_emit_lri(batch, SLICE_COMMON_ECO_CHICKEN1, reg_val);
#endif
}

static void
init_state_base_address(struct iris_batch *batch)
{
   uint32_t mocs = batch->screen->isl_dev.mocs.internal;
   flush_before_state_base_change(batch);

   /* We program most base addresses once at context initialization time.
    * Each base address points at a 4GB memory zone, and never needs to
    * change.  See iris_bufmgr.h for a description of the memory zones.
    *
    * The one exception is Surface State Base Address, which needs to be
    * updated occasionally.  See iris_binder.c for the details there.
    */
   iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.GeneralStateMOCS            = mocs;
      sba.StatelessDataPortAccessMOCS = mocs;
      sba.DynamicStateMOCS            = mocs;
      sba.IndirectObjectMOCS          = mocs;
      sba.InstructionMOCS             = mocs;
      sba.SurfaceStateMOCS            = mocs;

      sba.GeneralStateBaseAddressModifyEnable   = true;
      sba.DynamicStateBaseAddressModifyEnable   = true;
      sba.IndirectObjectBaseAddressModifyEnable = true;
      sba.InstructionBaseAddressModifyEnable    = true;
      sba.GeneralStateBufferSizeModifyEnable    = true;
      sba.DynamicStateBufferSizeModifyEnable    = true;
#if (GEN_GEN >= 9)
      sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
      sba.BindlessSurfaceStateMOCS    = mocs;
#endif
      sba.IndirectObjectBufferSizeModifyEnable  = true;
      sba.InstructionBuffersizeModifyEnable     = true;

      sba.InstructionBaseAddress  = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
      sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);

      sba.GeneralStateBufferSize   = 0xfffff;
      sba.IndirectObjectBufferSize = 0xfffff;
      sba.InstructionBufferSize    = 0xfffff;
      sba.DynamicStateBufferSize   = 0xfffff;
   }

   flush_after_state_base_change(batch);
}

static void
iris_emit_l3_config(struct iris_batch *batch,
                    const struct gen_l3_config *cfg)
{
   uint32_t reg_val;
   assert(cfg || GEN_GEN >= 12);

#if GEN_GEN >= 12
#define L3_ALLOCATION_REG GENX(L3ALLOC)
#define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
#else
#define L3_ALLOCATION_REG GENX(L3CNTLREG)
#define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
#endif

   iris_pack_state(L3_ALLOCATION_REG, &reg_val, reg) {
#if GEN_GEN < 11
      reg.SLMEnable = cfg->n[GEN_L3P_SLM] > 0;
#endif
#if GEN_GEN == 11
      /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
       * in L3CNTLREG register.  The default setting of the bit is not the
       * desirable behavior.
       */
      reg.ErrorDetectionBehaviorControl = true;
      reg.UseFullWays = true;
#endif
      if (GEN_GEN < 12 || cfg) {
         reg.URBAllocation = cfg->n[GEN_L3P_URB];
         reg.ROAllocation = cfg->n[GEN_L3P_RO];
         reg.DCAllocation = cfg->n[GEN_L3P_DC];
         reg.AllAllocation = cfg->n[GEN_L3P_ALL];
      } else {
#if GEN_GEN >= 12
         reg.L3FullWayAllocationEnable = true;
#endif
      }
   }
   _iris_emit_lri(batch, L3_ALLOCATION_REG_num, reg_val);
}

#if GEN_GEN == 9
static void
iris_enable_obj_preemption(struct iris_batch *batch, bool enable)
{
   uint32_t reg_val;

   /* A fixed function pipe flush is required before modifying this field */
   iris_emit_end_of_pipe_sync(batch, enable ? "enable preemption"
                                            : "disable preemption",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* enable object level preemption */
   iris_pack_state(GENX(CS_CHICKEN1), &reg_val, reg) {
      reg.ReplayMode = enable;
      reg.ReplayModeMask = true;
   }
   iris_emit_lri(batch, CS_CHICKEN1, reg_val);
}
#endif

#if GEN_GEN == 11
static void
iris_upload_slice_hashing_state(struct iris_batch *batch)
{
   const struct gen_device_info *devinfo = &batch->screen->devinfo;
   int subslices_delta =
      devinfo->ppipe_subslices[0] - devinfo->ppipe_subslices[1];
   if (subslices_delta == 0)
      return;

   struct iris_context *ice = NULL;
   ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]);
   assert(&ice->batches[IRIS_BATCH_RENDER] == batch);

   unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
   uint32_t hash_address;
   struct pipe_resource *tmp = NULL;
   uint32_t *map =
      stream_state(batch, ice->state.dynamic_uploader, &tmp,
                   size, 64, &hash_address);
   pipe_resource_reference(&tmp, NULL);

   struct GENX(SLICE_HASH_TABLE) table0 = {
      .Entry = {
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }
      }
   };

   struct GENX(SLICE_HASH_TABLE) table1 = {
      .Entry = {
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }
      }
   };

   const struct GENX(SLICE_HASH_TABLE) *table =
      subslices_delta < 0 ? &table0 : &table1;
   GENX(SLICE_HASH_TABLE_pack)(NULL, map, table);

   iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
      ptr.SliceHashStatePointerValid = true;
      ptr.SliceHashTableStatePointer = hash_address;
   }

   iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) {
      mode.SliceHashingTableEnable = true;
   }
}
#endif

static void
iris_alloc_push_constants(struct iris_batch *batch)
{
   /* For now, we set a static partitioning of the push constant area,
    * assuming that all stages could be in use.
    *
    * TODO: Try lazily allocating the HS/DS/GS sections as needed, and
    *       see if that improves performance by offering more space to
    *       the VS/FS when those aren't in use.  Also, try dynamically
    *       enabling/disabling it like i965 does.  That would mean more
    *       stalls and may not actually help; we don't know yet.
    */
   for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
      iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
         alloc._3DCommandSubOpcode = 18 + i;
         alloc.ConstantBufferOffset = 6 * i;
         alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
      }
   }
}

#if GEN_GEN >= 12
static void
init_aux_map_state(struct iris_batch *batch);
#endif

/**
 * Upload the initial GPU state for a render context.
 *
 * This sets some invariant state that needs to be programmed a particular
 * way, but that we never actually change.
 */
static void
iris_init_render_context(struct iris_batch *batch)
{
   UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
   uint32_t reg_val;

   iris_batch_sync_region_start(batch);

   emit_pipeline_select(batch, _3D);

   iris_emit_l3_config(batch, batch->screen->l3_config_3d);

   init_state_base_address(batch);

#if GEN_GEN >= 9
   iris_pack_state(GENX(CS_DEBUG_MODE2), &reg_val, reg) {
      reg.CONSTANT_BUFFERAddressOffsetDisable = true;
      reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
   }
   iris_emit_lri(batch, CS_DEBUG_MODE2, reg_val);
#else
   iris_pack_state(GENX(INSTPM), &reg_val, reg) {
      reg.CONSTANT_BUFFERAddressOffsetDisable = true;
      reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
   }
   iris_emit_lri(batch, INSTPM, reg_val);
#endif

#if GEN_GEN == 9
   iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
      reg.FloatBlendOptimizationEnable = true;
      reg.FloatBlendOptimizationEnableMask = true;
      reg.PartialResolveDisableInVC = true;
      reg.PartialResolveDisableInVCMask = true;
   }
   iris_emit_lri(batch, CACHE_MODE_1, reg_val);

   if (devinfo->is_geminilake)
      init_glk_barrier_mode(batch, GLK_BARRIER_MODE_3D_HULL);
#endif

#if GEN_GEN == 11
   iris_pack_state(GENX(TCCNTLREG), &reg_val, reg) {
      reg.L3DataPartialWriteMergingEnable = true;
      reg.ColorZPartialWriteMergingEnable = true;
      reg.URBPartialWriteMergingEnable = true;
      reg.TCDisable = true;
   }
   iris_emit_lri(batch, TCCNTLREG, reg_val);

   iris_pack_state(GENX(SAMPLER_MODE), &reg_val, reg) {
      reg.HeaderlessMessageforPreemptableContexts = 1;
      reg.HeaderlessMessageforPreemptableContextsMask = 1;
   }
   iris_emit_lri(batch, SAMPLER_MODE, reg_val);

   /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */
   iris_pack_state(GENX(HALF_SLICE_CHICKEN7), &reg_val, reg) {
      reg.EnabledTexelOffsetPrecisionFix = 1;
      reg.EnabledTexelOffsetPrecisionFixMask = 1;
   }
   iris_emit_lri(batch, HALF_SLICE_CHICKEN7, reg_val);

   /* The hardware specification recommends disabling repacking for
    * compatibility with the decompression mechanism in the display
    * controller.
    */
   if (devinfo->disable_ccs_repack) {
      iris_pack_state(GENX(CACHE_MODE_0), &reg_val, reg) {
         reg.DisableRepackingforCompression = true;
         reg.DisableRepackingforCompressionMask = true;
      }
      iris_emit_lri(batch, CACHE_MODE_0, reg_val);
   }

   iris_upload_slice_hashing_state(batch);
#endif

   /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
    * changing it dynamically.  We set it to the maximum size here, and
    * instead include the render target dimensions in the viewport, so
    * viewport extents clipping takes care of pruning stray geometry.
    */
   iris_emit_cmd(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = UINT16_MAX;
      rect.ClippedDrawingRectangleYMax = UINT16_MAX;
   }

   /* Set the initial MSAA sample positions. */
   iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_PATTERN), pat) {
      GEN_SAMPLE_POS_1X(pat._1xSample);
      GEN_SAMPLE_POS_2X(pat._2xSample);
      GEN_SAMPLE_POS_4X(pat._4xSample);
      GEN_SAMPLE_POS_8X(pat._8xSample);
#if GEN_GEN >= 9
      GEN_SAMPLE_POS_16X(pat._16xSample);
#endif
   }

   /* Use the legacy AA line coverage computation. */
   iris_emit_cmd(batch, GENX(3DSTATE_AA_LINE_PARAMETERS), foo);

   /* Disable chromakeying (it's for media) */
   iris_emit_cmd(batch, GENX(3DSTATE_WM_CHROMAKEY), foo);

   /* We want regular rendering, not special HiZ operations. */
   iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);

   /* No polygon stippling offsets are necessary. */
   /* TODO: may need to set an offset for origin-UL framebuffers */
   iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);

   iris_alloc_push_constants(batch);

#if GEN_GEN >= 12
   init_aux_map_state(batch);
#endif

   iris_batch_sync_region_end(batch);
}

static void
iris_init_compute_context(struct iris_batch *batch)
{
   UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;

   iris_batch_sync_region_start(batch);

   /* GEN:BUG:1607854226:
    *
    * Start with pipeline in 3D mode to set the STATE_BASE_ADDRESS.
    */
#if GEN_GEN == 12
   emit_pipeline_select(batch, _3D);
#else
   emit_pipeline_select(batch, GPGPU);
#endif

   iris_emit_l3_config(batch, batch->screen->l3_config_cs);

   init_state_base_address(batch);

#if GEN_GEN == 12
   emit_pipeline_select(batch, GPGPU);
#endif

#if GEN_GEN == 9
   if (devinfo->is_geminilake)
      init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
#endif

#if GEN_GEN >= 12
   init_aux_map_state(batch);
#endif

   iris_batch_sync_region_end(batch);
}

struct iris_vertex_buffer_state {
   /** The VERTEX_BUFFER_STATE hardware structure. */
   uint32_t state[GENX(VERTEX_BUFFER_STATE_length)];

   /** The resource to source vertex data from. */
   struct pipe_resource *resource;

   int offset;
};

struct iris_depth_buffer_state {
   /* Depth/HiZ/Stencil related hardware packets. */
   uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
                    GENX(3DSTATE_STENCIL_BUFFER_length) +
                    GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
                    GENX(3DSTATE_CLEAR_PARAMS_length) +
                    GENX(MI_LOAD_REGISTER_IMM_length) * 2];
};

/**
 * Generation-specific context state (ice->state.genx->...).
 *
 * Most state can go in iris_context directly, but these encode hardware
 * packets which vary by generation.
 */
struct iris_genx_state {
   struct iris_vertex_buffer_state vertex_buffers[33];
   uint32_t last_index_buffer[GENX(3DSTATE_INDEX_BUFFER_length)];

   struct iris_depth_buffer_state depth_buffer;

   uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];

#if GEN_GEN == 8
   bool pma_fix_enabled;
#endif

#if GEN_GEN == 9
   /* Is object level preemption enabled? */
   bool object_preemption;
#endif

   struct {
#if GEN_GEN == 8
      struct brw_image_param image_param[PIPE_MAX_SHADER_IMAGES];
#endif
   } shaders[MESA_SHADER_STAGES];
};

/**
 * The pipe->set_blend_color() driver hook.
 *
 * This corresponds to our COLOR_CALC_STATE.
 */
static void
iris_set_blend_color(struct pipe_context *ctx,
                     const struct pipe_blend_color *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   /* Our COLOR_CALC_STATE is exactly pipe_blend_color, so just memcpy */
   memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
   ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
}

/**
 * Gallium CSO for blend state (see pipe_blend_state).
 */
struct iris_blend_state {
   /** Partial 3DSTATE_PS_BLEND */
   uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];

   /** Partial BLEND_STATE */
   uint32_t blend_state[GENX(BLEND_STATE_length) +
                        BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)];

   bool alpha_to_coverage; /* for shader key */

   /** Bitfield of whether blending is enabled for RT[i] - for aux resolves */
   uint8_t blend_enables;

   /** Bitfield of whether color writes are enabled for RT[i] */
   uint8_t color_write_enables;

   /** Does RT[0] use dual color blending? */
   bool dual_color_blending;
};

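/**
 * Fix up blend factors for alpha-to-one.
 *
 * Alpha-to-one forces the source alpha to 1.0, but (as far as we can tell)
 * it does not apply to the dual-source (SRC1) alpha, so we fold SRC1 alpha
 * factors into the constants they would produce if src1.a were 1.0.
 */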
static enum pipe_blendfactor
fix_blendfactor(enum pipe_blendfactor f, bool alpha_to_one)
{
   if (alpha_to_one) {
      if (f == PIPE_BLENDFACTOR_SRC1_ALPHA)
         return PIPE_BLENDFACTOR_ONE;

      if (f == PIPE_BLENDFACTOR_INV_SRC1_ALPHA)
         return PIPE_BLENDFACTOR_ZERO;
   }

   return f;
}

/**
 * The pipe->create_blend_state() driver hook.
 *
 * Translates a pipe_blend_state into iris_blend_state.
 */
static void *
iris_create_blend_state(struct pipe_context *ctx,
                        const struct pipe_blend_state *state)
{
   struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
   uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);

   cso->blend_enables = 0;
   cso->color_write_enables = 0;
   STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS <= 8);

   cso->alpha_to_coverage = state->alpha_to_coverage;

   bool indep_alpha_blend = false;

   for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
      const struct pipe_rt_blend_state *rt =
         &state->rt[state->independent_blend_enable ? i : 0];

      enum pipe_blendfactor src_rgb =
         fix_blendfactor(rt->rgb_src_factor, state->alpha_to_one);
      enum pipe_blendfactor src_alpha =
         fix_blendfactor(rt->alpha_src_factor, state->alpha_to_one);
      enum pipe_blendfactor dst_rgb =
         fix_blendfactor(rt->rgb_dst_factor, state->alpha_to_one);
      enum pipe_blendfactor dst_alpha =
         fix_blendfactor(rt->alpha_dst_factor, state->alpha_to_one);

      if (rt->rgb_func != rt->alpha_func ||
          src_rgb != src_alpha || dst_rgb != dst_alpha)
         indep_alpha_blend = true;

      if (rt->blend_enable)
         cso->blend_enables |= 1u << i;

      if (rt->colormask)
         cso->color_write_enables |= 1u << i;

      iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
         be.LogicOpEnable = state->logicop_enable;
         be.LogicOpFunction = state->logicop_func;

         be.PreBlendSourceOnlyClampEnable = false;
         be.ColorClampRange = COLORCLAMP_RTFORMAT;
         be.PreBlendColorClampEnable = true;
         be.PostBlendColorClampEnable = true;

         be.ColorBufferBlendEnable = rt->blend_enable;

         be.ColorBlendFunction          = rt->rgb_func;
         be.AlphaBlendFunction          = rt->alpha_func;
         be.SourceBlendFactor           = src_rgb;
         be.SourceAlphaBlendFactor      = src_alpha;
         be.DestinationBlendFactor      = dst_rgb;
         be.DestinationAlphaBlendFactor = dst_alpha;

         be.WriteDisableRed   = !(rt->colormask & PIPE_MASK_R);
         be.WriteDisableGreen = !(rt->colormask & PIPE_MASK_G);
         be.WriteDisableBlue  = !(rt->colormask & PIPE_MASK_B);
         be.WriteDisableAlpha = !(rt->colormask & PIPE_MASK_A);
      }
      blend_entry += GENX(BLEND_STATE_ENTRY_length);
   }

   iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
      /* pb.HasWriteableRT is filled in at draw time.
       * pb.AlphaTestEnable is filled in at draw time.
       *
       * pb.ColorBufferBlendEnable is filled in at draw time so we can avoid
       * setting it when dual color blending without an appropriate shader.
       */

      pb.AlphaToCoverageEnable = state->alpha_to_coverage;
      pb.IndependentAlphaBlendEnable = indep_alpha_blend;

      pb.SourceBlendFactor =
         fix_blendfactor(state->rt[0].rgb_src_factor, state->alpha_to_one);
      pb.SourceAlphaBlendFactor =
         fix_blendfactor(state->rt[0].alpha_src_factor, state->alpha_to_one);
      pb.DestinationBlendFactor =
         fix_blendfactor(state->rt[0].rgb_dst_factor, state->alpha_to_one);
      pb.DestinationAlphaBlendFactor =
         fix_blendfactor(state->rt[0].alpha_dst_factor, state->alpha_to_one);
   }

   iris_pack_state(GENX(BLEND_STATE), cso->blend_state, bs) {
      bs.AlphaToCoverageEnable = state->alpha_to_coverage;
      bs.IndependentAlphaBlendEnable = indep_alpha_blend;
      bs.AlphaToOneEnable = state->alpha_to_one;
      bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
      bs.ColorDitherEnable = state->dither;
      /* bs.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
   }

   cso->dual_color_blending = util_blend_state_is_dual(state, 0);

   return cso;
}

/**
 * The pipe->bind_blend_state() driver hook.
 *
 * Bind a blending CSO and flag related dirty bits.
 */
static void
iris_bind_blend_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_blend_state *cso = state;

   ice->state.cso_blend = cso;

   ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
   ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
   ice->state.stage_dirty |= ice->state.stage_dirty_for_nos[IRIS_NOS_BLEND];

   if (GEN_GEN == 8)
      ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}

/**
 * Return true if the FS writes to any color outputs which are not disabled
 * via color masking.
 */
static bool
has_writeable_rt(const struct iris_blend_state *cso_blend,
                 const struct shader_info *fs_info)
{
   if (!fs_info)
      return false;

   unsigned rt_outputs = fs_info->outputs_written >> FRAG_RESULT_DATA0;

   if (fs_info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR))
      rt_outputs = (1 << BRW_MAX_DRAW_BUFFERS) - 1;

   return cso_blend->color_write_enables & rt_outputs;
}

/**
 * Gallium CSO for depth, stencil, and alpha testing state.
 */
struct iris_depth_stencil_alpha_state {
   /** Partial 3DSTATE_WM_DEPTH_STENCIL. */
   uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];

#if GEN_GEN >= 12
   uint32_t depth_bounds[GENX(3DSTATE_DEPTH_BOUNDS_length)];
#endif

   /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
   struct pipe_alpha_state alpha;

   /** Outbound to resolve and cache set tracking. */
   bool depth_writes_enabled;
   bool stencil_writes_enabled;

   /** Outbound to Gen8-9 PMA stall equations */
   bool depth_test_enabled;
};

/**
 * The pipe->create_depth_stencil_alpha_state() driver hook.
 *
 * We encode most of 3DSTATE_WM_DEPTH_STENCIL, and just save off the alpha
 * testing state since we need pieces of it in a variety of places.
 */
static void *
iris_create_zsa_state(struct pipe_context *ctx,
                      const struct pipe_depth_stencil_alpha_state *state)
{
   struct iris_depth_stencil_alpha_state *cso =
      malloc(sizeof(struct iris_depth_stencil_alpha_state));

   bool two_sided_stencil = state->stencil[1].enabled;

   cso->alpha = state->alpha;
   cso->depth_writes_enabled = state->depth.writemask;
   cso->depth_test_enabled = state->depth.enabled;
   cso->stencil_writes_enabled =
      state->stencil[0].writemask != 0 ||
      (two_sided_stencil && state->stencil[1].writemask != 0);

   /* gallium frontends need to optimize away EQUAL writes for us. */
   assert(!(state->depth.func == PIPE_FUNC_EQUAL && state->depth.writemask));

   iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), cso->wmds, wmds) {
      wmds.StencilFailOp = state->stencil[0].fail_op;
      wmds.StencilPassDepthFailOp = state->stencil[0].zfail_op;
      wmds.StencilPassDepthPassOp = state->stencil[0].zpass_op;
      wmds.StencilTestFunction =
         translate_compare_func(state->stencil[0].func);
      wmds.BackfaceStencilFailOp = state->stencil[1].fail_op;
      wmds.BackfaceStencilPassDepthFailOp = state->stencil[1].zfail_op;
      wmds.BackfaceStencilPassDepthPassOp = state->stencil[1].zpass_op;
      wmds.BackfaceStencilTestFunction =
         translate_compare_func(state->stencil[1].func);
      wmds.DepthTestFunction = translate_compare_func(state->depth.func);
      wmds.DoubleSidedStencilEnable = two_sided_stencil;
      wmds.StencilTestEnable = state->stencil[0].enabled;
      wmds.StencilBufferWriteEnable =
         state->stencil[0].writemask != 0 ||
         (two_sided_stencil && state->stencil[1].writemask != 0);
      wmds.DepthTestEnable = state->depth.enabled;
      wmds.DepthBufferWriteEnable = state->depth.writemask;
      wmds.StencilTestMask = state->stencil[0].valuemask;
      wmds.StencilWriteMask = state->stencil[0].writemask;
      wmds.BackfaceStencilTestMask = state->stencil[1].valuemask;
      wmds.BackfaceStencilWriteMask = state->stencil[1].writemask;
      /* wmds.[Backface]StencilReferenceValue are merged later */
#if GEN_GEN >= 12
      wmds.StencilReferenceValueModifyDisable = true;
#endif
   }

#if GEN_GEN >= 12
   iris_pack_command(GENX(3DSTATE_DEPTH_BOUNDS), cso->depth_bounds, depth_bounds) {
      depth_bounds.DepthBoundsTestValueModifyDisable = false;
      depth_bounds.DepthBoundsTestEnableModifyDisable = false;
      depth_bounds.DepthBoundsTestEnable = state->depth.bounds_test;
      depth_bounds.DepthBoundsTestMinValue = state->depth.bounds_min;
      depth_bounds.DepthBoundsTestMaxValue = state->depth.bounds_max;
   }
#endif

   return cso;
}

/**
 * The pipe->bind_depth_stencil_alpha_state() driver hook.
 *
 * Bind a depth/stencil/alpha CSO and flag related dirty bits.
 */
static void
iris_bind_zsa_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_depth_stencil_alpha_state *old_cso = ice->state.cso_zsa;
   struct iris_depth_stencil_alpha_state *new_cso = state;

   if (new_cso) {
      if (cso_changed(alpha.ref_value))
         ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;

      if (cso_changed(alpha.enabled))
         ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;

      if (cso_changed(alpha.func))
         ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;

      if (cso_changed(depth_writes_enabled))
         ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;

      ice->state.depth_writes_enabled = new_cso->depth_writes_enabled;
      ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled;

#if GEN_GEN >= 12
      /* depth_bounds is an array of packed DWords, so compare contents
       * rather than array addresses.
       */
      if (cso_changed_memcmp(depth_bounds))
         ice->state.dirty |= IRIS_DIRTY_DEPTH_BOUNDS;
#endif
   }

   ice->state.cso_zsa = new_cso;
   ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
   ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
   ice->state.stage_dirty |=
      ice->state.stage_dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];

   if (GEN_GEN == 8)
      ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}

#if GEN_GEN == 8
static bool
want_pma_fix(struct iris_context *ice)
{
   UNUSED struct iris_screen *screen = (void *) ice->ctx.screen;
   UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
   const struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
   const struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   const struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
   const struct iris_blend_state *cso_blend = ice->state.cso_blend;

   /* In very specific combinations of state, we can instruct Gen8-9 hardware
    * to avoid stalling at the pixel mask array.  The state equations are
    * documented in these places:
    *
    * - Gen8 Depth PMA Fix:   CACHE_MODE_1::NP_PMA_FIX_ENABLE
    * - Gen9 Stencil PMA Fix: CACHE_MODE_0::STC PMA Optimization Enable
    *
    * Both equations share some common elements:
    *
    *    no_hiz_op =
    *       !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    *         3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    *         3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    *         3DSTATE_WM_HZ_OP::StencilBufferClear) &&
    *
    *    killpixels =
    *       3DSTATE_WM::ForceKillPix != ForceOff &&
    *       (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
    *        3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
    *        3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
    *        3DSTATE_PS_BLEND::AlphaTestEnable ||
    *        3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
    *
    *    (Technically the stencil PMA treats ForceKillPix differently,
    *     but I think this is a documentation oversight, and we don't
    *     ever use it in this way, so it doesn't matter).
    *
    *    common_pma_fix =
    *       3DSTATE_WM::ForceThreadDispatch != 1 &&
    *       3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0 &&
    *       3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
    *       3DSTATE_DEPTH_BUFFER::HIZ Enable &&
    *       3DSTATE_WM::EDSC_Mode != EDSC_PREPS &&
    *       3DSTATE_PS_EXTRA::PixelShaderValid &&
    *       no_hiz_op
    *
    * These are always true:
    *
    *    3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0
    *    3DSTATE_PS_EXTRA::PixelShaderValid
    *
    * Also, we never use the normal drawing path for HiZ ops; these are true:
    *
    *    !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    *      3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    *      3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    *      3DSTATE_WM_HZ_OP::StencilBufferClear)
    *
    * This happens sometimes:
    *
    *    3DSTATE_WM::ForceThreadDispatch != 1
    *
    * However, we choose to ignore it as it either agrees with the signal
    * (dispatch was already enabled, so nothing out of the ordinary), or
    * there are no framebuffer attachments (so no depth or HiZ anyway,
    * meaning the PMA signal will already be disabled).
    */

   if (!cso_fb->zsbuf)
      return false;

   struct iris_resource *zres, *sres;
   iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, &zres, &sres);

   /* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
    * 3DSTATE_DEPTH_BUFFER::HIZ Enable &&
    */
   if (!zres || !iris_resource_level_has_hiz(zres, cso_fb->zsbuf->u.tex.level))
      return false;

   /* 3DSTATE_WM::EDSC_Mode != EDSC_PREPS */
   if (wm_prog_data->early_fragment_tests)
      return false;

   /* 3DSTATE_WM::ForceKillPix != ForceOff &&
    * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
    *  3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
    *  3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
    *  3DSTATE_PS_BLEND::AlphaTestEnable ||
    *  3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
    */
   bool killpixels = wm_prog_data->uses_kill || wm_prog_data->uses_omask ||
                     cso_blend->alpha_to_coverage || cso_zsa->alpha.enabled;

   /* The Gen8 depth PMA equation becomes:
    *
    *    depth_writes =
    *       3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
    *       3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE
    *
    *    stencil_writes =
    *       3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
    *       3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE &&
    *       3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE
    *
    *    Z_PMA_OPT =
    *       common_pma_fix &&
    *       3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable &&
    *       ((killpixels && (depth_writes || stencil_writes)) ||
    *        3DSTATE_PS_EXTRA::PixelShaderComputedDepthMode != PSCDEPTH_OFF)
    */
   if (!cso_zsa->depth_test_enabled)
      return false;

   return wm_prog_data->computed_depth_mode != PSCDEPTH_OFF ||
          (killpixels && (cso_zsa->depth_writes_enabled ||
                          (sres && cso_zsa->stencil_writes_enabled)));
}
#endif
1573
1574 void
1575 genX(update_pma_fix)(struct iris_context *ice,
1576 struct iris_batch *batch,
1577 bool enable)
1578 {
1579 #if GEN_GEN == 8
1580 struct iris_genx_state *genx = ice->state.genx;
1581
1582 if (genx->pma_fix_enabled == enable)
1583 return;
1584
1585 genx->pma_fix_enabled = enable;
1586
1587 /* According to the Broadwell PIPE_CONTROL documentation, software should
1588 * emit a PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set
1589 * prior to the LRI. If stencil buffer writes are enabled, then a Render
 * Cache Flush is also necessary.
1590 *
1591 * The Gen9 docs say to use a depth stall rather than a command streamer
1592 * stall. However, the hardware seems to violently disagree. A full
1593 * command streamer stall seems to be needed in both cases.
1594 */
1595 iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)",
1596 PIPE_CONTROL_CS_STALL |
1597 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1598 PIPE_CONTROL_RENDER_TARGET_FLUSH);
1599
1600 uint32_t reg_val;
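/* CACHE_MODE_1 is a masked register: the high 16 bits of the written value
 * act as a write mask for the low 16 bits, so each *Mask field below must
 * be set for the corresponding enable bit to actually be latched by the LRI.
 */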
1601 iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
1602 reg.NPPMAFixEnable = enable;
1603 reg.NPEarlyZFailsDisable = enable;
1604 reg.NPPMAFixEnableMask = true;
1605 reg.NPEarlyZFailsDisableMask = true;
1606 }
1607 iris_emit_lri(batch, CACHE_MODE_1, reg_val);
1608
1609 /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
1610 * Flush bits is often necessary. We do it regardless because it's easier.
1611 * The render cache flush is also necessary if stencil writes are enabled.
1612 *
1613 * Again, the Gen9 docs give a different set of flushes but the Broadwell
1614 * flushes seem to work just as well.
1615 */
1616 iris_emit_pipe_control_flush(batch, "PMA fix change (2/2)",
1617 PIPE_CONTROL_DEPTH_STALL |
1618 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1619 PIPE_CONTROL_RENDER_TARGET_FLUSH);
1620 #endif
1621 }
1622
1623 /**
1624 * Gallium CSO for rasterizer state.
1625 */
1626 struct iris_rasterizer_state {
1627 uint32_t sf[GENX(3DSTATE_SF_length)];
1628 uint32_t clip[GENX(3DSTATE_CLIP_length)];
1629 uint32_t raster[GENX(3DSTATE_RASTER_length)];
1630 uint32_t wm[GENX(3DSTATE_WM_length)];
1631 uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];
1632
1633 uint8_t num_clip_plane_consts;
1634 bool clip_halfz; /* for CC_VIEWPORT */
1635 bool depth_clip_near; /* for CC_VIEWPORT */
1636 bool depth_clip_far; /* for CC_VIEWPORT */
1637 bool flatshade; /* for shader state */
1638 bool flatshade_first; /* for stream output */
1639 bool clamp_fragment_color; /* for shader state */
1640 bool light_twoside; /* for shader state */
1641 bool rasterizer_discard; /* for 3DSTATE_STREAMOUT and 3DSTATE_CLIP */
1642 bool half_pixel_center; /* for 3DSTATE_MULTISAMPLE */
1643 bool line_stipple_enable;
1644 bool poly_stipple_enable;
1645 bool multisample;
1646 bool force_persample_interp;
1647 bool conservative_rasterization;
1648 bool fill_mode_point_or_line;
1649 enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
1650 uint16_t sprite_coord_enable;
1651 };
1652
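/**
 * Compute the effective hardware line width from the Gallium state.
 *
 * A worked example of the rules below: a non-antialiased width of 1.4 is
 * rounded to 1.0, while a smooth (antialiased) width of 1.4 falls below
 * 1.5 and is forced to 0.0, selecting the zero-width "cosmetic" line path.
 */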
1653 static float
1654 get_line_width(const struct pipe_rasterizer_state *state)
1655 {
1656 float line_width = state->line_width;
1657
1658 /* From the OpenGL 4.4 spec:
1659 *
1660 * "The actual width of non-antialiased lines is determined by rounding
1661 * the supplied width to the nearest integer, then clamping it to the
1662 * implementation-dependent maximum non-antialiased line width."
1663 */
1664 if (!state->multisample && !state->line_smooth)
1665 line_width = roundf(state->line_width);
1666
1667 if (!state->multisample && state->line_smooth && line_width < 1.5f) {
1668 /* For 1 pixel line thickness or less, the general anti-aliasing
1669 * algorithm gives up, and a garbage line is generated. Setting a
1670 * Line Width of 0.0 specifies the rasterization of the "thinnest"
1671 * (one-pixel-wide), non-antialiased lines.
1672 *
1673 * Lines rendered with zero Line Width are rasterized using the
1674 * "Grid Intersection Quantization" rules as specified by the
1675 * "Zero-Width (Cosmetic) Line Rasterization" section of the docs.
1676 */
1677 line_width = 0.0f;
1678 }
1679
1680 return line_width;
1681 }
1682
1683 /**
1684 * The pipe->create_rasterizer_state() driver hook.
1685 */
1686 static void *
1687 iris_create_rasterizer_state(struct pipe_context *ctx,
1688 const struct pipe_rasterizer_state *state)
1689 {
1690 struct iris_rasterizer_state *cso =
1691 malloc(sizeof(struct iris_rasterizer_state));
1692
1693 cso->multisample = state->multisample;
1694 cso->force_persample_interp = state->force_persample_interp;
1695 cso->clip_halfz = state->clip_halfz;
1696 cso->depth_clip_near = state->depth_clip_near;
1697 cso->depth_clip_far = state->depth_clip_far;
1698 cso->flatshade = state->flatshade;
1699 cso->flatshade_first = state->flatshade_first;
1700 cso->clamp_fragment_color = state->clamp_fragment_color;
1701 cso->light_twoside = state->light_twoside;
1702 cso->rasterizer_discard = state->rasterizer_discard;
1703 cso->half_pixel_center = state->half_pixel_center;
1704 cso->sprite_coord_mode = state->sprite_coord_mode;
1705 cso->sprite_coord_enable = state->sprite_coord_enable;
1706 cso->line_stipple_enable = state->line_stipple_enable;
1707 cso->poly_stipple_enable = state->poly_stipple_enable;
1708 cso->conservative_rasterization =
1709 state->conservative_raster_mode == PIPE_CONSERVATIVE_RASTER_POST_SNAP;
1710
1711 cso->fill_mode_point_or_line =
1712 state->fill_front == PIPE_POLYGON_MODE_LINE ||
1713 state->fill_front == PIPE_POLYGON_MODE_POINT ||
1714 state->fill_back == PIPE_POLYGON_MODE_LINE ||
1715 state->fill_back == PIPE_POLYGON_MODE_POINT;
1716
1717 if (state->clip_plane_enable != 0)
1718 cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1;
1719 else
1720 cso->num_clip_plane_consts = 0;
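/* Note that this is the index of the highest enabled plane plus one, so a
 * sparse mask like 0x21 still uploads six plane constants (0..5); planes
 * whose bits are unset are simply never tested by the clip bitmask.
 */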
1721
1722 float line_width = get_line_width(state);
1723
1724 iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
1725 sf.StatisticsEnable = true;
1726 sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
1727 sf.LineEndCapAntialiasingRegionWidth =
1728 state->line_smooth ? _10pixels : _05pixels;
1729 sf.LastPixelEnable = state->line_last_pixel;
1730 sf.LineWidth = line_width;
1731 sf.SmoothPointEnable = (state->point_smooth || state->multisample) &&
1732 !state->point_quad_rasterization;
1733 sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
1734 sf.PointWidth = state->point_size;
1735
1736 if (state->flatshade_first) {
1737 sf.TriangleFanProvokingVertexSelect = 1;
1738 } else {
1739 sf.TriangleStripListProvokingVertexSelect = 2;
1740 sf.TriangleFanProvokingVertexSelect = 2;
1741 sf.LineStripListProvokingVertexSelect = 1;
1742 }
1743 }
1744
1745 iris_pack_command(GENX(3DSTATE_RASTER), cso->raster, rr) {
1746 rr.FrontWinding = state->front_ccw ? CounterClockwise : Clockwise;
1747 rr.CullMode = translate_cull_mode(state->cull_face);
1748 rr.FrontFaceFillMode = translate_fill_mode(state->fill_front);
1749 rr.BackFaceFillMode = translate_fill_mode(state->fill_back);
1750 rr.DXMultisampleRasterizationEnable = state->multisample;
1751 rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
1752 rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
1753 rr.GlobalDepthOffsetEnablePoint = state->offset_point;
1754 rr.GlobalDepthOffsetConstant = state->offset_units * 2;
1755 rr.GlobalDepthOffsetScale = state->offset_scale;
1756 rr.GlobalDepthOffsetClamp = state->offset_clamp;
1757 rr.SmoothPointEnable = state->point_smooth;
1758 rr.AntialiasingEnable = state->line_smooth;
1759 rr.ScissorRectangleEnable = state->scissor;
1760 #if GEN_GEN >= 9
1761 rr.ViewportZNearClipTestEnable = state->depth_clip_near;
1762 rr.ViewportZFarClipTestEnable = state->depth_clip_far;
1763 rr.ConservativeRasterizationEnable =
1764 cso->conservative_rasterization;
1765 #else
1766 rr.ViewportZClipTestEnable = (state->depth_clip_near || state->depth_clip_far);
1767 #endif
1768 }
1769
1770 iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
1771 /* cl.NonPerspectiveBarycentricEnable is filled in at draw time from
1772 * the FS program; cl.ForceZeroRTAIndexEnable is filled in from the FB.
1773 */
1774 cl.EarlyCullEnable = true;
1775 cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
1776 cl.ForceUserClipDistanceClipTestEnableBitmask = true;
1777 cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
1778 cl.GuardbandClipTestEnable = true;
1779 cl.ClipEnable = true;
1780 cl.MinimumPointWidth = 0.125;
1781 cl.MaximumPointWidth = 255.875;
1782
1783 if (state->flatshade_first) {
1784 cl.TriangleFanProvokingVertexSelect = 1;
1785 } else {
1786 cl.TriangleStripListProvokingVertexSelect = 2;
1787 cl.TriangleFanProvokingVertexSelect = 2;
1788 cl.LineStripListProvokingVertexSelect = 1;
1789 }
1790 }
1791
1792 iris_pack_command(GENX(3DSTATE_WM), cso->wm, wm) {
1793 /* wm.BarycentricInterpolationMode and wm.EarlyDepthStencilControl are
1794 * filled in at draw time from the FS program.
1795 */
1796 wm.LineAntialiasingRegionWidth = _10pixels;
1797 wm.LineEndCapAntialiasingRegionWidth = _05pixels;
1798 wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
1799 wm.LineStippleEnable = state->line_stipple_enable;
1800 wm.PolygonStippleEnable = state->poly_stipple_enable;
1801 }
1802
1803 /* Remap from 0..255 back to 1..256 */
1804 const unsigned line_stipple_factor = state->line_stipple_factor + 1;
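/* For example, an API factor of 0 ("use each pattern bit once") becomes a
 * hardware repeat count of 1, with an inverse repeat count of 1.0.
 */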
1805
1806 iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
1807 if (state->line_stipple_enable) {
1808 line.LineStipplePattern = state->line_stipple_pattern;
1809 line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
1810 line.LineStippleRepeatCount = line_stipple_factor;
1811 }
1812 }
1813
1814 return cso;
1815 }
1816
1817 /**
1818 * The pipe->bind_rasterizer_state() driver hook.
1819 *
1820 * Bind a rasterizer CSO and flag related dirty bits.
1821 */
1822 static void
1823 iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
1824 {
1825 struct iris_context *ice = (struct iris_context *) ctx;
1826 struct iris_rasterizer_state *old_cso = ice->state.cso_rast;
1827 struct iris_rasterizer_state *new_cso = state;
1828
1829 if (new_cso) {
1830 /* Try to avoid re-emitting 3DSTATE_LINE_STIPPLE, it's non-pipelined */
1831 if (cso_changed_memcmp(line_stipple))
1832 ice->state.dirty |= IRIS_DIRTY_LINE_STIPPLE;
1833
1834 if (cso_changed(half_pixel_center))
1835 ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
1836
1837 if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
1838 ice->state.dirty |= IRIS_DIRTY_WM;
1839
1840 if (cso_changed(rasterizer_discard))
1841 ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;
1842
1843 if (cso_changed(flatshade_first))
1844 ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
1845
1846 if (cso_changed(depth_clip_near) || cso_changed(depth_clip_far) ||
1847 cso_changed(clip_halfz))
1848 ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
1849
1850 if (cso_changed(sprite_coord_enable) ||
1851 cso_changed(sprite_coord_mode) ||
1852 cso_changed(light_twoside))
1853 ice->state.dirty |= IRIS_DIRTY_SBE;
1854
1855 if (cso_changed(conservative_rasterization))
1856 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
1857 }
1858
1859 ice->state.cso_rast = new_cso;
1860 ice->state.dirty |= IRIS_DIRTY_RASTER;
1861 ice->state.dirty |= IRIS_DIRTY_CLIP;
1862 ice->state.stage_dirty |=
1863 ice->state.stage_dirty_for_nos[IRIS_NOS_RASTERIZER];
1864 }
1865
1866 /**
1867 * Return true if the given wrap mode requires the border color to exist.
1868 *
1869 * (We can skip uploading it if the sampler isn't going to use it.)
1870 */
1871 static bool
1872 wrap_mode_needs_border_color(unsigned wrap_mode)
1873 {
1874 return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
1875 }
1876
1877 /**
1878 * Gallium CSO for sampler state.
1879 */
1880 struct iris_sampler_state {
1881 union pipe_color_union border_color;
1882 bool needs_border_color;
1883
1884 uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
1885 };
1886
1887 /**
1888 * The pipe->create_sampler_state() driver hook.
1889 *
1890 * We fill out SAMPLER_STATE (except for the border color pointer), and
1891 * store that on the CPU. It doesn't make sense to upload it to a GPU
1892 * buffer object yet, because 3DSTATE_SAMPLER_STATE_POINTERS requires
1893 * all bound sampler states to be in contiguous memory.
1894 */
1895 static void *
1896 iris_create_sampler_state(struct pipe_context *ctx,
1897 const struct pipe_sampler_state *state)
1898 {
1899 struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);
1900
1901 if (!cso)
1902 return NULL;
1903
1904 STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
1905 STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);
1906
1907 unsigned wrap_s = translate_wrap(state->wrap_s);
1908 unsigned wrap_t = translate_wrap(state->wrap_t);
1909 unsigned wrap_r = translate_wrap(state->wrap_r);
1910
1911 memcpy(&cso->border_color, &state->border_color, sizeof(cso->border_color));
1912
1913 cso->needs_border_color = wrap_mode_needs_border_color(wrap_s) ||
1914 wrap_mode_needs_border_color(wrap_t) ||
1915 wrap_mode_needs_border_color(wrap_r);
1916
1917 float min_lod = state->min_lod;
1918 unsigned mag_img_filter = state->mag_img_filter;
1919
1920 // XXX: explain this code ported from ilo...I don't get it at all...
1921 if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE &&
1922 state->min_lod > 0.0f) {
1923 min_lod = 0.0f;
1924 mag_img_filter = state->min_img_filter;
1925 }
1926
1927 iris_pack_state(GENX(SAMPLER_STATE), cso->sampler_state, samp) {
1928 samp.TCXAddressControlMode = wrap_s;
1929 samp.TCYAddressControlMode = wrap_t;
1930 samp.TCZAddressControlMode = wrap_r;
1931 samp.CubeSurfaceControlMode = state->seamless_cube_map;
1932 samp.NonnormalizedCoordinateEnable = !state->normalized_coords;
1933 samp.MinModeFilter = state->min_img_filter;
1934 samp.MagModeFilter = mag_img_filter;
1935 samp.MipModeFilter = translate_mip_filter(state->min_mip_filter);
1936 samp.MaximumAnisotropy = RATIO21;
1937
1938 if (state->max_anisotropy >= 2) {
1939 if (state->min_img_filter == PIPE_TEX_FILTER_LINEAR) {
1940 samp.MinModeFilter = MAPFILTER_ANISOTROPIC;
1941 samp.AnisotropicAlgorithm = EWAApproximation;
1942 }
1943
1944 if (state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)
1945 samp.MagModeFilter = MAPFILTER_ANISOTROPIC;
1946
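/* The hardware field encodes an N:1 ratio as N/2 - 1, so a GL max of 2
 * maps to RATIO21 and anything >= 16 clamps to RATIO161.
 */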
1947 samp.MaximumAnisotropy =
1948 MIN2((state->max_anisotropy - 2) / 2, RATIO161);
1949 }
1950
1951 /* Set address rounding bits if not using nearest filtering. */
1952 if (state->min_img_filter != PIPE_TEX_FILTER_NEAREST) {
1953 samp.UAddressMinFilterRoundingEnable = true;
1954 samp.VAddressMinFilterRoundingEnable = true;
1955 samp.RAddressMinFilterRoundingEnable = true;
1956 }
1957
1958 if (state->mag_img_filter != PIPE_TEX_FILTER_NEAREST) {
1959 samp.UAddressMagFilterRoundingEnable = true;
1960 samp.VAddressMagFilterRoundingEnable = true;
1961 samp.RAddressMagFilterRoundingEnable = true;
1962 }
1963
1964 if (state->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
1965 samp.ShadowFunction = translate_shadow_func(state->compare_func);
1966
1967 const float hw_max_lod = GEN_GEN >= 7 ? 14 : 13;
1968
1969 samp.LODPreClampMode = CLAMP_MODE_OGL;
1970 samp.MinLOD = CLAMP(min_lod, 0, hw_max_lod);
1971 samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
1972 samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);
1973
1974 /* .BorderColorPointer is filled in by iris_bind_sampler_states. */
1975 }
1976
1977 return cso;
1978 }
1979
1980 /**
1981 * The pipe->bind_sampler_states() driver hook.
1982 */
1983 static void
1984 iris_bind_sampler_states(struct pipe_context *ctx,
1985 enum pipe_shader_type p_stage,
1986 unsigned start, unsigned count,
1987 void **states)
1988 {
1989 struct iris_context *ice = (struct iris_context *) ctx;
1990 gl_shader_stage stage = stage_from_pipe(p_stage);
1991 struct iris_shader_state *shs = &ice->state.shaders[stage];
1992
1993 assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
1994
1995 bool dirty = false;
1996
1997 for (int i = 0; i < count; i++) {
1998 if (shs->samplers[start + i] != states[i]) {
1999 shs->samplers[start + i] = states[i];
2000 dirty = true;
2001 }
2002 }
2003
2004 if (dirty)
2005 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
2006 }
2007
2008 /**
2009 * Upload the sampler states into a contiguous area of GPU memory,
2010 * for 3DSTATE_SAMPLER_STATE_POINTERS_*.
2011 *
2012 * Also fill out the border color state pointers.
2013 */
2014 static void
2015 iris_upload_sampler_states(struct iris_context *ice, gl_shader_stage stage)
2016 {
2017 struct iris_shader_state *shs = &ice->state.shaders[stage];
2018 const struct shader_info *info = iris_get_shader_info(ice, stage);
2019
2020 /* We assume gallium frontends will call pipe->bind_sampler_states()
2021 * if the program's number of textures changes.
2022 */
2023 unsigned count = info ? util_last_bit(info->textures_used) : 0;
2024
2025 if (!count)
2026 return;
2027
2028 /* Assemble the SAMPLER_STATEs into a contiguous table that lives
2029 * in the dynamic state memory zone, so we can point to it via the
2030 * 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
2031 */
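/* Each SAMPLER_STATE is SAMPLER_STATE_length DWords (4 bytes apiece), and
 * the table is aligned to 32 bytes, which should match the granularity the
 * 3DSTATE_SAMPLER_STATE_POINTERS_* commands expect.
 */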
2032 unsigned size = count * 4 * GENX(SAMPLER_STATE_length);
2033 uint32_t *map =
2034 upload_state(ice->state.dynamic_uploader, &shs->sampler_table, size, 32);
2035 if (unlikely(!map))
2036 return;
2037
2038 struct pipe_resource *res = shs->sampler_table.res;
2039 struct iris_bo *bo = iris_resource_bo(res);
2040
2041 iris_record_state_size(ice->state.sizes,
2042 bo->gtt_offset + shs->sampler_table.offset, size);
2043
2044 shs->sampler_table.offset += iris_bo_offset_from_base_address(bo);
2045
2046 /* Make sure all the border colors land in the same BO */
2047 iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);
2048
2049 ice->state.need_border_colors &= ~(1 << stage);
2050
2051 for (int i = 0; i < count; i++) {
2052 struct iris_sampler_state *state = shs->samplers[i];
2053 struct iris_sampler_view *tex = shs->textures[i];
2054
2055 if (!state) {
2056 memset(map, 0, 4 * GENX(SAMPLER_STATE_length));
2057 } else if (!state->needs_border_color) {
2058 memcpy(map, state->sampler_state, 4 * GENX(SAMPLER_STATE_length));
2059 } else {
2060 ice->state.need_border_colors |= 1 << stage;
2061
2062 /* We may need to swizzle the border color for format faking.
2063 * A/LA formats are faked as R/RG with 000R or R00G swizzles.
2064 * This means we need to move the border color's A channel into
2065 * the R or G channels so that those read swizzles will move it
2066 * back into A.
2067 */
2068 union pipe_color_union *color = &state->border_color;
2069 union pipe_color_union tmp;
2070 if (tex) {
2071 enum pipe_format internal_format = tex->res->internal_format;
2072
2073 if (util_format_is_alpha(internal_format)) {
2074 unsigned char swz[4] = {
2075 PIPE_SWIZZLE_W, PIPE_SWIZZLE_0,
2076 PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
2077 };
2078 util_format_apply_color_swizzle(&tmp, color, swz, true);
2079 color = &tmp;
2080 } else if (util_format_is_luminance_alpha(internal_format) &&
2081 internal_format != PIPE_FORMAT_L8A8_SRGB) {
2082 unsigned char swz[4] = {
2083 PIPE_SWIZZLE_X, PIPE_SWIZZLE_W,
2084 PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
2085 };
2086 util_format_apply_color_swizzle(&tmp, color, swz, true);
2087 color = &tmp;
2088 }
2089 }
2090
2091 /* Stream out the border color and merge the pointer. */
2092 uint32_t offset = iris_upload_border_color(ice, color);
2093
2094 uint32_t dynamic[GENX(SAMPLER_STATE_length)];
2095 iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) {
2096 dyns.BorderColorPointer = offset;
2097 }
2098
2099 for (uint32_t j = 0; j < GENX(SAMPLER_STATE_length); j++)
2100 map[j] = state->sampler_state[j] | dynamic[j];
2101 }
2102
2103 map += GENX(SAMPLER_STATE_length);
2104 }
2105 }
2106
2107 static enum isl_channel_select
2108 fmt_swizzle(const struct iris_format_info *fmt, enum pipe_swizzle swz)
2109 {
2110 switch (swz) {
2111 case PIPE_SWIZZLE_X: return fmt->swizzle.r;
2112 case PIPE_SWIZZLE_Y: return fmt->swizzle.g;
2113 case PIPE_SWIZZLE_Z: return fmt->swizzle.b;
2114 case PIPE_SWIZZLE_W: return fmt->swizzle.a;
2115 case PIPE_SWIZZLE_1: return SCS_ONE;
2116 case PIPE_SWIZZLE_0: return SCS_ZERO;
2117 default: unreachable("invalid swizzle");
2118 }
2119 }
2120
2121 static void
2122 fill_buffer_surface_state(struct isl_device *isl_dev,
2123 struct iris_resource *res,
2124 void *map,
2125 enum isl_format format,
2126 struct isl_swizzle swizzle,
2127 unsigned offset,
2128 unsigned size)
2129 {
2130 const struct isl_format_layout *fmtl = isl_format_get_layout(format);
2131 const unsigned cpp = format == ISL_FORMAT_RAW ? 1 : fmtl->bpb / 8;
2132
2133 /* The ARB_texture_buffer_specification says:
2134 *
2135 * "The number of texels in the buffer texture's texel array is given by
2136 *
2137 * floor(<buffer_size> / (<components> * sizeof(<base_type>)),
2138 *
2139 * where <buffer_size> is the size of the buffer object, in basic
2140 * machine units and <components> and <base_type> are the element count
2141 * and base data type for elements, as specified in Table X.1. The
2142 * number of texels in the texel array is then clamped to the
2143 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
2144 *
2145 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
2146 * so that when ISL divides by stride to obtain the number of texels, that
2147 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
2148 */
2149 unsigned final_size =
2150 MIN3(size, res->bo->size - res->offset - offset,
2151 IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);
2152
2153 isl_buffer_fill_state(isl_dev, map,
2154 .address = res->bo->gtt_offset + res->offset + offset,
2155 .size_B = final_size,
2156 .format = format,
2157 .swizzle = swizzle,
2158 .stride_B = cpp,
2159 .mocs = iris_mocs(res->bo, isl_dev));
2160 }
2161
2162 #define SURFACE_STATE_ALIGNMENT 64
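/* A single RENDER_SURFACE_STATE is 16 DWords (64 bytes), so this value
 * doubles as the stride between consecutive states in the CPU-side array
 * (see the STATIC_ASSERT in alloc_surface_states below).
 */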
2163
2164 /**
2165 * Allocate several contiguous SURFACE_STATE structures, one for each
2166 * supported auxiliary surface mode. This only allocates the CPU-side
2167 * copy; they will need to be uploaded later, after they're filled in.
2168 */
2169 static void
2170 alloc_surface_states(struct iris_surface_state *surf_state,
2171 unsigned aux_usages)
2172 {
2173 const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
2174
2175 /* If this changes, update this to explicitly align pointers */
2176 STATIC_ASSERT(surf_size == SURFACE_STATE_ALIGNMENT);
2177
2178 assert(aux_usages != 0);
2179
2180 /* In case we're re-allocating them... */
2181 free(surf_state->cpu);
2182
2183 surf_state->num_states = util_bitcount(aux_usages);
2184 surf_state->cpu = calloc(surf_state->num_states, surf_size);
2185 surf_state->ref.offset = 0;
2186 pipe_resource_reference(&surf_state->ref.res, NULL);
2187
2188 assert(surf_state->cpu);
2189 }
2190
2191 /**
2192 * Upload the CPU side SURFACE_STATEs into a GPU buffer.
2193 */
2194 static void
2195 upload_surface_states(struct u_upload_mgr *mgr,
2196 struct iris_surface_state *surf_state)
2197 {
2198 const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
2199 const unsigned bytes = surf_state->num_states * surf_size;
2200
2201 void *map =
2202 upload_state(mgr, &surf_state->ref, bytes, SURFACE_STATE_ALIGNMENT);
2203
2204 surf_state->ref.offset +=
2205 iris_bo_offset_from_base_address(iris_resource_bo(surf_state->ref.res));
2206
2207 if (map)
2208 memcpy(map, surf_state->cpu, bytes);
2209 }
2210
2211 /**
2212 * Update resource addresses in a set of SURFACE_STATE descriptors,
2213 * and re-upload them if necessary.
2214 */
2215 static bool
2216 update_surface_state_addrs(struct u_upload_mgr *mgr,
2217 struct iris_surface_state *surf_state,
2218 struct iris_bo *bo)
2219 {
2220 if (surf_state->bo_address == bo->gtt_offset)
2221 return false;
2222
2223 STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) % 64 == 0);
2224 STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_bits) == 64);
2225
2226 uint64_t *ss_addr = (uint64_t *) &surf_state->cpu[GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) / 32];
2227
2228 /* First, update the CPU copies. We assume no other fields exist in
2229 * the QWord containing Surface Base Address.
2230 */
2231 for (unsigned i = 0; i < surf_state->num_states; i++) {
2232 *ss_addr = *ss_addr - surf_state->bo_address + bo->gtt_offset;
2233 ss_addr = ((void *) ss_addr) + SURFACE_STATE_ALIGNMENT;
2234 }
2235
2236 /* Next, upload the updated copies to a GPU buffer. */
2237 upload_surface_states(mgr, surf_state);
2238
2239 surf_state->bo_address = bo->gtt_offset;
2240
2241 return true;
2242 }
2243
2244 #if GEN_GEN == 8
2245 /**
2246 * Return an ISL surface for use with non-coherent render target reads.
2247 *
2248 * In a few complex cases, we can't use the SURFACE_STATE for normal render
2249 * target writes. We need to make a separate one for sampling, which refers
2250 * to the single slice of the texture being read.
2251 */
2252 static void
2253 get_rt_read_isl_surf(const struct gen_device_info *devinfo,
2254 struct iris_resource *res,
2255 enum pipe_texture_target target,
2256 struct isl_view *view,
2257 uint32_t *offset_to_tile,
2258 uint32_t *tile_x_sa,
2259 uint32_t *tile_y_sa,
2260 struct isl_surf *surf)
2261 {
2262 *surf = res->surf;
2263
2264 const enum isl_dim_layout dim_layout =
2265 iris_get_isl_dim_layout(devinfo, res->surf.tiling, target);
2266
2267 surf->dim = target_to_isl_surf_dim(target);
2268
2269 if (surf->dim_layout == dim_layout)
2270 return;
2271
2272 /* The layout of the specified texture target is not compatible with the
2273 * actual layout of the miptree structure in memory -- you're entering
2274 * dangerous territory: this can only possibly work if you only intended
2275 * to access a single level and slice of the texture, and the hardware
2276 * supports the tile offset feature in order to allow non-tile-aligned
2277 * base offsets, since we'll have to point the hardware to the first
2278 * texel of the level instead of relying on the usual base level/layer
2279 * controls.
2280 */
2281 assert(view->levels == 1 && view->array_len == 1);
2282 assert(*tile_x_sa == 0 && *tile_y_sa == 0);
2283
2284 *offset_to_tile = iris_resource_get_tile_offsets(res, view->base_level,
2285 view->base_array_layer,
2286 tile_x_sa, tile_y_sa);
2287 const unsigned l = view->base_level;
2288
2289 surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
2290 surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
2291 minify(surf->logical_level0_px.height, l);
2292 surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
2293 minify(surf->logical_level0_px.depth, l);
2294
2295 surf->logical_level0_px.array_len = 1;
2296 surf->levels = 1;
2297 surf->dim_layout = dim_layout;
2298
2299 view->base_level = 0;
2300 view->base_array_layer = 0;
2301 }
2302 #endif
2303
2304 static void
2305 fill_surface_state(struct isl_device *isl_dev,
2306 void *map,
2307 struct iris_resource *res,
2308 struct isl_surf *surf,
2309 struct isl_view *view,
2310 unsigned aux_usage,
2311 uint32_t extra_main_offset,
2312 uint32_t tile_x_sa,
2313 uint32_t tile_y_sa)
2314 {
2315 struct isl_surf_fill_state_info f = {
2316 .surf = surf,
2317 .view = view,
2318 .mocs = iris_mocs(res->bo, isl_dev),
2319 .address = res->bo->gtt_offset + res->offset + extra_main_offset,
2320 .x_offset_sa = tile_x_sa,
2321 .y_offset_sa = tile_y_sa,
2322 };
2323
2324 assert(!iris_resource_unfinished_aux_import(res));
2325
2326 if (aux_usage != ISL_AUX_USAGE_NONE) {
2327 f.aux_surf = &res->aux.surf;
2328 f.aux_usage = aux_usage;
2329 f.aux_address = res->aux.bo->gtt_offset + res->aux.offset;
2330
2331 struct iris_bo *clear_bo = NULL;
2332 uint64_t clear_offset = 0;
2333 f.clear_color =
2334 iris_resource_get_clear_color(res, &clear_bo, &clear_offset);
2335 if (clear_bo) {
2336 f.clear_address = clear_bo->gtt_offset + clear_offset;
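/* Gens after 9 can read the clear color from a GPU buffer; on older
 * hardware it has to be packed into the SURFACE_STATE itself.
 */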
2337 f.use_clear_address = isl_dev->info->gen > 9;
2338 }
2339 }
2340
2341 isl_surf_fill_state_s(isl_dev, map, &f);
2342 }
2343
2344 /**
2345 * The pipe->create_sampler_view() driver hook.
2346 */
2347 static struct pipe_sampler_view *
2348 iris_create_sampler_view(struct pipe_context *ctx,
2349 struct pipe_resource *tex,
2350 const struct pipe_sampler_view *tmpl)
2351 {
2352 struct iris_context *ice = (struct iris_context *) ctx;
2353 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2354 const struct gen_device_info *devinfo = &screen->devinfo;
2355 struct iris_sampler_view *isv = calloc(1, sizeof(struct iris_sampler_view));
2356
2357 if (!isv)
2358 return NULL;
2359
2360 /* initialize base object */
2361 isv->base = *tmpl;
2362 isv->base.context = ctx;
2363 isv->base.texture = NULL;
2364 pipe_reference_init(&isv->base.reference, 1);
2365 pipe_resource_reference(&isv->base.texture, tex);
2366
2367 if (util_format_is_depth_or_stencil(tmpl->format)) {
2368 struct iris_resource *zres, *sres;
2369 const struct util_format_description *desc =
2370 util_format_description(tmpl->format);
2371
2372 iris_get_depth_stencil_resources(tex, &zres, &sres);
2373
2374 tex = util_format_has_depth(desc) ? &zres->base : &sres->base;
2375 }
2376
2377 isv->res = (struct iris_resource *) tex;
2378
2379 alloc_surface_states(&isv->surface_state, isv->res->aux.sampler_usages);
2380
2381 isv->surface_state.bo_address = isv->res->bo->gtt_offset;
2382
2383 isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;
2384
2385 if (isv->base.target == PIPE_TEXTURE_CUBE ||
2386 isv->base.target == PIPE_TEXTURE_CUBE_ARRAY)
2387 usage |= ISL_SURF_USAGE_CUBE_BIT;
2388
2389 const struct iris_format_info fmt =
2390 iris_format_for_usage(devinfo, tmpl->format, usage);
2391
2392 isv->clear_color = isv->res->aux.clear_color;
2393
2394 isv->view = (struct isl_view) {
2395 .format = fmt.fmt,
2396 .swizzle = (struct isl_swizzle) {
2397 .r = fmt_swizzle(&fmt, tmpl->swizzle_r),
2398 .g = fmt_swizzle(&fmt, tmpl->swizzle_g),
2399 .b = fmt_swizzle(&fmt, tmpl->swizzle_b),
2400 .a = fmt_swizzle(&fmt, tmpl->swizzle_a),
2401 },
2402 .usage = usage,
2403 };
2404
2405 void *map = isv->surface_state.cpu;
2406
2407 /* Fill out SURFACE_STATE for this view. */
2408 if (tmpl->target != PIPE_BUFFER) {
2409 isv->view.base_level = tmpl->u.tex.first_level;
2410 isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;
2411 // XXX: do I need to port f9fd0cf4790cb2a530e75d1a2206dbb9d8af7cb2?
2412 isv->view.base_array_layer = tmpl->u.tex.first_layer;
2413 isv->view.array_len =
2414 tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
2415
2416 if (iris_resource_unfinished_aux_import(isv->res))
2417 iris_resource_finish_aux_import(&screen->base, isv->res);
2418
2419 unsigned aux_modes = isv->res->aux.sampler_usages;
2420 while (aux_modes) {
2421 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
2422
2423 /* If we have a multisampled depth buffer, do not create a sampler
2424 * surface state with HiZ.
2425 */
2426 fill_surface_state(&screen->isl_dev, map, isv->res, &isv->res->surf,
2427 &isv->view, aux_usage, 0, 0, 0);
2428
2429 map += SURFACE_STATE_ALIGNMENT;
2430 }
2431 } else {
2432 fill_buffer_surface_state(&screen->isl_dev, isv->res, map,
2433 isv->view.format, isv->view.swizzle,
2434 tmpl->u.buf.offset, tmpl->u.buf.size);
2435 }
2436
2437 upload_surface_states(ice->state.surface_uploader, &isv->surface_state);
2438
2439 return &isv->base;
2440 }
2441
2442 static void
2443 iris_sampler_view_destroy(struct pipe_context *ctx,
2444 struct pipe_sampler_view *state)
2445 {
2446 struct iris_sampler_view *isv = (void *) state;
2447 pipe_resource_reference(&state->texture, NULL);
2448 pipe_resource_reference(&isv->surface_state.ref.res, NULL);
2449 free(isv->surface_state.cpu);
2450 free(isv);
2451 }
2452
2453 /**
2454 * The pipe->create_surface() driver hook.
2455 *
2456 * In Gallium nomenclature, "surfaces" are a view of a resource that
2457 * can be bound as a render target or depth/stencil buffer.
2458 */
2459 static struct pipe_surface *
2460 iris_create_surface(struct pipe_context *ctx,
2461 struct pipe_resource *tex,
2462 const struct pipe_surface *tmpl)
2463 {
2464 struct iris_context *ice = (struct iris_context *) ctx;
2465 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2466 const struct gen_device_info *devinfo = &screen->devinfo;
2467
2468 isl_surf_usage_flags_t usage = 0;
2469 if (tmpl->writable)
2470 usage = ISL_SURF_USAGE_STORAGE_BIT;
2471 else if (util_format_is_depth_or_stencil(tmpl->format))
2472 usage = ISL_SURF_USAGE_DEPTH_BIT;
2473 else
2474 usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
2475
2476 const struct iris_format_info fmt =
2477 iris_format_for_usage(devinfo, tmpl->format, usage);
2478
2479 if ((usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
2480 !isl_format_supports_rendering(devinfo, fmt.fmt)) {
2481 /* Framebuffer validation will reject this invalid case, but it
2482 * hasn't had the opportunity yet. In the meantime, we need to
2483 * avoid hitting ISL asserts about unsupported formats below.
2484 */
2485 return NULL;
2486 }
2487
2488 struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
2489 struct pipe_surface *psurf = &surf->base;
2490 struct iris_resource *res = (struct iris_resource *) tex;
2491
2492 if (!surf)
2493 return NULL;
2494
2495 pipe_reference_init(&psurf->reference, 1);
2496 pipe_resource_reference(&psurf->texture, tex);
2497 psurf->context = ctx;
2498 psurf->format = tmpl->format;
2499 psurf->width = tex->width0;
2500 psurf->height = tex->height0;
2501 psurf->texture = tex;
2502 psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
2503 psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
2504 psurf->u.tex.level = tmpl->u.tex.level;
2505
2506 uint32_t array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
2507
2508 struct isl_view *view = &surf->view;
2509 *view = (struct isl_view) {
2510 .format = fmt.fmt,
2511 .base_level = tmpl->u.tex.level,
2512 .levels = 1,
2513 .base_array_layer = tmpl->u.tex.first_layer,
2514 .array_len = array_len,
2515 .swizzle = ISL_SWIZZLE_IDENTITY,
2516 .usage = usage,
2517 };
2518
2519 #if GEN_GEN == 8
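/* Pick a texture target compatible with sampling the render target: a
 * single-slice 3D view can be read as a 2D image, and 1D arrays seem to
 * share the 2D array layout on Gen8 (see get_rt_read_isl_surf).
 */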
2520 enum pipe_texture_target target = (tex->target == PIPE_TEXTURE_3D &&
2521 array_len == 1) ? PIPE_TEXTURE_2D :
2522 tex->target == PIPE_TEXTURE_1D_ARRAY ?
2523 PIPE_TEXTURE_2D_ARRAY : tex->target;
2524
2525 struct isl_view *read_view = &surf->read_view;
2526 *read_view = (struct isl_view) {
2527 .format = fmt.fmt,
2528 .base_level = tmpl->u.tex.level,
2529 .levels = 1,
2530 .base_array_layer = tmpl->u.tex.first_layer,
2531 .array_len = array_len,
2532 .swizzle = ISL_SWIZZLE_IDENTITY,
2533 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
2534 };
2535 #endif
2536
2537 surf->clear_color = res->aux.clear_color;
2538
2539 /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */
2540 if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
2541 ISL_SURF_USAGE_STENCIL_BIT))
2542 return psurf;
2543
2544
2545 alloc_surface_states(&surf->surface_state, res->aux.possible_usages);
2546 surf->surface_state.bo_address = res->bo->gtt_offset;
2547
2548 #if GEN_GEN == 8
2549 alloc_surface_states(&surf->surface_state_read, res->aux.possible_usages);
2550 surf->surface_state_read.bo_address = res->bo->gtt_offset;
2551 #endif
2552
2553 if (!isl_format_is_compressed(res->surf.format)) {
2554 if (iris_resource_unfinished_aux_import(res))
2555 iris_resource_finish_aux_import(&screen->base, res);
2556
2557 void *map = surf->surface_state.cpu;
2558 UNUSED void *map_read = surf->surface_state_read.cpu;
2559
2560 /* This is a normal surface. Fill out a SURFACE_STATE for each possible
2561 * auxiliary surface mode and return the pipe_surface.
2562 */
2563 unsigned aux_modes = res->aux.possible_usages;
2564 while (aux_modes) {
2565 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
2566 fill_surface_state(&screen->isl_dev, map, res, &res->surf,
2567 view, aux_usage, 0, 0, 0);
2568 map += SURFACE_STATE_ALIGNMENT;
2569
2570 #if GEN_GEN == 8
2571 struct isl_surf surf;
2572 uint32_t offset_to_tile = 0, tile_x_sa = 0, tile_y_sa = 0;
2573 get_rt_read_isl_surf(devinfo, res, target, read_view,
2574 &offset_to_tile, &tile_x_sa, &tile_y_sa, &surf);
2575 fill_surface_state(&screen->isl_dev, map_read, res, &surf, read_view,
2576 aux_usage, offset_to_tile, tile_x_sa, tile_y_sa);
2577 map_read += SURFACE_STATE_ALIGNMENT;
2578 #endif
2579 }
2580
2581 upload_surface_states(ice->state.surface_uploader, &surf->surface_state);
2582
2583 #if GEN_GEN == 8
2584 upload_surface_states(ice->state.surface_uploader,
2585 &surf->surface_state_read);
2586 #endif
2587
2588 return psurf;
2589 }
2590
2591 /* The resource has a compressed format, which is not renderable, but we
2592 * have a renderable view format. We must be attempting to upload blocks
2593 * of compressed data via an uncompressed view.
2594 *
2595 * In this case, we can assume there are no auxiliary buffers, a single
2596 * miplevel, and that the resource is single-sampled. Gallium may try
2597 * to create an uncompressed view with multiple layers, however.
2598 */
2599 assert(!isl_format_is_compressed(fmt.fmt));
2600 assert(res->aux.possible_usages == 1 << ISL_AUX_USAGE_NONE);
2601 assert(res->surf.samples == 1);
2602 assert(view->levels == 1);
2603
2604 struct isl_surf isl_surf;
2605 uint32_t offset_B = 0, tile_x_sa = 0, tile_y_sa = 0;
2606
2607 if (view->base_level > 0) {
2608 /* We can't rely on the hardware's miplevel selection with such
2609 * a substantial lie about the format, so we select a single image
2610 * using the Tile X/Y Offset fields. In this case, we can't handle
2611 * multiple array slices.
2612 *
2613 * On Broadwell, HALIGN and VALIGN are specified in pixels and are
2614 * hard-coded to align to exactly the block size of the compressed
2615 * texture. This means that, when reinterpreted as a non-compressed
2616 * texture, the tile offsets may be anything and we can't rely on
2617 * X/Y Offset.
2618 *
2619 * Return NULL to force gallium frontends to take fallback paths.
2620 */
2621 if (view->array_len > 1 || GEN_GEN == 8)
2622 return NULL;
2623
2624 const bool is_3d = res->surf.dim == ISL_SURF_DIM_3D;
2625 isl_surf_get_image_surf(&screen->isl_dev, &res->surf,
2626 view->base_level,
2627 is_3d ? 0 : view->base_array_layer,
2628 is_3d ? view->base_array_layer : 0,
2629 &isl_surf,
2630 &offset_B, &tile_x_sa, &tile_y_sa);
2631
2632 /* We use address and tile offsets to access a single level/layer
2633 * as a subimage, so reset level/layer so it doesn't offset again.
2634 */
2635 view->base_array_layer = 0;
2636 view->base_level = 0;
2637 } else {
2638 /* Level 0 doesn't require tile offsets, and the hardware can find
2639 * array slices using QPitch even with the format override, so we
2640 * can allow layers in this case. Copy the original ISL surface.
2641 */
2642 memcpy(&isl_surf, &res->surf, sizeof(isl_surf));
2643 }
2644
2645 /* Scale down the image dimensions by the block size. */
2646 const struct isl_format_layout *fmtl =
2647 isl_format_get_layout(res->surf.format);
2648 isl_surf.format = fmt.fmt;
2649 isl_surf.logical_level0_px = isl_surf_get_logical_level0_el(&isl_surf);
2650 isl_surf.phys_level0_sa = isl_surf_get_phys_level0_el(&isl_surf);
2651 tile_x_sa /= fmtl->bw;
2652 tile_y_sa /= fmtl->bh;
2653
2654 psurf->width = isl_surf.logical_level0_px.width;
2655 psurf->height = isl_surf.logical_level0_px.height;
2656
2657 struct isl_surf_fill_state_info f = {
2658 .surf = &isl_surf,
2659 .view = view,
2660 .mocs = iris_mocs(res->bo, &screen->isl_dev),
2661 .address = res->bo->gtt_offset + offset_B,
2662 .x_offset_sa = tile_x_sa,
2663 .y_offset_sa = tile_y_sa,
2664 };
2665
2666 isl_surf_fill_state_s(&screen->isl_dev, surf->surface_state.cpu, &f);
2667
2668 upload_surface_states(ice->state.surface_uploader, &surf->surface_state);
2669
2670 return psurf;
2671 }
2672
2673 #if GEN_GEN < 9
2674 static void
2675 fill_default_image_param(struct brw_image_param *param)
2676 {
2677 memset(param, 0, sizeof(*param));
2678 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
2679 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
2680 * detailed explanation of these parameters.
2681 */
2682 param->swizzling[0] = 0xff;
2683 param->swizzling[1] = 0xff;
2684 }
2685
2686 static void
2687 fill_buffer_image_param(struct brw_image_param *param,
2688 enum pipe_format pfmt,
2689 unsigned size)
2690 {
2691 const unsigned cpp = util_format_get_blocksize(pfmt);
2692
2693 fill_default_image_param(param);
2694 param->size[0] = size / cpp;
2695 param->stride[0] = cpp;
2696 }
2697 #else
2698 #define isl_surf_fill_image_param(x, ...)
2699 #define fill_default_image_param(x, ...)
2700 #define fill_buffer_image_param(x, ...)
2701 #endif
2702
2703 /**
2704 * The pipe->set_shader_images() driver hook.
2705 */
2706 static void
2707 iris_set_shader_images(struct pipe_context *ctx,
2708 enum pipe_shader_type p_stage,
2709 unsigned start_slot, unsigned count,
2710 const struct pipe_image_view *p_images)
2711 {
2712 struct iris_context *ice = (struct iris_context *) ctx;
2713 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2714 gl_shader_stage stage = stage_from_pipe(p_stage);
2715 struct iris_shader_state *shs = &ice->state.shaders[stage];
2716 #if GEN_GEN == 8
2717 struct iris_genx_state *genx = ice->state.genx;
2718 struct brw_image_param *image_params = genx->shaders[stage].image_param;
2719 #endif
2720
2721 shs->bound_image_views &= ~u_bit_consecutive(start_slot, count);
2722
2723 for (unsigned i = 0; i < count; i++) {
2724 struct iris_image_view *iv = &shs->image[start_slot + i];
2725
2726 if (p_images && p_images[i].resource) {
2727 const struct pipe_image_view *img = &p_images[i];
2728 struct iris_resource *res = (void *) img->resource;
2729
2730 util_copy_image_view(&iv->base, img);
2731
2732 shs->bound_image_views |= 1 << (start_slot + i);
2733
2734 res->bind_history |= PIPE_BIND_SHADER_IMAGE;
2735 res->bind_stages |= 1 << stage;
2736
2737 enum isl_format isl_fmt = iris_image_view_get_format(ice, img);
2738
2739 /* Render compression with images is supported on gen12+ only. */
2740 unsigned aux_usages = GEN_GEN >= 12 ? res->aux.possible_usages :
2741 1 << ISL_AUX_USAGE_NONE;
2742
2743 alloc_surface_states(&iv->surface_state, aux_usages);
2744 iv->surface_state.bo_address = res->bo->gtt_offset;
2745
2746 void *map = iv->surface_state.cpu;
2747
2748 if (res->base.target != PIPE_BUFFER) {
2749 struct isl_view view = {
2750 .format = isl_fmt,
2751 .base_level = img->u.tex.level,
2752 .levels = 1,
2753 .base_array_layer = img->u.tex.first_layer,
2754 .array_len = img->u.tex.last_layer - img->u.tex.first_layer + 1,
2755 .swizzle = ISL_SWIZZLE_IDENTITY,
2756 .usage = ISL_SURF_USAGE_STORAGE_BIT,
2757 };
2758
2759 /* If using untyped fallback. */
2760 if (isl_fmt == ISL_FORMAT_RAW) {
2761 fill_buffer_surface_state(&screen->isl_dev, res, map,
2762 isl_fmt, ISL_SWIZZLE_IDENTITY,
2763 0, res->bo->size);
2764 } else {
2765 unsigned aux_modes = aux_usages;
2766 while (aux_modes) {
2767 enum isl_aux_usage usage = u_bit_scan(&aux_modes);
2768
2769 fill_surface_state(&screen->isl_dev, map, res, &res->surf,
2770 &view, usage, 0, 0, 0);
2771
2772 map += SURFACE_STATE_ALIGNMENT;
2773 }
2774 }
2775
2776 isl_surf_fill_image_param(&screen->isl_dev,
2777 &image_params[start_slot + i],
2778 &res->surf, &view);
2779 } else {
2780 util_range_add(&res->base, &res->valid_buffer_range, img->u.buf.offset,
2781 img->u.buf.offset + img->u.buf.size);
2782
2783 fill_buffer_surface_state(&screen->isl_dev, res, map,
2784 isl_fmt, ISL_SWIZZLE_IDENTITY,
2785 img->u.buf.offset, img->u.buf.size);
2786 fill_buffer_image_param(&image_params[start_slot + i],
2787 img->format, img->u.buf.size);
2788 }
2789
2790 upload_surface_states(ice->state.surface_uploader, &iv->surface_state);
2791 } else {
2792 pipe_resource_reference(&iv->base.resource, NULL);
2793 pipe_resource_reference(&iv->surface_state.ref.res, NULL);
2794 fill_default_image_param(&image_params[start_slot + i]);
2795 }
2796 }
2797
2798 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
2799 ice->state.dirty |=
2800 stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
2801 : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
2802
2803 /* Broadwell also needs brw_image_params re-uploaded */
2804 if (GEN_GEN < 9) {
2805 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
2806 shs->sysvals_need_upload = true;
2807 }
2808 }
2809
2810
2811 /**
2812 * The pipe->set_sampler_views() driver hook.
2813 */
2814 static void
2815 iris_set_sampler_views(struct pipe_context *ctx,
2816 enum pipe_shader_type p_stage,
2817 unsigned start, unsigned count,
2818 struct pipe_sampler_view **views)
2819 {
2820 struct iris_context *ice = (struct iris_context *) ctx;
2821 gl_shader_stage stage = stage_from_pipe(p_stage);
2822 struct iris_shader_state *shs = &ice->state.shaders[stage];
2823
2824 shs->bound_sampler_views &= ~u_bit_consecutive(start, count);
2825
2826 for (unsigned i = 0; i < count; i++) {
2827 struct pipe_sampler_view *pview = views ? views[i] : NULL;
2828 pipe_sampler_view_reference((struct pipe_sampler_view **)
2829 &shs->textures[start + i], pview);
2830 struct iris_sampler_view *view = (void *) pview;
2831 if (view) {
2832 view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
2833 view->res->bind_stages |= 1 << stage;
2834
2835 shs->bound_sampler_views |= 1 << (start + i);
2836
2837 update_surface_state_addrs(ice->state.surface_uploader,
2838 &view->surface_state, view->res->bo);
2839 }
2840 }
2841
2842 ice->state.stage_dirty |= (IRIS_STAGE_DIRTY_BINDINGS_VS << stage);
2843 ice->state.dirty |=
2844 stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
2845 : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
2846 }
2847
2848 /**
2849 * The pipe->set_tess_state() driver hook.
2850 */
2851 static void
2852 iris_set_tess_state(struct pipe_context *ctx,
2853 const float default_outer_level[4],
2854 const float default_inner_level[2])
2855 {
2856 struct iris_context *ice = (struct iris_context *) ctx;
2857 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
2858
2859 memcpy(&ice->state.default_outer_level[0], &default_outer_level[0], 4 * sizeof(float));
2860 memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float));
2861
2862 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TCS;
2863 shs->sysvals_need_upload = true;
2864 }
2865
2866 static void
2867 iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
2868 {
2869 struct iris_surface *surf = (void *) p_surf;
2870 pipe_resource_reference(&p_surf->texture, NULL);
2871 pipe_resource_reference(&surf->surface_state.ref.res, NULL);
2872 pipe_resource_reference(&surf->surface_state_read.ref.res, NULL);
2873 free(surf->surface_state.cpu);
2874 free(surf);
2875 }
2876
2877 static void
2878 iris_set_clip_state(struct pipe_context *ctx,
2879 const struct pipe_clip_state *state)
2880 {
2881 struct iris_context *ice = (struct iris_context *) ctx;
2882 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
2883 struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
2884 struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
2885
2886 memcpy(&ice->state.clip_planes, state, sizeof(*state));
2887
2888 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS |
2889 IRIS_STAGE_DIRTY_CONSTANTS_GS |
2890 IRIS_STAGE_DIRTY_CONSTANTS_TES;
2891 shs->sysvals_need_upload = true;
2892 gshs->sysvals_need_upload = true;
2893 tshs->sysvals_need_upload = true;
2894 }
2895
2896 /**
2897 * The pipe->set_polygon_stipple() driver hook.
2898 */
2899 static void
2900 iris_set_polygon_stipple(struct pipe_context *ctx,
2901 const struct pipe_poly_stipple *state)
2902 {
2903 struct iris_context *ice = (struct iris_context *) ctx;
2904 memcpy(&ice->state.poly_stipple, state, sizeof(*state));
2905 ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
2906 }
2907
2908 /**
2909 * The pipe->set_sample_mask() driver hook.
2910 */
2911 static void
2912 iris_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
2913 {
2914 struct iris_context *ice = (struct iris_context *) ctx;
2915
2916 /* We only support 16x MSAA, so we have 16 bits of sample mask.
2917 * st/mesa may pass us 0xffffffff though, meaning "enable all samples".
2918 */
2919 ice->state.sample_mask = sample_mask & 0xffff;
2920 ice->state.dirty |= IRIS_DIRTY_SAMPLE_MASK;
2921 }
2922
2923 /**
2924 * The pipe->set_scissor_states() driver hook.
2925 *
2926 * This corresponds to our SCISSOR_RECT state structures. It's an
2927 * exact match, so we just store them, and memcpy them out later.
2928 */
2929 static void
2930 iris_set_scissor_states(struct pipe_context *ctx,
2931 unsigned start_slot,
2932 unsigned num_scissors,
2933 const struct pipe_scissor_state *rects)
2934 {
2935 struct iris_context *ice = (struct iris_context *) ctx;
2936
2937 for (unsigned i = 0; i < num_scissors; i++) {
2938 if (rects[i].minx == rects[i].maxx || rects[i].miny == rects[i].maxy) {
2939 /* If the scissor was out of bounds and got clamped to 0 width/height
2940 * at the bounds, the subtraction of 1 from maximums could produce a
2941 * negative number and thus not clip anything. Instead, just provide
2942 * a min > max scissor inside the bounds, which produces the expected
2943 * no rendering.
2944 */
2945 ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
2946 .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
2947 };
2948 } else {
2949 ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
2950 .minx = rects[i].minx, .miny = rects[i].miny,
2951 .maxx = rects[i].maxx - 1, .maxy = rects[i].maxy - 1,
2952 };
2953 }
2954 }
2955
2956 ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
2957 }
2958
2959 /**
2960 * The pipe->set_stencil_ref() driver hook.
2961 *
2962 * This is added to 3DSTATE_WM_DEPTH_STENCIL dynamically at draw time.
2963 */
2964 static void
2965 iris_set_stencil_ref(struct pipe_context *ctx,
2966 const struct pipe_stencil_ref *state)
2967 {
2968 struct iris_context *ice = (struct iris_context *) ctx;
2969 memcpy(&ice->state.stencil_ref, state, sizeof(*state));
2970 if (GEN_GEN >= 12)
2971 ice->state.dirty |= IRIS_DIRTY_STENCIL_REF;
2972 else if (GEN_GEN >= 9)
2973 ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
2974 else
2975 ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
2976 }
2977
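/**
 * Return one edge of the viewport's extent along an axis: the translate
 * plus or minus the magnitude of the scale (sign selects which edge).
 */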
2978 static float
2979 viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
2980 {
2981 return copysignf(state->scale[axis], sign) + state->translate[axis];
2982 }
2983
2984 /**
2985 * The pipe->set_viewport_states() driver hook.
2986 *
2987 * This corresponds to our SF_CLIP_VIEWPORT states. We can't calculate
2988 * the guardband yet, as we need the framebuffer dimensions, but we can
2989 * at least fill out the rest.
2990 */
2991 static void
2992 iris_set_viewport_states(struct pipe_context *ctx,
2993 unsigned start_slot,
2994 unsigned count,
2995 const struct pipe_viewport_state *states)
2996 {
2997 struct iris_context *ice = (struct iris_context *) ctx;
2998
2999 memcpy(&ice->state.viewports[start_slot], states, sizeof(*states) * count);
3000
3001 ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
3002
3003 if (ice->state.cso_rast && (!ice->state.cso_rast->depth_clip_near ||
3004 !ice->state.cso_rast->depth_clip_far))
3005 ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
3006 }
3007
3008 /**
3009 * The pipe->set_framebuffer_state() driver hook.
3010 *
3011 * Sets the current draw FBO, including color render targets, depth,
3012 * and stencil buffers.
3013 */
3014 static void
3015 iris_set_framebuffer_state(struct pipe_context *ctx,
3016 const struct pipe_framebuffer_state *state)
3017 {
3018 struct iris_context *ice = (struct iris_context *) ctx;
3019 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3020 struct isl_device *isl_dev = &screen->isl_dev;
3021 struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
3022 struct iris_resource *zres;
3023 struct iris_resource *stencil_res;
3024
3025 unsigned samples = util_framebuffer_get_num_samples(state);
3026 unsigned layers = util_framebuffer_get_num_layers(state);
3027
3028 if (cso->samples != samples) {
3029 ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
3030
3031 /* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */
3032 if (GEN_GEN >= 9 && (cso->samples == 16 || samples == 16))
3033 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
3034 }
3035
3036 if (cso->nr_cbufs != state->nr_cbufs) {
3037 ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
3038 }
3039
3040 if ((cso->layers == 0) != (layers == 0)) {
3041 ice->state.dirty |= IRIS_DIRTY_CLIP;
3042 }
3043
3044 if (cso->width != state->width || cso->height != state->height) {
3045 ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
3046 }
3047
3048 if (cso->zsbuf || state->zsbuf) {
3049 ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
3050 }
3051
3052 util_copy_framebuffer_state(cso, state);
3053 cso->samples = samples;
3054 cso->layers = layers;
3055
3056 struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
3057
3058 struct isl_view view = {
3059 .base_level = 0,
3060 .levels = 1,
3061 .base_array_layer = 0,
3062 .array_len = 1,
3063 .swizzle = ISL_SWIZZLE_IDENTITY,
3064 };
3065
3066 struct isl_depth_stencil_hiz_emit_info info = { .view = &view };
3067
3068 if (cso->zsbuf) {
3069 iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres,
3070 &stencil_res);
3071
3072 view.base_level = cso->zsbuf->u.tex.level;
3073 view.base_array_layer = cso->zsbuf->u.tex.first_layer;
3074 view.array_len =
3075 cso->zsbuf->u.tex.last_layer - cso->zsbuf->u.tex.first_layer + 1;
3076
3077 if (zres) {
3078 view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
3079
3080 info.depth_surf = &zres->surf;
3081 info.depth_address = zres->bo->gtt_offset + zres->offset;
3082 info.mocs = iris_mocs(zres->bo, isl_dev);
3083
3084 view.format = zres->surf.format;
3085
3086 if (iris_resource_level_has_hiz(zres, view.base_level)) {
3087 info.hiz_usage = zres->aux.usage;
3088 info.hiz_surf = &zres->aux.surf;
3089 info.hiz_address = zres->aux.bo->gtt_offset + zres->aux.offset;
3090 }
3091 }
3092
3093 if (stencil_res) {
3094 view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
3095 info.stencil_aux_usage = stencil_res->aux.usage;
3096 info.stencil_surf = &stencil_res->surf;
3097 info.stencil_address = stencil_res->bo->gtt_offset + stencil_res->offset;
3098 if (!zres) {
3099 view.format = stencil_res->surf.format;
3100 info.mocs = iris_mocs(stencil_res->bo, isl_dev);
3101 }
3102 }
3103 }
3104
3105 isl_emit_depth_stencil_hiz_s(isl_dev, cso_z->packets, &info);
3106
3107 /* Make a null surface for unbound buffers */
3108 void *null_surf_map =
3109 upload_state(ice->state.surface_uploader, &ice->state.null_fb,
3110 4 * GENX(RENDER_SURFACE_STATE_length), 64);
3111 isl_null_fill_state(&screen->isl_dev, null_surf_map,
3112 isl_extent3d(MAX2(cso->width, 1),
3113 MAX2(cso->height, 1),
3114 cso->layers ? cso->layers : 1));
3115 ice->state.null_fb.offset +=
3116 iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));
3117
3118 /* Render target change */
3119 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_FS;
3120
3121 ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
3122
3123 ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
3124
3125 ice->state.stage_dirty |=
3126 ice->state.stage_dirty_for_nos[IRIS_NOS_FRAMEBUFFER];
3127
3128 if (GEN_GEN == 8)
3129 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
3130 }
3131
3132 /**
3133 * The pipe->set_constant_buffer() driver hook.
3134 *
3135 * This uploads any constant data in user buffers, and references
3136 * any UBO resources containing constant data.
3137 */
3138 static void
3139 iris_set_constant_buffer(struct pipe_context *ctx,
3140 enum pipe_shader_type p_stage, unsigned index,
3141 const struct pipe_constant_buffer *input)
3142 {
3143 struct iris_context *ice = (struct iris_context *) ctx;
3144 gl_shader_stage stage = stage_from_pipe(p_stage);
3145 struct iris_shader_state *shs = &ice->state.shaders[stage];
3146 struct pipe_shader_buffer *cbuf = &shs->constbuf[index];
3147
3148 /* TODO: Only do this if the buffer changes? */
3149 pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
3150
3151 if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
3152 shs->bound_cbufs |= 1u << index;
3153
3154 if (input->user_buffer) {
3155 void *map = NULL;
3156 pipe_resource_reference(&cbuf->buffer, NULL);
3157 u_upload_alloc(ice->ctx.const_uploader, 0, input->buffer_size, 64,
3158 &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
3159
3160 if (!cbuf->buffer) {
3161 /* Allocation was unsuccessful - just unbind */
3162 iris_set_constant_buffer(ctx, p_stage, index, NULL);
3163 return;
3164 }
3165
3166 assert(map);
3167 memcpy(map, input->user_buffer, input->buffer_size);
3168 } else if (input->buffer) {
3169 pipe_resource_reference(&cbuf->buffer, input->buffer);
3170
3171 cbuf->buffer_offset = input->buffer_offset;
3172 }
3173
3174 cbuf->buffer_size =
3175 MIN2(input->buffer_size,
3176 iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset);
3177
3178 struct iris_resource *res = (void *) cbuf->buffer;
3179 res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
3180 res->bind_stages |= 1 << stage;
3181 } else {
3182 shs->bound_cbufs &= ~(1u << index);
3183 pipe_resource_reference(&cbuf->buffer, NULL);
3184 }
3185
3186 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
3187 }
3188
3189 static void
3190 upload_sysvals(struct iris_context *ice,
3191 gl_shader_stage stage)
3192 {
3193 UNUSED struct iris_genx_state *genx = ice->state.genx;
3194 struct iris_shader_state *shs = &ice->state.shaders[stage];
3195
3196 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
3197 if (!shader || shader->num_system_values == 0)
3198 return;
3199
3200 assert(shader->num_cbufs > 0);
3201
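   /* System values are packed into the last constant buffer slot, one
    * dword per value, in the order the compiler recorded them.
    */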
3202 unsigned sysval_cbuf_index = shader->num_cbufs - 1;
3203 struct pipe_shader_buffer *cbuf = &shs->constbuf[sysval_cbuf_index];
3204 unsigned upload_size = shader->num_system_values * sizeof(uint32_t);
3205 uint32_t *map = NULL;
3206
3207 assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS);
3208 u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64,
3209 &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
3210
3211 for (int i = 0; i < shader->num_system_values; i++) {
3212 uint32_t sysval = shader->system_values[i];
3213 uint32_t value = 0;
3214
3215 if (BRW_PARAM_DOMAIN(sysval) == BRW_PARAM_DOMAIN_IMAGE) {
3216 #if GEN_GEN == 8
3217 unsigned img = BRW_PARAM_IMAGE_IDX(sysval);
3218 unsigned offset = BRW_PARAM_IMAGE_OFFSET(sysval);
3219 struct brw_image_param *param =
3220 &genx->shaders[stage].image_param[img];
3221
3222 assert(offset < sizeof(struct brw_image_param));
3223 value = ((uint32_t *) param)[offset];
3224 #endif
3225 } else if (sysval == BRW_PARAM_BUILTIN_ZERO) {
3226 value = 0;
3227 } else if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
3228 int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval);
3229 int comp = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval);
3230 value = fui(ice->state.clip_planes.ucp[plane][comp]);
3231 } else if (sysval == BRW_PARAM_BUILTIN_PATCH_VERTICES_IN) {
3232 if (stage == MESA_SHADER_TESS_CTRL) {
3233 value = ice->state.vertices_per_patch;
3234 } else {
3235 assert(stage == MESA_SHADER_TESS_EVAL);
3236 const struct shader_info *tcs_info =
3237 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
3238 if (tcs_info)
3239 value = tcs_info->tess.tcs_vertices_out;
3240 else
3241 value = ice->state.vertices_per_patch;
3242 }
3243 } else if (sysval >= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X &&
3244 sysval <= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_W) {
3245 unsigned i = sysval - BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
3246 value = fui(ice->state.default_outer_level[i]);
3247 } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X) {
3248 value = fui(ice->state.default_inner_level[0]);
3249 } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y) {
3250 value = fui(ice->state.default_inner_level[1]);
3251 } else if (sysval >= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X &&
3252 sysval <= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Z) {
3253 unsigned i = sysval - BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X;
3254 value = ice->state.last_block[i];
3255 } else {
3256 assert(!"unhandled system value");
3257 }
3258
3259 *map++ = value;
3260 }
3261
3262 cbuf->buffer_size = upload_size;
3263 iris_upload_ubo_ssbo_surf_state(ice, cbuf,
3264 &shs->constbuf_surf_state[sysval_cbuf_index], false);
3265
3266 shs->sysvals_need_upload = false;
3267 }
3268
3269 /**
3270 * The pipe->set_shader_buffers() driver hook.
3271 *
3272 * This binds SSBOs and ABOs. Unfortunately, we need to stream out
3273 * SURFACE_STATE here, as the buffer offset may change each time.
3274 */
3275 static void
3276 iris_set_shader_buffers(struct pipe_context *ctx,
3277 enum pipe_shader_type p_stage,
3278 unsigned start_slot, unsigned count,
3279 const struct pipe_shader_buffer *buffers,
3280 unsigned writable_bitmask)
3281 {
3282 struct iris_context *ice = (struct iris_context *) ctx;
3283 gl_shader_stage stage = stage_from_pipe(p_stage);
3284 struct iris_shader_state *shs = &ice->state.shaders[stage];
3285
3286 unsigned modified_bits = u_bit_consecutive(start_slot, count);
3287
3288 shs->bound_ssbos &= ~modified_bits;
3289 shs->writable_ssbos &= ~modified_bits;
3290 shs->writable_ssbos |= writable_bitmask << start_slot;
3291
3292 for (unsigned i = 0; i < count; i++) {
3293 if (buffers && buffers[i].buffer) {
3294 struct iris_resource *res = (void *) buffers[i].buffer;
3295 struct pipe_shader_buffer *ssbo = &shs->ssbo[start_slot + i];
3296 struct iris_state_ref *surf_state =
3297 &shs->ssbo_surf_state[start_slot + i];
3298 pipe_resource_reference(&ssbo->buffer, &res->base);
3299 ssbo->buffer_offset = buffers[i].buffer_offset;
3300 ssbo->buffer_size =
3301 MIN2(buffers[i].buffer_size, res->bo->size - ssbo->buffer_offset);
3302
3303 shs->bound_ssbos |= 1 << (start_slot + i);
3304
3305 iris_upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, true);
3306
3307 res->bind_history |= PIPE_BIND_SHADER_BUFFER;
3308 res->bind_stages |= 1 << stage;
3309
3310 util_range_add(&res->base, &res->valid_buffer_range, ssbo->buffer_offset,
3311 ssbo->buffer_offset + ssbo->buffer_size);
3312 } else {
3313 pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL);
3314 pipe_resource_reference(&shs->ssbo_surf_state[start_slot + i].res,
3315 NULL);
3316 }
3317 }
3318
3319 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
3320 }
3321
3322 static void
3323 iris_delete_state(struct pipe_context *ctx, void *state)
3324 {
3325 free(state);
3326 }
3327
3328 /**
3329 * The pipe->set_vertex_buffers() driver hook.
3330 *
3331 * This translates pipe_vertex_buffer to our 3DSTATE_VERTEX_BUFFERS packet.
3332 */
3333 static void
3334 iris_set_vertex_buffers(struct pipe_context *ctx,
3335 unsigned start_slot, unsigned count,
3336 const struct pipe_vertex_buffer *buffers)
3337 {
3338 struct iris_context *ice = (struct iris_context *) ctx;
3339 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3340 struct iris_genx_state *genx = ice->state.genx;
3341
3342 ice->state.bound_vertex_buffers &= ~u_bit_consecutive64(start_slot, count);
3343
3344 for (unsigned i = 0; i < count; i++) {
3345 const struct pipe_vertex_buffer *buffer = buffers ? &buffers[i] : NULL;
3346 struct iris_vertex_buffer_state *state =
3347 &genx->vertex_buffers[start_slot + i];
3348
3349 if (!buffer) {
3350 pipe_resource_reference(&state->resource, NULL);
3351 continue;
3352 }
3353
3354       /* The only user buffers we should ever see are NULL bindings. */
3355 assert(!(buffer->is_user_buffer && buffer->buffer.user != NULL));
3356
3357 pipe_resource_reference(&state->resource, buffer->buffer.resource);
3358 struct iris_resource *res = (void *) state->resource;
3359
3360 state->offset = (int) buffer->buffer_offset;
3361
3362 if (res) {
3363 ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
3364 res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
3365 }
3366
3367 iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
3368 vb.VertexBufferIndex = start_slot + i;
3369 vb.AddressModifyEnable = true;
3370 vb.BufferPitch = buffer->stride;
3371 if (res) {
3372 vb.BufferSize = res->base.width0 - (int) buffer->buffer_offset;
3373 vb.BufferStartingAddress =
3374 ro_bo(NULL, res->bo->gtt_offset + (int) buffer->buffer_offset);
3375 vb.MOCS = iris_mocs(res->bo, &screen->isl_dev);
3376 } else {
3377 vb.NullVertexBuffer = true;
3378 }
3379 }
3380 }
3381
3382 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
3383 }
3384
3385 /**
3386 * Gallium CSO for vertex elements.
3387 */
3388 struct iris_vertex_element_state {
3389 uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
3390 uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
3391 uint32_t edgeflag_ve[GENX(VERTEX_ELEMENT_STATE_length)];
3392 uint32_t edgeflag_vfi[GENX(3DSTATE_VF_INSTANCING_length)];
3393 unsigned count;
3394 };
3395
3396 /**
3397  * The pipe->create_vertex_elements() driver hook.
3398  *
3399  * This translates pipe_vertex_element to our 3DSTATE_VERTEX_ELEMENTS
3400  * and 3DSTATE_VF_INSTANCING commands. The vertex_elements and vf_instancing
3401  * arrays are ready to be emitted at draw time if no EdgeFlag or SGVs are
3402  * needed; if they are, we need information that is only available at draw
3403  * time. We set up edgeflag_ve and edgeflag_vfi as alternative versions of
3404  * the last 3DSTATE_VERTEX_ELEMENT and 3DSTATE_VF_INSTANCING, to be used at
3405  * draw time if we detect that the Vertex Shader needs EdgeFlag.
3406  */
3407 static void *
3408 iris_create_vertex_elements(struct pipe_context *ctx,
3409 unsigned count,
3410 const struct pipe_vertex_element *state)
3411 {
3412 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3413 const struct gen_device_info *devinfo = &screen->devinfo;
3414 struct iris_vertex_element_state *cso =
3415 malloc(sizeof(struct iris_vertex_element_state));
3416
3417 cso->count = count;
3418
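   /* The packet's DWordLength excludes the first two DWords, and we always
    * reserve room for at least one element - a dummy one is emitted below
    * when count == 0.
    */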
3419 iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) {
3420 ve.DWordLength =
3421 1 + GENX(VERTEX_ELEMENT_STATE_length) * MAX2(count, 1) - 2;
3422 }
3423
3424 uint32_t *ve_pack_dest = &cso->vertex_elements[1];
3425 uint32_t *vfi_pack_dest = cso->vf_instancing;
3426
3427 if (count == 0) {
3428 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
3429 ve.Valid = true;
3430 ve.SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT;
3431 ve.Component0Control = VFCOMP_STORE_0;
3432 ve.Component1Control = VFCOMP_STORE_0;
3433 ve.Component2Control = VFCOMP_STORE_0;
3434 ve.Component3Control = VFCOMP_STORE_1_FP;
3435 }
3436
3437 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
3438 }
3439 }
3440
3441 for (int i = 0; i < count; i++) {
3442 const struct iris_format_info fmt =
3443 iris_format_for_usage(devinfo, state[i].src_format, 0);
3444 unsigned comp[4] = { VFCOMP_STORE_SRC, VFCOMP_STORE_SRC,
3445 VFCOMP_STORE_SRC, VFCOMP_STORE_SRC };
3446
3447 switch (isl_format_get_num_channels(fmt.fmt)) {
3448 case 0: comp[0] = VFCOMP_STORE_0; /* fallthrough */
3449 case 1: comp[1] = VFCOMP_STORE_0; /* fallthrough */
3450 case 2: comp[2] = VFCOMP_STORE_0; /* fallthrough */
3451 case 3:
3452 comp[3] = isl_format_has_int_channel(fmt.fmt) ? VFCOMP_STORE_1_INT
3453 : VFCOMP_STORE_1_FP;
3454 break;
3455 }
3456 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
3457 ve.EdgeFlagEnable = false;
3458 ve.VertexBufferIndex = state[i].vertex_buffer_index;
3459 ve.Valid = true;
3460 ve.SourceElementOffset = state[i].src_offset;
3461 ve.SourceElementFormat = fmt.fmt;
3462 ve.Component0Control = comp[0];
3463 ve.Component1Control = comp[1];
3464 ve.Component2Control = comp[2];
3465 ve.Component3Control = comp[3];
3466 }
3467
3468 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
3469 vi.VertexElementIndex = i;
3470 vi.InstancingEnable = state[i].instance_divisor > 0;
3471 vi.InstanceDataStepRate = state[i].instance_divisor;
3472 }
3473
3474 ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
3475 vfi_pack_dest += GENX(3DSTATE_VF_INSTANCING_length);
3476 }
3477
3478    /* An alternative version of the last VE and VFI is stored so it
3479     * can be used at draw time in case the Vertex Shader uses EdgeFlag.
3480     */
3481 if (count) {
3482 const unsigned edgeflag_index = count - 1;
3483 const struct iris_format_info fmt =
3484 iris_format_for_usage(devinfo, state[edgeflag_index].src_format, 0);
3485 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), cso->edgeflag_ve, ve) {
3486       ve.EdgeFlagEnable = true;
3487 ve.VertexBufferIndex = state[edgeflag_index].vertex_buffer_index;
3488 ve.Valid = true;
3489 ve.SourceElementOffset = state[edgeflag_index].src_offset;
3490 ve.SourceElementFormat = fmt.fmt;
3491 ve.Component0Control = VFCOMP_STORE_SRC;
3492 ve.Component1Control = VFCOMP_STORE_0;
3493 ve.Component2Control = VFCOMP_STORE_0;
3494 ve.Component3Control = VFCOMP_STORE_0;
3495 }
3496 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), cso->edgeflag_vfi, vi) {
3497 /* The vi.VertexElementIndex of the EdgeFlag Vertex Element is filled
3498 * at draw time, as it should change if SGVs are emitted.
3499 */
3500 vi.InstancingEnable = state[edgeflag_index].instance_divisor > 0;
3501 vi.InstanceDataStepRate = state[edgeflag_index].instance_divisor;
3502 }
3503 }
3504
3505 return cso;
3506 }
3507
3508 /**
3509 * The pipe->bind_vertex_elements_state() driver hook.
3510 */
3511 static void
3512 iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
3513 {
3514 struct iris_context *ice = (struct iris_context *) ctx;
3515 struct iris_vertex_element_state *old_cso = ice->state.cso_vertex_elements;
3516 struct iris_vertex_element_state *new_cso = state;
3517
3518 /* 3DSTATE_VF_SGVs overrides the last VE, so if the count is changing,
3519 * we need to re-emit it to ensure we're overriding the right one.
3520 */
3521 if (new_cso && cso_changed(count))
3522 ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
3523
3524 ice->state.cso_vertex_elements = state;
3525 ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
3526 }
3527
3528 /**
3529 * The pipe->create_stream_output_target() driver hook.
3530 *
3531 * "Target" here refers to a destination buffer. We translate this into
3532 * a 3DSTATE_SO_BUFFER packet. We can handle most fields, but don't yet
3533 * know which buffer this represents, or whether we ought to zero the
3534 * write-offsets, or append. Those are handled in the set() hook.
3535 */
3536 static struct pipe_stream_output_target *
3537 iris_create_stream_output_target(struct pipe_context *ctx,
3538 struct pipe_resource *p_res,
3539 unsigned buffer_offset,
3540 unsigned buffer_size)
3541 {
3542 struct iris_resource *res = (void *) p_res;
3543 struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
3544 if (!cso)
3545 return NULL;
3546
3547 res->bind_history |= PIPE_BIND_STREAM_OUTPUT;
3548
3549 pipe_reference_init(&cso->base.reference, 1);
3550 pipe_resource_reference(&cso->base.buffer, p_res);
3551 cso->base.buffer_offset = buffer_offset;
3552 cso->base.buffer_size = buffer_size;
3553 cso->base.context = ctx;
3554
3555 util_range_add(&res->base, &res->valid_buffer_range, buffer_offset,
3556 buffer_offset + buffer_size);
3557
3558 upload_state(ctx->stream_uploader, &cso->offset, sizeof(uint32_t), 4);
3559
3560 return &cso->base;
3561 }
3562
3563 static void
3564 iris_stream_output_target_destroy(struct pipe_context *ctx,
3565 struct pipe_stream_output_target *state)
3566 {
3567 struct iris_stream_output_target *cso = (void *) state;
3568
3569 pipe_resource_reference(&cso->base.buffer, NULL);
3570 pipe_resource_reference(&cso->offset.res, NULL);
3571
3572 free(cso);
3573 }
3574
3575 /**
3576 * The pipe->set_stream_output_targets() driver hook.
3577 *
3578 * At this point, we know which targets are bound to a particular index,
3579 * and also whether we want to append or start over. We can finish the
3580 * 3DSTATE_SO_BUFFER packets we started earlier.
3581 */
3582 static void
3583 iris_set_stream_output_targets(struct pipe_context *ctx,
3584 unsigned num_targets,
3585 struct pipe_stream_output_target **targets,
3586 const unsigned *offsets)
3587 {
3588 struct iris_context *ice = (struct iris_context *) ctx;
3589 struct iris_genx_state *genx = ice->state.genx;
3590 uint32_t *so_buffers = genx->so_buffers;
3591 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3592
3593 const bool active = num_targets > 0;
3594 if (ice->state.streamout_active != active) {
3595 ice->state.streamout_active = active;
3596 ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
3597
3598 /* We only emit 3DSTATE_SO_DECL_LIST when streamout is active, because
3599 * it's a non-pipelined command. If we're switching streamout on, we
3600 * may have missed emitting it earlier, so do so now. (We're already
3601 * taking a stall to update 3DSTATE_SO_BUFFERS anyway...)
3602 */
3603 if (active) {
3604 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST;
3605 } else {
3606 uint32_t flush = 0;
3607 for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
3608 struct iris_stream_output_target *tgt =
3609 (void *) ice->state.so_target[i];
3610 if (tgt) {
3611 struct iris_resource *res = (void *) tgt->base.buffer;
3612
3613 flush |= iris_flush_bits_for_history(res);
3614 iris_dirty_for_history(ice, res);
3615 }
3616 }
3617 iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
3618 "make streamout results visible", flush);
3619 }
3620 }
3621
3622 for (int i = 0; i < 4; i++) {
3623 pipe_so_target_reference(&ice->state.so_target[i],
3624 i < num_targets ? targets[i] : NULL);
3625 }
3626
3627 /* No need to update 3DSTATE_SO_BUFFER unless SOL is active. */
3628 if (!active)
3629 return;
3630
3631 for (unsigned i = 0; i < 4; i++,
3632 so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
3633
3634 struct iris_stream_output_target *tgt = (void *) ice->state.so_target[i];
3635 unsigned offset = offsets[i];
3636
3637 if (!tgt) {
3638 iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
3639 #if GEN_GEN < 12
3640 sob.SOBufferIndex = i;
3641 #else
3642 sob._3DCommandOpcode = 0;
3643 sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
3644 #endif
3645 }
3646 continue;
3647 }
3648
3649 struct iris_resource *res = (void *) tgt->base.buffer;
3650
3651 /* Note that offsets[i] will either be 0, causing us to zero
3652 * the value in the buffer, or 0xFFFFFFFF, which happens to mean
3653 * "continue appending at the existing offset."
3654 */
3655 assert(offset == 0 || offset == 0xFFFFFFFF);
3656
3657 /* We might be called by Begin (offset = 0), Pause, then Resume
3658 * (offset = 0xFFFFFFFF) before ever drawing (where these commands
3659 * will actually be sent to the GPU). In this case, we don't want
3660 * to append - we still want to do our initial zeroing.
3661 */
3662 if (!tgt->zeroed)
3663 offset = 0;
3664
3665 iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
3666 #if GEN_GEN < 12
3667 sob.SOBufferIndex = i;
3668 #else
3669 sob._3DCommandOpcode = 0;
3670 sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
3671 #endif
3672 sob.SurfaceBaseAddress =
3673 rw_bo(NULL, res->bo->gtt_offset + tgt->base.buffer_offset,
3674 IRIS_DOMAIN_OTHER_WRITE);
3675 sob.SOBufferEnable = true;
3676 sob.StreamOffsetWriteEnable = true;
3677 sob.StreamOutputBufferOffsetAddressEnable = true;
3678 sob.MOCS = iris_mocs(res->bo, &screen->isl_dev);
3679
3680 sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
3681 sob.StreamOffset = offset;
3682 sob.StreamOutputBufferOffsetAddress =
3683 rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset +
3684 tgt->offset.offset, IRIS_DOMAIN_OTHER_WRITE);
3685 }
3686 }
3687
3688 ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
3689 }
3690
3691 /**
3692 * An iris-vtable helper for encoding the 3DSTATE_SO_DECL_LIST and
3693 * 3DSTATE_STREAMOUT packets.
3694 *
3695 * 3DSTATE_SO_DECL_LIST is a list of shader outputs we want the streamout
3696 * hardware to record. We can create it entirely based on the shader, with
3697 * no dynamic state dependencies.
3698 *
3699 * 3DSTATE_STREAMOUT is an annoying mix of shader-based information and
3700 * state-based settings. We capture the shader-related ones here, and merge
3701 * the rest in at draw time.
3702 */
3703 static uint32_t *
3704 iris_create_so_decl_list(const struct pipe_stream_output_info *info,
3705 const struct brw_vue_map *vue_map)
3706 {
3707 struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
3708 int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3709 int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3710 int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3711 int max_decls = 0;
3712 STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);
3713
3714 memset(so_decl, 0, sizeof(so_decl));
3715
3716 /* Construct the list of SO_DECLs to be emitted. The formatting of the
3717 * command feels strange -- each dword pair contains a SO_DECL per stream.
3718 */
3719 for (unsigned i = 0; i < info->num_outputs; i++) {
3720 const struct pipe_stream_output *output = &info->output[i];
3721 const int buffer = output->output_buffer;
3722 const int varying = output->register_index;
3723 const unsigned stream_id = output->stream;
3724 assert(stream_id < MAX_VERTEX_STREAMS);
3725
3726 buffer_mask[stream_id] |= 1 << buffer;
3727
3728 assert(vue_map->varying_to_slot[varying] >= 0);
3729
3730 /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
3731 * array. Instead, it simply increments DstOffset for the following
3732 * input by the number of components that should be skipped.
3733 *
3734 * Our hardware is unusual in that it requires us to program SO_DECLs
3735 * for fake "hole" components, rather than simply taking the offset
3736 * for each real varying. Each hole can have size 1, 2, 3, or 4; we
3737 * program as many size = 4 holes as we can, then a final hole to
3738 * accommodate the final 1, 2, or 3 remaining.
3739 */
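      /* For example, a skip of 6 components becomes one size-4 hole
       * followed by one size-2 hole.
       */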
3740 int skip_components = output->dst_offset - next_offset[buffer];
3741
3742 while (skip_components > 0) {
3743 so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
3744 .HoleFlag = 1,
3745 .OutputBufferSlot = output->output_buffer,
3746 .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
3747 };
3748 skip_components -= 4;
3749 }
3750
3751 next_offset[buffer] = output->dst_offset + output->num_components;
3752
3753 so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
3754 .OutputBufferSlot = output->output_buffer,
3755 .RegisterIndex = vue_map->varying_to_slot[varying],
3756 .ComponentMask =
3757 ((1 << output->num_components) - 1) << output->start_component,
3758 };
3759
3760 if (decls[stream_id] > max_decls)
3761 max_decls = decls[stream_id];
3762 }
3763
3764 unsigned dwords = GENX(3DSTATE_STREAMOUT_length) + (3 + 2 * max_decls);
3765 uint32_t *map = ralloc_size(NULL, sizeof(uint32_t) * dwords);
3766 uint32_t *so_decl_map = map + GENX(3DSTATE_STREAMOUT_length);
3767
3768 iris_pack_command(GENX(3DSTATE_STREAMOUT), map, sol) {
3769 int urb_entry_read_offset = 0;
3770 int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
3771 urb_entry_read_offset;
3772
3773 /* We always read the whole vertex. This could be reduced at some
3774 * point by reading less and offsetting the register index in the
3775 * SO_DECLs.
3776 */
3777 sol.Stream0VertexReadOffset = urb_entry_read_offset;
3778 sol.Stream0VertexReadLength = urb_entry_read_length - 1;
3779 sol.Stream1VertexReadOffset = urb_entry_read_offset;
3780 sol.Stream1VertexReadLength = urb_entry_read_length - 1;
3781 sol.Stream2VertexReadOffset = urb_entry_read_offset;
3782 sol.Stream2VertexReadLength = urb_entry_read_length - 1;
3783 sol.Stream3VertexReadOffset = urb_entry_read_offset;
3784 sol.Stream3VertexReadLength = urb_entry_read_length - 1;
3785
3786 /* Set buffer pitches; 0 means unbound. */
3787 sol.Buffer0SurfacePitch = 4 * info->stride[0];
3788 sol.Buffer1SurfacePitch = 4 * info->stride[1];
3789 sol.Buffer2SurfacePitch = 4 * info->stride[2];
3790 sol.Buffer3SurfacePitch = 4 * info->stride[3];
3791 }
3792
3793 iris_pack_command(GENX(3DSTATE_SO_DECL_LIST), so_decl_map, list) {
3794 list.DWordLength = 3 + 2 * max_decls - 2;
3795 list.StreamtoBufferSelects0 = buffer_mask[0];
3796 list.StreamtoBufferSelects1 = buffer_mask[1];
3797 list.StreamtoBufferSelects2 = buffer_mask[2];
3798 list.StreamtoBufferSelects3 = buffer_mask[3];
3799 list.NumEntries0 = decls[0];
3800 list.NumEntries1 = decls[1];
3801 list.NumEntries2 = decls[2];
3802 list.NumEntries3 = decls[3];
3803 }
3804
3805 for (int i = 0; i < max_decls; i++) {
3806 iris_pack_state(GENX(SO_DECL_ENTRY), so_decl_map + 3 + i * 2, entry) {
3807 entry.Stream0Decl = so_decl[0][i];
3808 entry.Stream1Decl = so_decl[1][i];
3809 entry.Stream2Decl = so_decl[2][i];
3810 entry.Stream3Decl = so_decl[3][i];
3811 }
3812 }
3813
3814 return map;
3815 }
3816
3817 static void
3818 iris_compute_sbe_urb_read_interval(uint64_t fs_input_slots,
3819 const struct brw_vue_map *last_vue_map,
3820 bool two_sided_color,
3821 unsigned *out_offset,
3822 unsigned *out_length)
3823 {
3824 /* The compiler computes the first URB slot without considering COL/BFC
3825 * swizzling (because it doesn't know whether it's enabled), so we need
3826 * to do that here too. This may result in a smaller offset, which
3827 * should be safe.
3828 */
3829 const unsigned first_slot =
3830 brw_compute_first_urb_slot_required(fs_input_slots, last_vue_map);
3831
3832 /* This becomes the URB read offset (counted in pairs of slots). */
3833 assert(first_slot % 2 == 0);
3834 *out_offset = first_slot / 2;
3835
3836 /* We need to adjust the inputs read to account for front/back color
3837 * swizzling, as it can make the URB length longer.
3838 */
3839 for (int c = 0; c <= 1; c++) {
3840 if (fs_input_slots & (VARYING_BIT_COL0 << c)) {
3841 /* If two sided color is enabled, the fragment shader's gl_Color
3842 * (COL0) input comes from either the gl_FrontColor (COL0) or
3843 * gl_BackColor (BFC0) input varyings. Mark BFC as used, too.
3844 */
3845 if (two_sided_color)
3846 fs_input_slots |= (VARYING_BIT_BFC0 << c);
3847
3848 /* If front color isn't written, we opt to give them back color
3849 * instead of an undefined value. Switch from COL to BFC.
3850 */
3851 if (last_vue_map->varying_to_slot[VARYING_SLOT_COL0 + c] == -1) {
3852 fs_input_slots &= ~(VARYING_BIT_COL0 << c);
3853 fs_input_slots |= (VARYING_BIT_BFC0 << c);
3854 }
3855 }
3856 }
3857
3858 /* Compute the minimum URB Read Length necessary for the FS inputs.
3859 *
3860 * From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
3861 * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
3862 *
3863 * "This field should be set to the minimum length required to read the
3864 * maximum source attribute. The maximum source attribute is indicated
3865 * by the maximum value of the enabled Attribute # Source Attribute if
3866 * Attribute Swizzle Enable is set, Number of Output Attributes-1 if
3867 * enable is not set.
3868 * read_length = ceiling((max_source_attr + 1) / 2)
3869 *
3870 * [errata] Corruption/Hang possible if length programmed larger than
3871 * recommended"
3872 *
3873 * Similar text exists for Ivy Bridge.
3874 *
3875 * We find the last URB slot that's actually read by the FS.
3876 */
3877 unsigned last_read_slot = last_vue_map->num_slots - 1;
3878 while (last_read_slot > first_slot && !(fs_input_slots &
3879 (1ull << last_vue_map->slot_to_varying[last_read_slot])))
3880 --last_read_slot;
3881
3882 /* The URB read length is the difference of the two, counted in pairs. */
3883 *out_length = DIV_ROUND_UP(last_read_slot - first_slot + 1, 2);
3884 }
3885
3886 static void
3887 iris_emit_sbe_swiz(struct iris_batch *batch,
3888 const struct iris_context *ice,
3889 unsigned urb_read_offset,
3890 unsigned sprite_coord_enables)
3891 {
3892 struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = {};
3893 const struct brw_wm_prog_data *wm_prog_data = (void *)
3894 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
3895 const struct brw_vue_map *vue_map = ice->shaders.last_vue_map;
3896 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
3897
3898 /* XXX: this should be generated when putting programs in place */
3899
3900 for (uint8_t idx = 0; idx < wm_prog_data->urb_setup_attribs_count; idx++) {
3901 const uint8_t fs_attr = wm_prog_data->urb_setup_attribs[idx];
3902 const int input_index = wm_prog_data->urb_setup[fs_attr];
3903 if (input_index < 0 || input_index >= 16)
3904 continue;
3905
3906 struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr =
3907 &attr_overrides[input_index];
3908 int slot = vue_map->varying_to_slot[fs_attr];
3909
3910 /* Viewport and Layer are stored in the VUE header. We need to override
3911 * them to zero if earlier stages didn't write them, as GL requires that
3912 * they read back as zero when not explicitly set.
3913 */
3914 switch (fs_attr) {
3915 case VARYING_SLOT_VIEWPORT:
3916 case VARYING_SLOT_LAYER:
3917 attr->ComponentOverrideX = true;
3918 attr->ComponentOverrideW = true;
3919 attr->ConstantSource = CONST_0000;
3920
3921 if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
3922 attr->ComponentOverrideY = true;
3923 if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
3924 attr->ComponentOverrideZ = true;
3925 continue;
3926
3927 case VARYING_SLOT_PRIMITIVE_ID:
3928 /* Override if the previous shader stage didn't write gl_PrimitiveID. */
3929 if (slot == -1) {
3930 attr->ComponentOverrideX = true;
3931 attr->ComponentOverrideY = true;
3932 attr->ComponentOverrideZ = true;
3933 attr->ComponentOverrideW = true;
3934 attr->ConstantSource = PRIM_ID;
3935 continue;
3936 }
3937
3938 default:
3939 break;
3940 }
3941
3942 if (sprite_coord_enables & (1 << input_index))
3943 continue;
3944
3945 /* If there was only a back color written but not front, use back
3946 * as the color instead of undefined.
3947 */
3948 if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
3949 slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
3950 if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
3951 slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];
3952
3953 /* Not written by the previous stage - undefined. */
3954 if (slot == -1) {
3955 attr->ComponentOverrideX = true;
3956 attr->ComponentOverrideY = true;
3957 attr->ComponentOverrideZ = true;
3958 attr->ComponentOverrideW = true;
3959 attr->ConstantSource = CONST_0001_FLOAT;
3960 continue;
3961 }
3962
3963 /* Compute the location of the attribute relative to the read offset,
3964 * which is counted in 256-bit increments (two 128-bit VUE slots).
3965 */
3966 const int source_attr = slot - 2 * urb_read_offset;
3967 assert(source_attr >= 0 && source_attr <= 32);
3968 attr->SourceAttribute = source_attr;
3969
3970 /* If we are doing two-sided color, and the VUE slot following this one
3971 * represents a back-facing color, then we need to instruct the SF unit
3972 * to do back-facing swizzling.
3973 */
3974 if (cso_rast->light_twoside &&
3975 ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
3976 vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
3977 (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
3978 vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1)))
3979 attr->SwizzleSelect = INPUTATTR_FACING;
3980 }
3981
3982 iris_emit_cmd(batch, GENX(3DSTATE_SBE_SWIZ), sbes) {
3983 for (int i = 0; i < 16; i++)
3984 sbes.Attribute[i] = attr_overrides[i];
3985 }
3986 }
3987
3988 static unsigned
3989 iris_calculate_point_sprite_overrides(const struct brw_wm_prog_data *prog_data,
3990 const struct iris_rasterizer_state *cso)
3991 {
3992 unsigned overrides = 0;
3993
3994 if (prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
3995 overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_PNTC];
3996
3997 for (int i = 0; i < 8; i++) {
3998 if ((cso->sprite_coord_enable & (1 << i)) &&
3999 prog_data->urb_setup[VARYING_SLOT_TEX0 + i] != -1)
4000 overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_TEX0 + i];
4001 }
4002
4003 return overrides;
4004 }
4005
4006 static void
4007 iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
4008 {
4009 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4010 const struct brw_wm_prog_data *wm_prog_data = (void *)
4011 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
4012 const struct shader_info *fs_info =
4013 iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
4014
4015 unsigned urb_read_offset, urb_read_length;
4016 iris_compute_sbe_urb_read_interval(fs_info->inputs_read,
4017 ice->shaders.last_vue_map,
4018 cso_rast->light_twoside,
4019 &urb_read_offset, &urb_read_length);
4020
4021 unsigned sprite_coord_overrides =
4022 iris_calculate_point_sprite_overrides(wm_prog_data, cso_rast);
4023
4024 iris_emit_cmd(batch, GENX(3DSTATE_SBE), sbe) {
4025 sbe.AttributeSwizzleEnable = true;
4026 sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
4027 sbe.PointSpriteTextureCoordinateOrigin = cso_rast->sprite_coord_mode;
4028 sbe.VertexURBEntryReadOffset = urb_read_offset;
4029 sbe.VertexURBEntryReadLength = urb_read_length;
4030 sbe.ForceVertexURBEntryReadOffset = true;
4031 sbe.ForceVertexURBEntryReadLength = true;
4032 sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
4033 sbe.PointSpriteTextureCoordinateEnable = sprite_coord_overrides;
4034 #if GEN_GEN >= 9
4035 for (int i = 0; i < 32; i++) {
4036 sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
4037 }
4038 #endif
4039 }
4040
4041 iris_emit_sbe_swiz(batch, ice, urb_read_offset, sprite_coord_overrides);
4042 }
4043
4044 /* ------------------------------------------------------------------- */
4045
4046 /**
4047 * Populate VS program key fields based on the current state.
4048 */
4049 static void
4050 iris_populate_vs_key(const struct iris_context *ice,
4051 const struct shader_info *info,
4052 gl_shader_stage last_stage,
4053 struct iris_vs_prog_key *key)
4054 {
4055 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4056
4057 if (info->clip_distance_array_size == 0 &&
4058 (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4059 last_stage == MESA_SHADER_VERTEX)
4060 key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4061 }
4062
4063 /**
4064 * Populate TCS program key fields based on the current state.
4065 */
4066 static void
4067 iris_populate_tcs_key(const struct iris_context *ice,
4068 struct iris_tcs_prog_key *key)
4069 {
4070 }
4071
4072 /**
4073 * Populate TES program key fields based on the current state.
4074 */
4075 static void
4076 iris_populate_tes_key(const struct iris_context *ice,
4077 const struct shader_info *info,
4078 gl_shader_stage last_stage,
4079 struct iris_tes_prog_key *key)
4080 {
4081 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4082
4083 if (info->clip_distance_array_size == 0 &&
4084 (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4085 last_stage == MESA_SHADER_TESS_EVAL)
4086 key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4087 }
4088
4089 /**
4090 * Populate GS program key fields based on the current state.
4091 */
4092 static void
4093 iris_populate_gs_key(const struct iris_context *ice,
4094 const struct shader_info *info,
4095 gl_shader_stage last_stage,
4096 struct iris_gs_prog_key *key)
4097 {
4098 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4099
4100 if (info->clip_distance_array_size == 0 &&
4101 (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4102 last_stage == MESA_SHADER_GEOMETRY)
4103 key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4104 }
4105
4106 /**
4107 * Populate FS program key fields based on the current state.
4108 */
4109 static void
4110 iris_populate_fs_key(const struct iris_context *ice,
4111 const struct shader_info *info,
4112 struct iris_fs_prog_key *key)
4113 {
4114 struct iris_screen *screen = (void *) ice->ctx.screen;
4115 const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
4116 const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa;
4117 const struct iris_rasterizer_state *rast = ice->state.cso_rast;
4118 const struct iris_blend_state *blend = ice->state.cso_blend;
4119
4120 key->nr_color_regions = fb->nr_cbufs;
4121
4122 key->clamp_fragment_color = rast->clamp_fragment_color;
4123
4124 key->alpha_to_coverage = blend->alpha_to_coverage;
4125
4126 key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha.enabled;
4127
4128 key->flat_shade = rast->flatshade &&
4129 (info->inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1));
4130
4131 key->persample_interp = rast->force_persample_interp;
4132 key->multisample_fbo = rast->multisample && fb->samples > 1;
4133
4134 key->coherent_fb_fetch = GEN_GEN >= 9;
4135
4136 key->force_dual_color_blend =
4137 screen->driconf.dual_color_blend_by_location &&
4138 (blend->blend_enables & 1) && blend->dual_color_blending;
4139
4140 /* TODO: Respect glHint for key->high_quality_derivatives */
4141 }
4142
4143 static void
4144 iris_populate_cs_key(const struct iris_context *ice,
4145 struct iris_cs_prog_key *key)
4146 {
4147 }
4148
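/* Compute the Kernel Start Pointer for a shader variant: the offset of its
 * assembly from the base address of the memory zone containing it.
 */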
4149 static uint64_t
4150 KSP(const struct iris_compiled_shader *shader)
4151 {
4152 struct iris_resource *res = (void *) shader->assembly.res;
4153 return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
4154 }
4155
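/* Fill out the thread-dispatch fields common to 3DSTATE_VS/HS/DS/GS:
 * kernel start pointer, binding table size, URB read parameters, and,
 * when the shader spills, a pointer to per-thread scratch space (whose
 * power-of-two size is encoded via ffs()).
 */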
4156 #define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage) \
4157 pkt.KernelStartPointer = KSP(shader); \
4158 pkt.BindingTableEntryCount = shader->bt.size_bytes / 4; \
4159 pkt.FloatingPointMode = prog_data->use_alt_mode; \
4160 \
4161 pkt.DispatchGRFStartRegisterForURBData = \
4162 prog_data->dispatch_grf_start_reg; \
4163 pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length; \
4164 pkt.prefix##URBEntryReadOffset = 0; \
4165 \
4166 pkt.StatisticsEnable = true; \
4167 pkt.Enable = true; \
4168 \
4169 if (prog_data->total_scratch) { \
4170 struct iris_bo *bo = \
4171 iris_get_scratch_space(ice, prog_data->total_scratch, stage); \
4172 uint32_t scratch_addr = bo->gtt_offset; \
4173 pkt.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11; \
4174 pkt.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr, \
4175 IRIS_DOMAIN_NONE); \
4176 }
4177
4178 /**
4179 * Encode most of 3DSTATE_VS based on the compiled shader.
4180 */
4181 static void
4182 iris_store_vs_state(struct iris_context *ice,
4183 const struct gen_device_info *devinfo,
4184 struct iris_compiled_shader *shader)
4185 {
4186 struct brw_stage_prog_data *prog_data = shader->prog_data;
4187 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4188
4189 iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
4190 INIT_THREAD_DISPATCH_FIELDS(vs, Vertex, MESA_SHADER_VERTEX);
4191 vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
4192 vs.SIMD8DispatchEnable = true;
4193 vs.UserClipDistanceCullTestEnableBitmask =
4194 vue_prog_data->cull_distance_mask;
4195 }
4196 }
4197
4198 /**
4199 * Encode most of 3DSTATE_HS based on the compiled shader.
4200 */
4201 static void
4202 iris_store_tcs_state(struct iris_context *ice,
4203 const struct gen_device_info *devinfo,
4204 struct iris_compiled_shader *shader)
4205 {
4206 struct brw_stage_prog_data *prog_data = shader->prog_data;
4207 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4208 struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;
4209
4210 iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
4211 INIT_THREAD_DISPATCH_FIELDS(hs, Vertex, MESA_SHADER_TESS_CTRL);
4212
4213 #if GEN_GEN >= 12
4214 /* GEN:BUG:1604578095:
4215 *
4216     * A hang occurs when the maximum number of threads is less than
4217     * twice the instance count; it must be more than twice the
4218     * instance count.
4219 */
4220 assert((devinfo->max_tcs_threads / 2) > tcs_prog_data->instances);
4221 hs.DispatchGRFStartRegisterForURBData = prog_data->dispatch_grf_start_reg & 0x1f;
4222 hs.DispatchGRFStartRegisterForURBData5 = prog_data->dispatch_grf_start_reg >> 5;
4223 #endif
4224
4225 hs.InstanceCount = tcs_prog_data->instances - 1;
4226 hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
4227 hs.IncludeVertexHandles = true;
4228
4229 #if GEN_GEN == 12
4230 /* Patch Count threshold specifies the maximum number of patches that
4231 * will be accumulated before a thread dispatch is forced.
4232 */
4233 hs.PatchCountThreshold = tcs_prog_data->patch_count_threshold;
4234 #endif
4235
4236 #if GEN_GEN >= 9
4237 hs.DispatchMode = vue_prog_data->dispatch_mode;
4238 hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
4239 #endif
4240 }
4241 }
4242
4243 /**
4244 * Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
4245 */
4246 static void
4247 iris_store_tes_state(struct iris_context *ice,
4248 const struct gen_device_info *devinfo,
4249 struct iris_compiled_shader *shader)
4250 {
4251 struct brw_stage_prog_data *prog_data = shader->prog_data;
4252 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4253 struct brw_tes_prog_data *tes_prog_data = (void *) prog_data;
4254
4255 uint32_t *te_state = (void *) shader->derived_data;
4256 uint32_t *ds_state = te_state + GENX(3DSTATE_TE_length);
4257
4258 iris_pack_command(GENX(3DSTATE_TE), te_state, te) {
4259 te.Partitioning = tes_prog_data->partitioning;
4260 te.OutputTopology = tes_prog_data->output_topology;
4261 te.TEDomain = tes_prog_data->domain;
4262 te.TEEnable = true;
4263 te.MaximumTessellationFactorOdd = 63.0;
4264 te.MaximumTessellationFactorNotOdd = 64.0;
4265 }
4266
4267 iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
4268 INIT_THREAD_DISPATCH_FIELDS(ds, Patch, MESA_SHADER_TESS_EVAL);
4269
4270 ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
4271 ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
4272 ds.ComputeWCoordinateEnable =
4273 tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;
4274
4275 ds.UserClipDistanceCullTestEnableBitmask =
4276 vue_prog_data->cull_distance_mask;
4277    }
4279 }
4280
4281 /**
4282 * Encode most of 3DSTATE_GS based on the compiled shader.
4283 */
4284 static void
4285 iris_store_gs_state(struct iris_context *ice,
4286 const struct gen_device_info *devinfo,
4287 struct iris_compiled_shader *shader)
4288 {
4289 struct brw_stage_prog_data *prog_data = shader->prog_data;
4290 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4291 struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;
4292
4293 iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
4294 INIT_THREAD_DISPATCH_FIELDS(gs, Vertex, MESA_SHADER_GEOMETRY);
4295
4296 gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
4297 gs.OutputTopology = gs_prog_data->output_topology;
4298 gs.ControlDataHeaderSize =
4299 gs_prog_data->control_data_header_size_hwords;
4300 gs.InstanceControl = gs_prog_data->invocations - 1;
4301 gs.DispatchMode = DISPATCH_MODE_SIMD8;
4302 gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
4303 gs.ControlDataFormat = gs_prog_data->control_data_format;
4304 gs.ReorderMode = TRAILING;
4305 gs.ExpectedVertexCount = gs_prog_data->vertices_in;
4306 gs.MaximumNumberofThreads =
4307 GEN_GEN == 8 ? (devinfo->max_gs_threads / 2 - 1)
4308 : (devinfo->max_gs_threads - 1);
4309
4310 if (gs_prog_data->static_vertex_count != -1) {
4311 gs.StaticOutput = true;
4312 gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
4313 }
4314 gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;
4315
4316 gs.UserClipDistanceCullTestEnableBitmask =
4317 vue_prog_data->cull_distance_mask;
4318
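      /* The output read offset is counted in 256-bit units (pairs of
       * slots); starting at 1 skips the first two VUE slots, and the
       * length covers the rest, clamped to at least one unit.
       */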
4319 const int urb_entry_write_offset = 1;
4320 const uint32_t urb_entry_output_length =
4321 DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
4322 urb_entry_write_offset;
4323
4324 gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
4325 gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
4326 }
4327 }
4328
4329 /**
4330 * Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
4331 */
4332 static void
4333 iris_store_fs_state(struct iris_context *ice,
4334 const struct gen_device_info *devinfo,
4335 struct iris_compiled_shader *shader)
4336 {
4337 struct brw_stage_prog_data *prog_data = shader->prog_data;
4338 struct brw_wm_prog_data *wm_prog_data = (void *) shader->prog_data;
4339
4340 uint32_t *ps_state = (void *) shader->derived_data;
4341 uint32_t *psx_state = ps_state + GENX(3DSTATE_PS_length);
4342
4343 iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
4344 ps.VectorMaskEnable = true;
4345 ps.BindingTableEntryCount = shader->bt.size_bytes / 4;
4346 ps.FloatingPointMode = prog_data->use_alt_mode;
4347 ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);
4348
4349 ps.PushConstantEnable = prog_data->ubo_ranges[0].length > 0;
4350
4351 /* From the documentation for this packet:
4352 * "If the PS kernel does not need the Position XY Offsets to
4353 * compute a Position Value, then this field should be programmed
4354 * to POSOFFSET_NONE."
4355 *
4356 * "SW Recommendation: If the PS kernel needs the Position Offsets
4357 * to compute a Position XY value, this field should match Position
4358 * ZW Interpolation Mode to ensure a consistent position.xyzw
4359 * computation."
4360 *
4361      * We only require XY sample offsets, so this recommendation doesn't
4362      * seem useful at the moment. We may need it in the future.
4363 */
4364 ps.PositionXYOffsetSelect =
4365 wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;
4366
4367 if (prog_data->total_scratch) {
4368 struct iris_bo *bo =
4369 iris_get_scratch_space(ice, prog_data->total_scratch,
4370 MESA_SHADER_FRAGMENT);
4371 uint32_t scratch_addr = bo->gtt_offset;
4372 ps.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
4373 ps.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr,
4374 IRIS_DOMAIN_NONE);
4375 }
4376 }
4377
4378 iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
4379 psx.PixelShaderValid = true;
4380 psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
4381 psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
4382 psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
4383 psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
4384 psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
4385 psx.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;
4386 psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
4387
4388 #if GEN_GEN >= 9
4389 psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
4390 psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
4391 #endif
4392 }
4393 }
4394
4395 /**
4396  * Encode most of INTERFACE_DESCRIPTOR_DATA based on the compiled shader.
4397  *
4398  * (Compute shaders have no 3DSTATE packet; this is their derived data.)
4399  */
4400 static void
4401 iris_store_cs_state(struct iris_context *ice,
4402 const struct gen_device_info *devinfo,
4403 struct iris_compiled_shader *shader)
4404 {
4405 struct brw_stage_prog_data *prog_data = shader->prog_data;
4406 struct brw_cs_prog_data *cs_prog_data = (void *) shader->prog_data;
4407 void *map = shader->derived_data;
4408
4409 iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
4410 desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
4411 desc.SharedLocalMemorySize =
4412 encode_slm_size(GEN_GEN, prog_data->total_shared);
4413 desc.BarrierEnable = cs_prog_data->uses_barrier;
4414 desc.CrossThreadConstantDataReadLength =
4415 cs_prog_data->push.cross_thread.regs;
4416 #if GEN_GEN >= 12
4417 /* TODO: Check if we are missing workarounds and enable mid-thread
4418 * preemption.
4419 *
4420 * We still have issues with mid-thread preemption (it was already
4421 * disabled by the kernel on gen11, due to missing workarounds). It's
4422 * possible that we are just missing some workarounds, and could enable
4423        * it later, but for now let's disable it to fix a GPU hang in compute
4424        * in Car Chase (and possibly more).
4425 */
4426 desc.ThreadPreemptionDisable = true;
4427 #endif
4428 }
4429 }
4430
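/**
 * Compute the size of the derived data (shader command packets).
 *
 * This must match the data written by the iris_store_xs_state() functions.
 */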
4431 static unsigned
4432 iris_derived_program_state_size(enum iris_program_cache_id cache_id)
4433 {
4434 assert(cache_id <= IRIS_CACHE_BLORP);
4435
4436 static const unsigned dwords[] = {
4437 [IRIS_CACHE_VS] = GENX(3DSTATE_VS_length),
4438 [IRIS_CACHE_TCS] = GENX(3DSTATE_HS_length),
4439 [IRIS_CACHE_TES] = GENX(3DSTATE_TE_length) + GENX(3DSTATE_DS_length),
4440 [IRIS_CACHE_GS] = GENX(3DSTATE_GS_length),
4441 [IRIS_CACHE_FS] =
4442 GENX(3DSTATE_PS_length) + GENX(3DSTATE_PS_EXTRA_length),
4443 [IRIS_CACHE_CS] = GENX(INTERFACE_DESCRIPTOR_DATA_length),
4444 [IRIS_CACHE_BLORP] = 0,
4445 };
4446
4447 return sizeof(uint32_t) * dwords[cache_id];
4448 }
4449
4450 /**
4451 * Create any state packets corresponding to the given shader stage
4452 * (i.e. 3DSTATE_VS) and save them as "derived data" in the shader variant.
4453 * This means that we can look up a program in the in-memory cache and
4454 * get most of the state packet without having to reconstruct it.
4455 */
4456 static void
4457 iris_store_derived_program_state(struct iris_context *ice,
4458 enum iris_program_cache_id cache_id,
4459 struct iris_compiled_shader *shader)
4460 {
4461 struct iris_screen *screen = (void *) ice->ctx.screen;
4462 const struct gen_device_info *devinfo = &screen->devinfo;
4463
4464 switch (cache_id) {
4465 case IRIS_CACHE_VS:
4466 iris_store_vs_state(ice, devinfo, shader);
4467 break;
4468 case IRIS_CACHE_TCS:
4469 iris_store_tcs_state(ice, devinfo, shader);
4470 break;
4471 case IRIS_CACHE_TES:
4472 iris_store_tes_state(ice, devinfo, shader);
4473 break;
4474 case IRIS_CACHE_GS:
4475 iris_store_gs_state(ice, devinfo, shader);
4476 break;
4477 case IRIS_CACHE_FS:
4478 iris_store_fs_state(ice, devinfo, shader);
4479 break;
4480    case IRIS_CACHE_CS:
4481       iris_store_cs_state(ice, devinfo, shader);
      break;
4482    case IRIS_CACHE_BLORP:
4483 break;
4484 default:
4485 break;
4486 }
4487 }
4488
4489 /* ------------------------------------------------------------------- */
4490
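/* The 3DCommandSubOpcode of each stage's 3DSTATE_CONSTANT_* packet,
 * indexed by shader stage (e.g. 3DSTATE_CONSTANT_VS is subopcode 21).
 */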
4491 static const uint32_t push_constant_opcodes[] = {
4492 [MESA_SHADER_VERTEX] = 21,
4493 [MESA_SHADER_TESS_CTRL] = 25, /* HS */
4494 [MESA_SHADER_TESS_EVAL] = 26, /* DS */
4495 [MESA_SHADER_GEOMETRY] = 22,
4496 [MESA_SHADER_FRAGMENT] = 23,
4497 [MESA_SHADER_COMPUTE] = 0,
4498 };
4499
4500 static uint32_t
4501 use_null_surface(struct iris_batch *batch, struct iris_context *ice)
4502 {
4503 struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);
4504
4505 iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);
4506
4507 return ice->state.unbound_tex.offset;
4508 }
4509
4510 static uint32_t
4511 use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
4512 {
4513 /* If set_framebuffer_state() was never called, fall back to 1x1x1 */
4514 if (!ice->state.null_fb.res)
4515 return use_null_surface(batch, ice);
4516
4517 struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);
4518
4519 iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);
4520
4521 return ice->state.null_fb.offset;
4522 }
4523
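/* A resource's SURFACE_STATEs are stored contiguously, one per possible
 * aux mode.  Counting the modes enabled below aux_usage gives the index
 * of the state we want: e.g. with aux_modes = NONE | CCS_E and
 * aux_usage = CCS_E, one lower mode is set, so we skip one state.
 */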
4524 static uint32_t
4525 surf_state_offset_for_aux(struct iris_resource *res,
4526 unsigned aux_modes,
4527 enum isl_aux_usage aux_usage)
4528 {
4529 assert(aux_modes & (1 << aux_usage));
4530 return SURFACE_STATE_ALIGNMENT *
4531 util_bitcount(aux_modes & ((1 << aux_usage) - 1));
4532 }
4533
4534 #if GEN_GEN == 9
4535 static void
4536 surf_state_update_clear_value(struct iris_batch *batch,
4537 struct iris_resource *res,
4538 struct iris_state_ref *state,
4539 unsigned aux_modes,
4540 enum isl_aux_usage aux_usage)
4541 {
4542 struct isl_device *isl_dev = &batch->screen->isl_dev;
4543 struct iris_bo *state_bo = iris_resource_bo(state->res);
4544 uint64_t real_offset = state->offset + IRIS_MEMZONE_BINDER_START;
4545 uint32_t offset_into_bo = real_offset - state_bo->gtt_offset;
4546 uint32_t clear_offset = offset_into_bo +
4547 isl_dev->ss.clear_value_offset +
4548 surf_state_offset_for_aux(res, aux_modes, aux_usage);
4549 uint32_t *color = res->aux.clear_color.u32;
4550
4551 assert(isl_dev->ss.clear_value_size == 16);
4552
4553 if (aux_usage == ISL_AUX_USAGE_HIZ) {
4554 iris_emit_pipe_control_write(batch, "update fast clear value (Z)",
4555 PIPE_CONTROL_WRITE_IMMEDIATE,
4556 state_bo, clear_offset, color[0]);
4557 } else {
4558 iris_emit_pipe_control_write(batch, "update fast clear color (RG__)",
4559 PIPE_CONTROL_WRITE_IMMEDIATE,
4560 state_bo, clear_offset,
4561 (uint64_t) color[0] |
4562 (uint64_t) color[1] << 32);
4563 iris_emit_pipe_control_write(batch, "update fast clear color (__BA)",
4564 PIPE_CONTROL_WRITE_IMMEDIATE,
4565 state_bo, clear_offset + 8,
4566 (uint64_t) color[2] |
4567 (uint64_t) color[3] << 32);
4568 }
4569
4570 iris_emit_pipe_control_flush(batch,
4571 "update fast clear: state cache invalidate",
4572 PIPE_CONTROL_FLUSH_ENABLE |
4573 PIPE_CONTROL_STATE_CACHE_INVALIDATE);
4574 }
4575 #endif
4576
4577 static void
4578 update_clear_value(struct iris_context *ice,
4579 struct iris_batch *batch,
4580 struct iris_resource *res,
4581 struct iris_surface_state *surf_state,
4582 unsigned all_aux_modes,
4583 struct isl_view *view)
4584 {
4585 UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
4586 UNUSED unsigned aux_modes = all_aux_modes;
4587
4588 /* We only need to update the clear color in the surface state for gen8 and
4589 * gen9. Newer gens can read it directly from the clear color state buffer.
4590 */
4591 #if GEN_GEN == 9
4592 /* Skip updating the ISL_AUX_USAGE_NONE surface state */
4593 aux_modes &= ~(1 << ISL_AUX_USAGE_NONE);
4594
4595 while (aux_modes) {
4596 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
4597
4598 surf_state_update_clear_value(batch, res, &surf_state->ref,
4599 all_aux_modes, aux_usage);
4600 }
4601 #elif GEN_GEN == 8
4602 /* TODO: Could update rather than re-filling */
4603 alloc_surface_states(surf_state, all_aux_modes);
4604
4605 void *map = surf_state->cpu;
4606
4607 while (aux_modes) {
4608 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
4609 fill_surface_state(isl_dev, map, res, &res->surf, view, aux_usage,
4610 0, 0, 0);
4611 map += SURFACE_STATE_ALIGNMENT;
4612 }
4613
4614 upload_surface_states(ice->state.surface_uploader, surf_state);
4615 #endif
4616 }
4617
4618 /**
4619 * Add a surface to the validation list, as well as the buffer containing
4620 * the corresponding SURFACE_STATE.
4621 *
4622 * Returns the binding table entry (offset to SURFACE_STATE).
4623 */
4624 static uint32_t
4625 use_surface(struct iris_context *ice,
4626 struct iris_batch *batch,
4627 struct pipe_surface *p_surf,
4628 bool writeable,
4629 enum isl_aux_usage aux_usage,
4630 bool is_read_surface,
4631 enum iris_domain access)
4632 {
4633 struct iris_surface *surf = (void *) p_surf;
4634 struct iris_resource *res = (void *) p_surf->texture;
4635 uint32_t offset = 0;
4636
4637 iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture),
4638 writeable, access);
4639 if (GEN_GEN == 8 && is_read_surface) {
4640 iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state_read.ref.res), false,
4641 IRIS_DOMAIN_NONE);
4642 } else {
4643 iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.ref.res), false,
4644 IRIS_DOMAIN_NONE);
4645 }
4646
4647 if (res->aux.bo) {
4648 iris_use_pinned_bo(batch, res->aux.bo, writeable, access);
4649 if (res->aux.clear_color_bo)
4650 iris_use_pinned_bo(batch, res->aux.clear_color_bo, false, access);
4651
4652 if (memcmp(&res->aux.clear_color, &surf->clear_color,
4653 sizeof(surf->clear_color)) != 0) {
4654 update_clear_value(ice, batch, res, &surf->surface_state,
4655 res->aux.possible_usages, &surf->view);
4656 if (GEN_GEN == 8) {
4657 update_clear_value(ice, batch, res, &surf->surface_state_read,
4658 res->aux.possible_usages, &surf->read_view);
4659 }
4660 surf->clear_color = res->aux.clear_color;
4661 }
4662 }
4663
4664 offset = (GEN_GEN == 8 && is_read_surface)
4665 ? surf->surface_state_read.ref.offset
4666 : surf->surface_state.ref.offset;
4667
4668 return offset +
4669 surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
4670 }
4671
4672 static uint32_t
4673 use_sampler_view(struct iris_context *ice,
4674 struct iris_batch *batch,
4675 struct iris_sampler_view *isv)
4676 {
4677 enum isl_aux_usage aux_usage =
4678 iris_resource_texture_aux_usage(ice, isv->res, isv->view.format);
4679
4680 iris_use_pinned_bo(batch, isv->res->bo, false, IRIS_DOMAIN_OTHER_READ);
4681 iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.ref.res), false,
4682 IRIS_DOMAIN_NONE);
4683
4684 if (isv->res->aux.bo) {
4685 iris_use_pinned_bo(batch, isv->res->aux.bo,
4686 false, IRIS_DOMAIN_OTHER_READ);
4687 if (isv->res->aux.clear_color_bo)
4688 iris_use_pinned_bo(batch, isv->res->aux.clear_color_bo,
4689 false, IRIS_DOMAIN_OTHER_READ);
4690 if (memcmp(&isv->res->aux.clear_color, &isv->clear_color,
4691 sizeof(isv->clear_color)) != 0) {
4692 update_clear_value(ice, batch, isv->res, &isv->surface_state,
4693 isv->res->aux.sampler_usages, &isv->view);
4694 isv->clear_color = isv->res->aux.clear_color;
4695 }
4696 }
4697
4698 return isv->surface_state.ref.offset +
4699 surf_state_offset_for_aux(isv->res, isv->res->aux.sampler_usages,
4700 aux_usage);
4701 }
4702
4703 static uint32_t
4704 use_ubo_ssbo(struct iris_batch *batch,
4705 struct iris_context *ice,
4706 struct pipe_shader_buffer *buf,
4707 struct iris_state_ref *surf_state,
4708 bool writable, enum iris_domain access)
4709 {
4710 if (!buf->buffer || !surf_state->res)
4711 return use_null_surface(batch, ice);
4712
4713 iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable, access);
4714 iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false,
4715 IRIS_DOMAIN_NONE);
4716
4717 return surf_state->offset;
4718 }
4719
4720 static uint32_t
4721 use_image(struct iris_batch *batch, struct iris_context *ice,
4722 struct iris_shader_state *shs, const struct shader_info *info,
4723 int i)
4724 {
4725 struct iris_image_view *iv = &shs->image[i];
4726 struct iris_resource *res = (void *) iv->base.resource;
4727
4728 if (!res)
4729 return use_null_surface(batch, ice);
4730
4731 bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE;
4732
4733 iris_use_pinned_bo(batch, res->bo, write, IRIS_DOMAIN_NONE);
4734 iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.ref.res),
4735 false, IRIS_DOMAIN_NONE);
4736
4737 if (res->aux.bo)
4738 iris_use_pinned_bo(batch, res->aux.bo, write, IRIS_DOMAIN_NONE);
4739
4740 enum isl_aux_usage aux_usage =
4741 iris_image_view_aux_usage(ice, &iv->base, info);
4742
4743 return iv->surface_state.ref.offset +
4744 surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
4745 }
4746
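/* Helpers for iris_populate_binding_table() below. Both macros rely on
 * variables from the enclosing function's scope: push_bt_entry() converts a
 * surface state address into a binder-relative offset and stores it at the
 * running entry index 's', while bt_assert() checks that a surface group
 * begins at the expected binding table index.
 */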
4747 #define push_bt_entry(addr) \
4748 assert(addr >= binder_addr); \
4749 assert(s < shader->bt.size_bytes / sizeof(uint32_t)); \
4750 if (!pin_only) bt_map[s++] = (addr) - binder_addr;
4751
4752 #define bt_assert(section) \
4753 if (!pin_only && shader->bt.used_mask[section] != 0) \
4754 assert(shader->bt.offsets[section] == s);
4755
4756 /**
4757 * Populate the binding table for a given shader stage.
4758 *
4759 * This fills out the table of pointers to surfaces required by the shader,
4760 * and also adds those buffers to the validation list so the kernel can make
4761 * them resident before running our batch.
4762 */
4763 static void
4764 iris_populate_binding_table(struct iris_context *ice,
4765 struct iris_batch *batch,
4766 gl_shader_stage stage,
4767 bool pin_only)
4768 {
4769 const struct iris_binder *binder = &ice->state.binder;
4770 struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
4771 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
4772 if (!shader)
4773 return;
4774
4775 struct iris_binding_table *bt = &shader->bt;
4776 UNUSED struct brw_stage_prog_data *prog_data = shader->prog_data;
4777 struct iris_shader_state *shs = &ice->state.shaders[stage];
4778 uint32_t binder_addr = binder->bo->gtt_offset;
4779
4780 uint32_t *bt_map = binder->map + binder->bt_offset[stage];
4781 int s = 0;
4782
4783 const struct shader_info *info = iris_get_shader_info(ice, stage);
4784 if (!info) {
4785 /* TCS passthrough doesn't need a binding table. */
4786 assert(stage == MESA_SHADER_TESS_CTRL);
4787 return;
4788 }
4789
4790 if (stage == MESA_SHADER_COMPUTE &&
4791 shader->bt.used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS]) {
4792 /* surface for gl_NumWorkGroups */
4793 struct iris_state_ref *grid_data = &ice->state.grid_size;
4794 struct iris_state_ref *grid_state = &ice->state.grid_surf_state;
4795 iris_use_pinned_bo(batch, iris_resource_bo(grid_data->res), false,
4796 IRIS_DOMAIN_OTHER_READ);
4797 iris_use_pinned_bo(batch, iris_resource_bo(grid_state->res), false,
4798 IRIS_DOMAIN_NONE);
4799 push_bt_entry(grid_state->offset);
4800 }
4801
4802 if (stage == MESA_SHADER_FRAGMENT) {
4803 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4804 /* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
4805 if (cso_fb->nr_cbufs) {
4806 for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
4807 uint32_t addr;
4808 if (cso_fb->cbufs[i]) {
4809 addr = use_surface(ice, batch, cso_fb->cbufs[i], true,
4810 ice->state.draw_aux_usage[i], false,
4811 IRIS_DOMAIN_RENDER_WRITE);
4812 } else {
4813 addr = use_null_fb_surface(batch, ice);
4814 }
4815 push_bt_entry(addr);
4816 }
4817 } else if (GEN_GEN < 11) {
4818 uint32_t addr = use_null_fb_surface(batch, ice);
4819 push_bt_entry(addr);
4820 }
4821 }
4822
4823 #define foreach_surface_used(index, group) \
4824 bt_assert(group); \
4825 for (int index = 0; index < bt->sizes[group]; index++) \
4826 if (iris_group_index_to_bti(bt, group, index) != \
4827 IRIS_SURFACE_NOT_USED)
4828
4829 foreach_surface_used(i, IRIS_SURFACE_GROUP_RENDER_TARGET_READ) {
4830 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4831 uint32_t addr;
4832 if (cso_fb->cbufs[i]) {
4833 addr = use_surface(ice, batch, cso_fb->cbufs[i],
4834 false, ice->state.draw_aux_usage[i], true,
4835 IRIS_DOMAIN_OTHER_READ);
4836 push_bt_entry(addr);
4837 }
4838 }
4839
4840 foreach_surface_used(i, IRIS_SURFACE_GROUP_TEXTURE) {
4841 struct iris_sampler_view *view = shs->textures[i];
4842 uint32_t addr = view ? use_sampler_view(ice, batch, view)
4843 : use_null_surface(batch, ice);
4844 push_bt_entry(addr);
4845 }
4846
4847 foreach_surface_used(i, IRIS_SURFACE_GROUP_IMAGE) {
4848 uint32_t addr = use_image(batch, ice, shs, info, i);
4849 push_bt_entry(addr);
4850 }
4851
4852 foreach_surface_used(i, IRIS_SURFACE_GROUP_UBO) {
4853 uint32_t addr;
4854
4855 if (i == bt->sizes[IRIS_SURFACE_GROUP_UBO] - 1) {
4856 if (ish->const_data) {
4857 iris_use_pinned_bo(batch, iris_resource_bo(ish->const_data), false,
4858 IRIS_DOMAIN_OTHER_READ);
4859 iris_use_pinned_bo(batch, iris_resource_bo(ish->const_data_state.res),
4860 false, IRIS_DOMAIN_NONE);
4861 addr = ish->const_data_state.offset;
4862 } else {
4863 /* This can only happen with INTEL_DISABLE_COMPACT_BINDING_TABLE=1. */
4864 addr = use_null_surface(batch, ice);
4865 }
4866 } else {
4867 addr = use_ubo_ssbo(batch, ice, &shs->constbuf[i],
4868 &shs->constbuf_surf_state[i], false,
4869 IRIS_DOMAIN_OTHER_READ);
4870 }
4871
4872 push_bt_entry(addr);
4873 }
4874
4875 foreach_surface_used(i, IRIS_SURFACE_GROUP_SSBO) {
4876 uint32_t addr =
4877 use_ubo_ssbo(batch, ice, &shs->ssbo[i], &shs->ssbo_surf_state[i],
4878 shs->writable_ssbos & (1u << i), IRIS_DOMAIN_NONE);
4879 push_bt_entry(addr);
4880 }
4881
4882 #if 0
4883 /* XXX: YUV surfaces not implemented yet */
4884 bt_assert(plane_start[1], ...);
4885 bt_assert(plane_start[2], ...);
4886 #endif
4887 }
4888
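/**
 * Pin a resource's backing BO if the resource exists; NULL is a no-op.
 */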
4889 static void
4890 iris_use_optional_res(struct iris_batch *batch,
4891 struct pipe_resource *res,
4892 bool writeable,
4893 enum iris_domain access)
4894 {
4895 if (res) {
4896 struct iris_bo *bo = iris_resource_bo(res);
4897 iris_use_pinned_bo(batch, bo, writeable, access);
4898 }
4899 }
4900
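/**
 * Pin the depth and stencil buffers, plus any auxiliary buffers, requesting
 * write access only when the corresponding writes are actually enabled.
 */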
4901 static void
4902 pin_depth_and_stencil_buffers(struct iris_batch *batch,
4903 struct pipe_surface *zsbuf,
4904 struct iris_depth_stencil_alpha_state *cso_zsa)
4905 {
4906 if (!zsbuf)
4907 return;
4908
4909 struct iris_resource *zres, *sres;
4910 iris_get_depth_stencil_resources(zsbuf->texture, &zres, &sres);
4911
4912 if (zres) {
4913 const enum iris_domain access = cso_zsa->depth_writes_enabled ?
4914 IRIS_DOMAIN_DEPTH_WRITE : IRIS_DOMAIN_OTHER_READ;
4915 iris_use_pinned_bo(batch, zres->bo, cso_zsa->depth_writes_enabled,
4916 access);
4917 if (zres->aux.bo) {
4918 iris_use_pinned_bo(batch, zres->aux.bo,
4919 cso_zsa->depth_writes_enabled, access);
4920 }
4921 }
4922
4923 if (sres) {
4924 const enum iris_domain access = cso_zsa->stencil_writes_enabled ?
4925 IRIS_DOMAIN_DEPTH_WRITE : IRIS_DOMAIN_OTHER_READ;
4926 iris_use_pinned_bo(batch, sres->bo, cso_zsa->stencil_writes_enabled,
4927 access);
4928 }
4929 }
4930
4931 /* ------------------------------------------------------------------- */
4932
4933 /**
4934 * Pin any BOs which were installed by a previous batch, and restored
4935 * via the hardware logical context mechanism.
4936 *
4937 * We don't need to re-emit all state every batch - the hardware context
4938 * mechanism will save and restore it for us. This includes pointers to
4939 * various BOs...which won't exist unless we ask the kernel to pin them
4940 * by adding them to the validation list.
4941 *
4942 * We can skip buffers if we've re-emitted those packets, as we're
4943 * overwriting those stale pointers with new ones, and don't actually
4944 * refer to the old BOs.
4945 */
4946 static void
4947 iris_restore_render_saved_bos(struct iris_context *ice,
4948 struct iris_batch *batch,
4949 const struct pipe_draw_info *draw)
4950 {
4951 struct iris_genx_state *genx = ice->state.genx;
4952
4953 const uint64_t clean = ~ice->state.dirty;
4954 const uint64_t stage_clean = ~ice->state.stage_dirty;
4955
4956 if (clean & IRIS_DIRTY_CC_VIEWPORT) {
4957 iris_use_optional_res(batch, ice->state.last_res.cc_vp, false,
4958 IRIS_DOMAIN_NONE);
4959 }
4960
4961 if (clean & IRIS_DIRTY_SF_CL_VIEWPORT) {
4962 iris_use_optional_res(batch, ice->state.last_res.sf_cl_vp, false,
4963 IRIS_DOMAIN_NONE);
4964 }
4965
4966 if (clean & IRIS_DIRTY_BLEND_STATE) {
4967 iris_use_optional_res(batch, ice->state.last_res.blend, false,
4968 IRIS_DOMAIN_NONE);
4969 }
4970
4971 if (clean & IRIS_DIRTY_COLOR_CALC_STATE) {
4972 iris_use_optional_res(batch, ice->state.last_res.color_calc, false,
4973 IRIS_DOMAIN_NONE);
4974 }
4975
4976 if (clean & IRIS_DIRTY_SCISSOR_RECT) {
4977 iris_use_optional_res(batch, ice->state.last_res.scissor, false,
4978 IRIS_DOMAIN_NONE);
4979 }
4980
4981 if (ice->state.streamout_active && (clean & IRIS_DIRTY_SO_BUFFERS)) {
4982 for (int i = 0; i < 4; i++) {
4983 struct iris_stream_output_target *tgt =
4984 (void *) ice->state.so_target[i];
4985 if (tgt) {
4986 iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
4987 true, IRIS_DOMAIN_OTHER_WRITE);
4988 iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
4989 true, IRIS_DOMAIN_OTHER_WRITE);
4990 }
4991 }
4992 }
4993
4994 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
4995 if (!(stage_clean & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)))
4996 continue;
4997
4998 struct iris_shader_state *shs = &ice->state.shaders[stage];
4999 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5000
5001 if (!shader)
5002 continue;
5003
5004 struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5005
5006 for (int i = 0; i < 4; i++) {
5007 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
5008
5009 if (range->length == 0)
5010 continue;
5011
5012 /* Range block is a binding table index; map it back to a UBO index. */
5013 unsigned block_index = iris_bti_to_group_index(
5014 &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
5015 assert(block_index != IRIS_SURFACE_NOT_USED);
5016
5017 struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
5018 struct iris_resource *res = (void *) cbuf->buffer;
5019
5020 if (res)
5021 iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_OTHER_READ);
5022 else
5023 iris_use_pinned_bo(batch, batch->screen->workaround_bo, false,
5024 IRIS_DOMAIN_OTHER_READ);
5025 }
5026 }
5027
5028 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5029 if (stage_clean & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
5030 /* Re-pin any buffers referred to by the binding table. */
5031 iris_populate_binding_table(ice, batch, stage, true);
5032 }
5033 }
5034
5035 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5036 struct iris_shader_state *shs = &ice->state.shaders[stage];
5037 struct pipe_resource *res = shs->sampler_table.res;
5038 if (res)
5039 iris_use_pinned_bo(batch, iris_resource_bo(res), false,
5040 IRIS_DOMAIN_NONE);
5041 }
5042
5043 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5044 if (stage_clean & (IRIS_STAGE_DIRTY_VS << stage)) {
5045 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5046
5047 if (shader) {
5048 struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
5049 iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
5050
5051 struct brw_stage_prog_data *prog_data = shader->prog_data;
5052
5053 if (prog_data->total_scratch > 0) {
5054 struct iris_bo *bo =
5055 iris_get_scratch_space(ice, prog_data->total_scratch, stage);
5056 iris_use_pinned_bo(batch, bo, true, IRIS_DOMAIN_NONE);
5057 }
5058 }
5059 }
5060 }
5061
5062 if ((clean & IRIS_DIRTY_DEPTH_BUFFER) &&
5063 (clean & IRIS_DIRTY_WM_DEPTH_STENCIL)) {
5064 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5065 pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
5066 }
5067
5068 iris_use_optional_res(batch, ice->state.last_res.index_buffer, false,
5069 IRIS_DOMAIN_OTHER_READ);
5070
5071 if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
5072 uint64_t bound = ice->state.bound_vertex_buffers;
5073 while (bound) {
5074 const int i = u_bit_scan64(&bound);
5075 struct pipe_resource *res = genx->vertex_buffers[i].resource;
5076 iris_use_pinned_bo(batch, iris_resource_bo(res), false,
5077 IRIS_DOMAIN_OTHER_READ);
5078 }
5079 }
5080 }
5081
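/**
 * The compute counterpart to iris_restore_render_saved_bos(): re-pin any
 * BOs which a previous compute batch installed and which the hardware
 * logical context will restore, skipping those whose packets we re-emit.
 */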
5082 static void
5083 iris_restore_compute_saved_bos(struct iris_context *ice,
5084 struct iris_batch *batch,
5085 const struct pipe_grid_info *grid)
5086 {
5087 const uint64_t stage_clean = ~ice->state.stage_dirty;
5088
5089 const int stage = MESA_SHADER_COMPUTE;
5090 struct iris_shader_state *shs = &ice->state.shaders[stage];
5091
5092 if (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) {
5093 /* Re-pin any buffers referred to by the binding table. */
5094 iris_populate_binding_table(ice, batch, stage, true);
5095 }
5096
5097 struct pipe_resource *sampler_res = shs->sampler_table.res;
5098 if (sampler_res)
5099 iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false,
5100 IRIS_DOMAIN_NONE);
5101
5102 if ((stage_clean & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS) &&
5103 (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) &&
5104 (stage_clean & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
5105 (stage_clean & IRIS_STAGE_DIRTY_CS)) {
5106 iris_use_optional_res(batch, ice->state.last_res.cs_desc, false,
5107 IRIS_DOMAIN_NONE);
5108 }
5109
5110 if (stage_clean & IRIS_STAGE_DIRTY_CS) {
5111 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5112
5113 if (shader) {
5114 struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
5115 iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
5116
5117 struct iris_bo *curbe_bo =
5118 iris_resource_bo(ice->state.last_res.cs_thread_ids);
5119 iris_use_pinned_bo(batch, curbe_bo, false, IRIS_DOMAIN_NONE);
5120
5121 struct brw_stage_prog_data *prog_data = shader->prog_data;
5122
5123 if (prog_data->total_scratch > 0) {
5124 struct iris_bo *bo =
5125 iris_get_scratch_space(ice, prog_data->total_scratch, stage);
5126 iris_use_pinned_bo(batch, bo, true, IRIS_DOMAIN_NONE);
5127 }
5128 }
5129 }
5130 }
5131
5132 /**
5133 * Possibly emit STATE_BASE_ADDRESS to update Surface State Base Address.
5134 */
5135 static void
5136 iris_update_surface_base_address(struct iris_batch *batch,
5137 struct iris_binder *binder)
5138 {
5139 if (batch->last_surface_base_address == binder->bo->gtt_offset)
5140 return;
5141
5142 uint32_t mocs = batch->screen->isl_dev.mocs.internal;
5143
5144 iris_batch_sync_region_start(batch);
5145
5146 flush_before_state_base_change(batch);
5147
5148 #if GEN_GEN == 12
5149 /* GEN:BUG:1607854226:
5150 *
5151 * Workaround the non-pipelined state not applying in MEDIA/GPGPU pipeline
5152 * mode by putting the pipeline temporarily in 3D mode.
5153 */
5154 if (batch->name == IRIS_BATCH_COMPUTE)
5155 emit_pipeline_select(batch, _3D);
5156 #endif
5157
5158 iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
5159 sba.SurfaceStateBaseAddressModifyEnable = true;
5160 sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);
5161
5162 /* The hardware appears to pay attention to the MOCS fields even
5163 * if you don't set the "Address Modify Enable" bit for the base.
5164 */
5165 sba.GeneralStateMOCS = mocs;
5166 sba.StatelessDataPortAccessMOCS = mocs;
5167 sba.DynamicStateMOCS = mocs;
5168 sba.IndirectObjectMOCS = mocs;
5169 sba.InstructionMOCS = mocs;
5170 sba.SurfaceStateMOCS = mocs;
5171 #if GEN_GEN >= 9
5172 sba.BindlessSurfaceStateMOCS = mocs;
5173 #endif
5174 }
5175
5176 #if GEN_GEN == 12
5177 /* GEN:BUG:1607854226:
5178 *
5179 * Put the pipeline back into compute mode.
5180 */
5181 if (batch->name == IRIS_BATCH_COMPUTE)
5182 emit_pipeline_select(batch, GPGPU);
5183 #endif
5184
5185 flush_after_state_base_change(batch);
5186 iris_batch_sync_region_end(batch);
5187
5188 batch->last_surface_base_address = binder->bo->gtt_offset;
5189 }
5190
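/**
 * Compute the viewport's depth range, forcing [0, 1] when drawing in
 * window space coordinates (where the viewport transform is disabled).
 */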
5191 static inline void
5192 iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
5193 bool window_space_position, float *zmin, float *zmax)
5194 {
5195 if (window_space_position) {
5196 *zmin = 0.f;
5197 *zmax = 1.f;
5198 return;
5199 }
5200 util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
5201 }
5202
5203 #if GEN_GEN >= 12
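/**
 * If the aux-map state number has changed since our last invalidation,
 * sync the pipeline and rewrite the aux table registers to invalidate any
 * cached translations.
 */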
5204 void
5205 genX(invalidate_aux_map_state)(struct iris_batch *batch)
5206 {
5207 struct iris_screen *screen = batch->screen;
5208 void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
5209 if (!aux_map_ctx)
5210 return;
5211 uint32_t aux_map_state_num = gen_aux_map_get_state_num(aux_map_ctx);
5212 if (batch->last_aux_map_state != aux_map_state_num) {
5213 /* HSD 1209978178: docs say that before programming the aux table:
5214 *
5215 * "Driver must ensure that the engine is IDLE but ensure it doesn't
5216 * add extra flushes in the case it knows that the engine is already
5217 * IDLE."
5218 *
5219 * An end of pipe sync is needed here, otherwise we see GPU hangs in
5220 * dEQP-GLES31.functional.copy_image.* tests.
5221 */
5222 iris_emit_end_of_pipe_sync(batch, "Invalidate aux map table",
5223 PIPE_CONTROL_CS_STALL);
5224
5225 /* If the aux-map state number increased, then we need to rewrite the
5226 * register. Rewriting the register is used to both set the aux-map
5227 * translation table address, and also to invalidate any previously
5228 * cached translations.
5229 */
5230 iris_load_register_imm32(batch, GENX(GFX_CCS_AUX_INV_num), 1);
5231 batch->last_aux_map_state = aux_map_state_num;
5232 }
5233 }
5234
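/**
 * Program the base address of the auxiliary surface translation table.
 */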
5235 static void
5236 init_aux_map_state(struct iris_batch *batch)
5237 {
5238 struct iris_screen *screen = batch->screen;
5239 void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
5240 if (!aux_map_ctx)
5241 return;
5242
5243 uint64_t base_addr = gen_aux_map_get_base(aux_map_ctx);
5244 assert(base_addr != 0 && align64(base_addr, 32 * 1024) == base_addr);
5245 iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num),
5246 base_addr);
5247 }
5248 #endif
5249
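/**
 * Push constant buffer ranges gathered by setup_constant_buffers() for the
 * 3DSTATE_CONSTANT_* emitters below; lengths are in 32-byte units.
 */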
5250 struct push_bos {
5251 struct {
5252 struct iris_address addr;
5253 uint32_t length;
5254 } buffers[4];
5255 int buffer_count;
5256 uint32_t max_length;
5257 };
5258
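/**
 * Gather the UBO ranges the compiler chose to push for this stage, mapping
 * each range's binding table index back to a UBO slot and recording its
 * address and length in the push_bos structure.
 */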
5259 static void
5260 setup_constant_buffers(struct iris_context *ice,
5261 struct iris_batch *batch,
5262 int stage,
5263 struct push_bos *push_bos)
5264 {
5265 struct iris_shader_state *shs = &ice->state.shaders[stage];
5266 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5267 struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5268
5269 uint32_t push_range_sum = 0;
5270
5271 int n = 0;
5272 for (int i = 0; i < 4; i++) {
5273 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
5274
5275 if (range->length == 0)
5276 continue;
5277
5278 push_range_sum += range->length;
5279
5280 if (range->length > push_bos->max_length)
5281 push_bos->max_length = range->length;
5282
5283 /* Range block is a binding table index; map it back to a UBO index. */
5284 unsigned block_index = iris_bti_to_group_index(
5285 &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
5286 assert(block_index != IRIS_SURFACE_NOT_USED);
5287
5288 struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
5289 struct iris_resource *res = (void *) cbuf->buffer;
5290
5291 assert(cbuf->buffer_offset % 32 == 0);
5292
5293 push_bos->buffers[n].length = range->length;
5294 push_bos->buffers[n].addr =
5295 res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
5296 : batch->screen->workaround_address;
5297 n++;
5298 }
5299
5300 /* From the 3DSTATE_CONSTANT_XS and 3DSTATE_CONSTANT_ALL programming notes:
5301 *
5302 * "The sum of all four read length fields must be less than or
5303 * equal to the size of 64."
5304 */
5305 assert(push_range_sum <= 64);
5306
5307 push_bos->buffer_count = n;
5308 }
5309
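/**
 * Emit a 3DSTATE_CONSTANT_XS packet programming the push constant buffers
 * for a single stage. Buffers are packed into the highest slots to satisfy
 * the hardware restriction quoted in the function body.
 */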
5310 static void
5311 emit_push_constant_packets(struct iris_context *ice,
5312 struct iris_batch *batch,
5313 int stage,
5314 const struct push_bos *push_bos)
5315 {
5316 UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
5317 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5318 struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5319
5320 iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
5321 pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
5322 #if GEN_GEN >= 12
5323 pkt.MOCS = isl_dev->mocs.internal;
5324 #endif
5325 if (prog_data) {
5326 /* The Skylake PRM contains the following restriction:
5327 *
5328 * "The driver must ensure The following case does not occur
5329 * without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
5330 * buffer 3 read length equal to zero committed followed by a
5331 * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to
5332 * zero committed."
5333 *
5334 * To avoid this, we program the buffers in the highest slots.
5335 * This way, slot 0 is only used if slot 3 is also used.
5336 */
5337 int n = push_bos->buffer_count;
5338 assert(n <= 4);
5339 const unsigned shift = 4 - n;
5340 for (int i = 0; i < n; i++) {
5341 pkt.ConstantBody.ReadLength[i + shift] =
5342 push_bos->buffers[i].length;
5343 pkt.ConstantBody.Buffer[i + shift] = push_bos->buffers[i].addr;
5344 }
5345 }
5346 }
5347 }
5348
5349 #if GEN_GEN >= 12
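/**
 * Emit a single 3DSTATE_CONSTANT_ALL packet covering every stage set in
 * shader_mask. A NULL push_bos disables push constants for those stages.
 */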
5350 static void
5351 emit_push_constant_packet_all(struct iris_context *ice,
5352 struct iris_batch *batch,
5353 uint32_t shader_mask,
5354 const struct push_bos *push_bos)
5355 {
5356 struct isl_device *isl_dev = &batch->screen->isl_dev;
5357
5358 if (!push_bos) {
5359 iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), pc) {
5360 pc.ShaderUpdateEnable = shader_mask;
5361 }
5362 return;
5363 }
5364
5365 const uint32_t n = push_bos->buffer_count;
5366 const uint32_t max_pointers = 4;
5367 const uint32_t num_dwords = 2 + 2 * n;
5368 uint32_t const_all[2 + 2 * max_pointers];
5369 uint32_t *dw = &const_all[0];
5370
5371 assert(n <= max_pointers);
5372 iris_pack_command(GENX(3DSTATE_CONSTANT_ALL), dw, all) {
5373 all.DWordLength = num_dwords - 2;
5374 all.MOCS = isl_dev->mocs.internal;
5375 all.ShaderUpdateEnable = shader_mask;
5376 all.PointerBufferMask = (1 << n) - 1;
5377 }
5378 dw += 2;
5379
5380 for (int i = 0; i < n; i++) {
5381 _iris_pack_state(batch, GENX(3DSTATE_CONSTANT_ALL_DATA),
5382 dw + i * 2, data) {
5383 data.PointerToConstantBuffer = push_bos->buffers[i].addr;
5384 data.ConstantBufferReadLength = push_bos->buffers[i].length;
5385 }
5386 }
5387 iris_batch_emit(batch, const_all, sizeof(uint32_t) * num_dwords);
5388 }
5389 #endif
5390
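/**
 * The main state streaming function: re-emit 3DSTATE packets for whatever
 * render state has been flagged dirty since the last draw.
 */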
5391 static void
5392 iris_upload_dirty_render_state(struct iris_context *ice,
5393 struct iris_batch *batch,
5394 const struct pipe_draw_info *draw)
5395 {
5396 const uint64_t dirty = ice->state.dirty;
5397 const uint64_t stage_dirty = ice->state.stage_dirty;
5398
5399 if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER) &&
5400 !(stage_dirty & IRIS_ALL_STAGE_DIRTY_FOR_RENDER))
5401 return;
5402
5403 struct iris_genx_state *genx = ice->state.genx;
5404 struct iris_binder *binder = &ice->state.binder;
5405 struct brw_wm_prog_data *wm_prog_data = (void *)
5406 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
5407
5408 if (dirty & IRIS_DIRTY_CC_VIEWPORT) {
5409 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
5410 uint32_t cc_vp_address;
5411
5412 /* XXX: could avoid streaming for depth_clip [0,1] case. */
5413 uint32_t *cc_vp_map =
5414 stream_state(batch, ice->state.dynamic_uploader,
5415 &ice->state.last_res.cc_vp,
5416 4 * ice->state.num_viewports *
5417 GENX(CC_VIEWPORT_length), 32, &cc_vp_address);
5418 for (int i = 0; i < ice->state.num_viewports; i++) {
5419 float zmin, zmax;
5420 iris_viewport_zmin_zmax(&ice->state.viewports[i], cso_rast->clip_halfz,
5421 ice->state.window_space_position,
5422 &zmin, &zmax);
5423 if (cso_rast->depth_clip_near)
5424 zmin = 0.0;
5425 if (cso_rast->depth_clip_far)
5426 zmax = 1.0;
5427
5428 iris_pack_state(GENX(CC_VIEWPORT), cc_vp_map, ccv) {
5429 ccv.MinimumDepth = zmin;
5430 ccv.MaximumDepth = zmax;
5431 }
5432
5433 cc_vp_map += GENX(CC_VIEWPORT_length);
5434 }
5435
5436 iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
5437 ptr.CCViewportPointer = cc_vp_address;
5438 }
5439 }
5440
5441 if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
5442 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5443 uint32_t sf_cl_vp_address;
5444 uint32_t *vp_map =
5445 stream_state(batch, ice->state.dynamic_uploader,
5446 &ice->state.last_res.sf_cl_vp,
5447 4 * ice->state.num_viewports *
5448 GENX(SF_CLIP_VIEWPORT_length), 64, &sf_cl_vp_address);
5449
5450 for (unsigned i = 0; i < ice->state.num_viewports; i++) {
5451 const struct pipe_viewport_state *state = &ice->state.viewports[i];
5452 float gb_xmin, gb_xmax, gb_ymin, gb_ymax;
5453
5454 float vp_xmin = viewport_extent(state, 0, -1.0f);
5455 float vp_xmax = viewport_extent(state, 0, 1.0f);
5456 float vp_ymin = viewport_extent(state, 1, -1.0f);
5457 float vp_ymax = viewport_extent(state, 1, 1.0f);
5458
5459 gen_calculate_guardband_size(cso_fb->width, cso_fb->height,
5460 state->scale[0], state->scale[1],
5461 state->translate[0], state->translate[1],
5462 &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);
5463
5464 iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
5465 vp.ViewportMatrixElementm00 = state->scale[0];
5466 vp.ViewportMatrixElementm11 = state->scale[1];
5467 vp.ViewportMatrixElementm22 = state->scale[2];
5468 vp.ViewportMatrixElementm30 = state->translate[0];
5469 vp.ViewportMatrixElementm31 = state->translate[1];
5470 vp.ViewportMatrixElementm32 = state->translate[2];
5471 vp.XMinClipGuardband = gb_xmin;
5472 vp.XMaxClipGuardband = gb_xmax;
5473 vp.YMinClipGuardband = gb_ymin;
5474 vp.YMaxClipGuardband = gb_ymax;
5475 vp.XMinViewPort = MAX2(vp_xmin, 0);
5476 vp.XMaxViewPort = MIN2(vp_xmax, cso_fb->width) - 1;
5477 vp.YMinViewPort = MAX2(vp_ymin, 0);
5478 vp.YMaxViewPort = MIN2(vp_ymax, cso_fb->height) - 1;
5479 }
5480
5481 vp_map += GENX(SF_CLIP_VIEWPORT_length);
5482 }
5483
5484 iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
5485 ptr.SFClipViewportPointer = sf_cl_vp_address;
5486 }
5487 }
5488
5489 if (dirty & IRIS_DIRTY_URB) {
5490 unsigned size[4];
5491
5492 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
5493 if (!ice->shaders.prog[i]) {
5494 size[i] = 1;
5495 } else {
5496 struct brw_vue_prog_data *vue_prog_data =
5497 (void *) ice->shaders.prog[i]->prog_data;
5498 size[i] = vue_prog_data->urb_entry_size;
5499 }
5500 assert(size[i] != 0);
5501 }
5502
5503 unsigned entries[4], start[4];
5504 gen_get_urb_config(&batch->screen->devinfo,
5505 batch->screen->l3_config_3d,
5506 ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
5507 ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,
5508 size, entries, start,
5509 &ice->state.urb_deref_block_size);
5510
5511 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
5512 iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
5513 urb._3DCommandSubOpcode += i;
5514 urb.VSURBStartingAddress = start[i];
5515 urb.VSURBEntryAllocationSize = size[i] - 1;
5516 urb.VSNumberofURBEntries = entries[i];
5517 }
5518 }
5519 }
5520
5521 if (dirty & IRIS_DIRTY_BLEND_STATE) {
5522 struct iris_blend_state *cso_blend = ice->state.cso_blend;
5523 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5524 struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
5525 const int header_dwords = GENX(BLEND_STATE_length);
5526
5527 /* Always write at least one BLEND_STATE - the final RT message will
5528 * reference BLEND_STATE[0] even if there aren't color writes. There
5529 * may still be alpha testing, computed depth, and so on.
5530 */
5531 const int rt_dwords =
5532 MAX2(cso_fb->nr_cbufs, 1) * GENX(BLEND_STATE_ENTRY_length);
5533
5534 uint32_t blend_offset;
5535 uint32_t *blend_map =
5536 stream_state(batch, ice->state.dynamic_uploader,
5537 &ice->state.last_res.blend,
5538 4 * (header_dwords + rt_dwords), 64, &blend_offset);
5539
5540 uint32_t blend_state_header;
5541 iris_pack_state(GENX(BLEND_STATE), &blend_state_header, bs) {
5542 bs.AlphaTestEnable = cso_zsa->alpha.enabled;
5543 bs.AlphaTestFunction = translate_compare_func(cso_zsa->alpha.func);
5544 }
5545
5546 blend_map[0] = blend_state_header | cso_blend->blend_state[0];
5547 memcpy(&blend_map[1], &cso_blend->blend_state[1], 4 * rt_dwords);
5548
5549 iris_emit_cmd(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
5550 ptr.BlendStatePointer = blend_offset;
5551 ptr.BlendStatePointerValid = true;
5552 }
5553 }
5554
5555 if (dirty & IRIS_DIRTY_COLOR_CALC_STATE) {
5556 struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
5557 #if GEN_GEN == 8
5558 struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
5559 #endif
5560 uint32_t cc_offset;
5561 void *cc_map =
5562 stream_state(batch, ice->state.dynamic_uploader,
5563 &ice->state.last_res.color_calc,
5564 sizeof(uint32_t) * GENX(COLOR_CALC_STATE_length),
5565 64, &cc_offset);
5566 iris_pack_state(GENX(COLOR_CALC_STATE), cc_map, cc) {
5567 cc.AlphaTestFormat = ALPHATEST_FLOAT32;
5568 cc.AlphaReferenceValueAsFLOAT32 = cso->alpha.ref_value;
5569 cc.BlendConstantColorRed = ice->state.blend_color.color[0];
5570 cc.BlendConstantColorGreen = ice->state.blend_color.color[1];
5571 cc.BlendConstantColorBlue = ice->state.blend_color.color[2];
5572 cc.BlendConstantColorAlpha = ice->state.blend_color.color[3];
5573 #if GEN_GEN == 8
5574 cc.StencilReferenceValue = p_stencil_refs->ref_value[0];
5575 cc.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
5576 #endif
5577 }
5578 iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
5579 ptr.ColorCalcStatePointer = cc_offset;
5580 ptr.ColorCalcStatePointerValid = true;
5581 }
5582 }
5583
5584 /* GEN:BUG:1604061319
5585 *
5586 * 3DSTATE_CONSTANT_* needs to be programmed before BTP_*
5587 *
5588 * Testing shows that all the 3DSTATE_CONSTANT_XS need to be emitted if
5589 * any stage has a dirty binding table.
5590 */
5591 const bool emit_const_wa = GEN_GEN >= 11 &&
5592 ((dirty & IRIS_DIRTY_RENDER_BUFFER) ||
5593 (stage_dirty & IRIS_ALL_STAGE_DIRTY_BINDINGS));
5594
5595 #if GEN_GEN >= 12
5596 uint32_t nobuffer_stages = 0;
5597 #endif
5598
5599 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5600 if (!(stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)) &&
5601 !emit_const_wa)
5602 continue;
5603
5604 struct iris_shader_state *shs = &ice->state.shaders[stage];
5605 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5606
5607 if (!shader)
5608 continue;
5609
5610 if (shs->sysvals_need_upload)
5611 upload_sysvals(ice, stage);
5612
5613 struct push_bos push_bos = {};
5614 setup_constant_buffers(ice, batch, stage, &push_bos);
5615
5616 #if GEN_GEN >= 12
5617 /* If this stage doesn't have any push constants, emit it later in a
5618 * single CONSTANT_ALL packet with all the other stages.
5619 */
5620 if (push_bos.buffer_count == 0) {
5621 nobuffer_stages |= 1 << stage;
5622 continue;
5623 }
5624
5625 /* The Constant Buffer Read Length field from 3DSTATE_CONSTANT_ALL
5626 * contains only 5 bits, so we can only use it when the longest read
5627 * length is smaller than 32 (in 32-byte units).
5628 */
5629 if (push_bos.max_length < 32) {
5630 emit_push_constant_packet_all(ice, batch, 1 << stage, &push_bos);
5631 continue;
5632 }
5633 #endif
5634 emit_push_constant_packets(ice, batch, stage, &push_bos);
5635 }
5636
5637 #if GEN_GEN >= 12
5638 if (nobuffer_stages)
5639 emit_push_constant_packet_all(ice, batch, nobuffer_stages, NULL);
5640 #endif
5641
5642 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5643 /* Gen9 requires 3DSTATE_BINDING_TABLE_POINTERS_XS to be re-emitted
5644 * in order to commit constants. TODO: Investigate "Disable Gather
5645 * at Set Shader" to go back to legacy mode...
5646 */
5647 if (stage_dirty & ((IRIS_STAGE_DIRTY_BINDINGS_VS |
5648 (GEN_GEN == 9 ? IRIS_STAGE_DIRTY_CONSTANTS_VS : 0))
5649 << stage)) {
5650 iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
5651 ptr._3DCommandSubOpcode = 38 + stage;
5652 ptr.PointertoVSBindingTable = binder->bt_offset[stage];
5653 }
5654 }
5655 }
5656
5657 if (GEN_GEN >= 11 && (dirty & IRIS_DIRTY_RENDER_BUFFER)) {
5658 // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
5659 // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6
5660
5661 /* The PIPE_CONTROL command description says:
5662 *
5663 * "Whenever a Binding Table Index (BTI) used by a Render Target
5664 * Message points to a different RENDER_SURFACE_STATE, SW must issue a
5665 * Render Target Cache Flush by enabling this bit. When render target
5666 * flush is set due to new association of BTI, PS Scoreboard Stall bit
5667 * must be set in this packet."
5668 */
5669 // XXX: does this need to happen at 3DSTATE_BTP_PS time?
5670 iris_emit_pipe_control_flush(batch, "workaround: RT BTI change [draw]",
5671 PIPE_CONTROL_RENDER_TARGET_FLUSH |
5672 PIPE_CONTROL_STALL_AT_SCOREBOARD);
5673 }
5674
5675 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5676 if (stage_dirty & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
5677 iris_populate_binding_table(ice, batch, stage, false);
5678 }
5679 }
5680
5681 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5682 if (!(stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage)) ||
5683 !ice->shaders.prog[stage])
5684 continue;
5685
5686 iris_upload_sampler_states(ice, stage);
5687
5688 struct iris_shader_state *shs = &ice->state.shaders[stage];
5689 struct pipe_resource *res = shs->sampler_table.res;
5690 if (res)
5691 iris_use_pinned_bo(batch, iris_resource_bo(res), false,
5692 IRIS_DOMAIN_NONE);
5693
5694 iris_emit_cmd(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
5695 ptr._3DCommandSubOpcode = 43 + stage;
5696 ptr.PointertoVSSamplerState = shs->sampler_table.offset;
5697 }
5698 }
5699
5700 if (ice->state.need_border_colors)
5701 iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false,
5702 IRIS_DOMAIN_NONE);
5703
5704 if (dirty & IRIS_DIRTY_MULTISAMPLE) {
5705 iris_emit_cmd(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
5706 ms.PixelLocation =
5707 ice->state.cso_rast->half_pixel_center ? CENTER : UL_CORNER;
5708 if (ice->state.framebuffer.samples > 0)
5709 ms.NumberofMultisamples = ffs(ice->state.framebuffer.samples) - 1;
5710 }
5711 }
5712
5713 if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
5714 iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
5715 ms.SampleMask = ice->state.sample_mask;
5716 }
5717 }
5718
5719 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5720 if (!(stage_dirty & (IRIS_STAGE_DIRTY_VS << stage)))
5721 continue;
5722
5723 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5724
5725 if (shader) {
5726 struct brw_stage_prog_data *prog_data = shader->prog_data;
5727 struct iris_resource *cache = (void *) shader->assembly.res;
5728 iris_use_pinned_bo(batch, cache->bo, false, IRIS_DOMAIN_NONE);
5729
5730 if (prog_data->total_scratch > 0) {
5731 struct iris_bo *bo =
5732 iris_get_scratch_space(ice, prog_data->total_scratch, stage);
5733 iris_use_pinned_bo(batch, bo, true, IRIS_DOMAIN_NONE);
5734 }
5735
5736 if (stage == MESA_SHADER_FRAGMENT) {
5737 UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast;
5738 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5739
5740 uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0};
5741 iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
5742 ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
5743 ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
5744 ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
5745
5746 /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say:
5747 *
5748 * "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16,
5749 * SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch
5750 * mode."
5751 *
5752 * 16x MSAA only exists on Gen9+, so we can skip this on Gen8.
5753 */
5754 if (GEN_GEN >= 9 && cso_fb->samples == 16 &&
5755 !wm_prog_data->persample_dispatch) {
5756 assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
5757 ps._32PixelDispatchEnable = false;
5758 }
5759
5760 ps.DispatchGRFStartRegisterForConstantSetupData0 =
5761 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
5762 ps.DispatchGRFStartRegisterForConstantSetupData1 =
5763 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
5764 ps.DispatchGRFStartRegisterForConstantSetupData2 =
5765 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
5766
5767 ps.KernelStartPointer0 = KSP(shader) +
5768 brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
5769 ps.KernelStartPointer1 = KSP(shader) +
5770 brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
5771 ps.KernelStartPointer2 = KSP(shader) +
5772 brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
5773 }
5774
5775 uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
5776 iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
5777 #if GEN_GEN >= 9
5778 if (!wm_prog_data->uses_sample_mask)
5779 psx.InputCoverageMaskState = ICMS_NONE;
5780 else if (wm_prog_data->post_depth_coverage)
5781 psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
5782 else if (wm_prog_data->inner_coverage &&
5783 cso->conservative_rasterization)
5784 psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
5785 else
5786 psx.InputCoverageMaskState = ICMS_NORMAL;
5787 #else
5788 psx.PixelShaderUsesInputCoverageMask =
5789 wm_prog_data->uses_sample_mask;
5790 #endif
5791 }
5792
5793 uint32_t *shader_ps = (uint32_t *) shader->derived_data;
5794 uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length);
5795 iris_emit_merge(batch, shader_ps, ps_state,
5796 GENX(3DSTATE_PS_length));
5797 iris_emit_merge(batch, shader_psx, psx_state,
5798 GENX(3DSTATE_PS_EXTRA_length));
5799 } else {
5800 iris_batch_emit(batch, shader->derived_data,
5801 iris_derived_program_state_size(stage));
5802 }
5803 } else {
5804 if (stage == MESA_SHADER_TESS_EVAL) {
5805 iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
5806 iris_emit_cmd(batch, GENX(3DSTATE_TE), te);
5807 iris_emit_cmd(batch, GENX(3DSTATE_DS), ds);
5808 } else if (stage == MESA_SHADER_GEOMETRY) {
5809 iris_emit_cmd(batch, GENX(3DSTATE_GS), gs);
5810 }
5811 }
5812 }
5813
5814 if (ice->state.streamout_active) {
5815 if (dirty & IRIS_DIRTY_SO_BUFFERS) {
5816 iris_batch_emit(batch, genx->so_buffers,
5817 4 * 4 * GENX(3DSTATE_SO_BUFFER_length));
5818 for (int i = 0; i < 4; i++) {
5819 struct iris_stream_output_target *tgt =
5820 (void *) ice->state.so_target[i];
5821 if (tgt) {
5822 tgt->zeroed = true;
5823 iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
5824 true, IRIS_DOMAIN_OTHER_WRITE);
5825 iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
5826 true, IRIS_DOMAIN_OTHER_WRITE);
5827 }
5828 }
5829 }
5830
5831 if ((dirty & IRIS_DIRTY_SO_DECL_LIST) && ice->state.streamout) {
5832 uint32_t *decl_list =
5833 ice->state.streamout + GENX(3DSTATE_STREAMOUT_length);
5834 iris_batch_emit(batch, decl_list, 4 * ((decl_list[0] & 0xff) + 2));
5835 }
5836
5837 if (dirty & IRIS_DIRTY_STREAMOUT) {
5838 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
5839
5840 uint32_t dynamic_sol[GENX(3DSTATE_STREAMOUT_length)];
5841 iris_pack_command(GENX(3DSTATE_STREAMOUT), dynamic_sol, sol) {
5842 sol.SOFunctionEnable = true;
5843 sol.SOStatisticsEnable = true;
5844
5845 sol.RenderingDisable = cso_rast->rasterizer_discard &&
5846 !ice->state.prims_generated_query_active;
5847 sol.ReorderMode = cso_rast->flatshade_first ? LEADING : TRAILING;
5848 }
5849
5850 assert(ice->state.streamout);
5851
5852 iris_emit_merge(batch, ice->state.streamout, dynamic_sol,
5853 GENX(3DSTATE_STREAMOUT_length));
5854 }
5855 } else {
5856 if (dirty & IRIS_DIRTY_STREAMOUT) {
5857 iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
5858 }
5859 }
5860
5861 if (dirty & IRIS_DIRTY_CLIP) {
5862 struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
5863 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5864
5865 bool gs_or_tes = ice->shaders.prog[MESA_SHADER_GEOMETRY] ||
5866 ice->shaders.prog[MESA_SHADER_TESS_EVAL];
5867 bool points_or_lines = cso_rast->fill_mode_point_or_line ||
5868 (gs_or_tes ? ice->shaders.output_topology_is_points_or_lines
5869 : ice->state.prim_is_points_or_lines);
5870
5871 uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
5872 iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
5873 cl.StatisticsEnable = ice->state.statistics_counters_enabled;
5874 if (cso_rast->rasterizer_discard)
5875 cl.ClipMode = CLIPMODE_REJECT_ALL;
5876 else if (ice->state.window_space_position)
5877 cl.ClipMode = CLIPMODE_ACCEPT_ALL;
5878 else
5879 cl.ClipMode = CLIPMODE_NORMAL;
5880
5881 cl.PerspectiveDivideDisable = ice->state.window_space_position;
5882 cl.ViewportXYClipTestEnable = !points_or_lines;
5883
5884 if (wm_prog_data->barycentric_interp_modes &
5885 BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
5886 cl.NonPerspectiveBarycentricEnable = true;
5887
5888 cl.ForceZeroRTAIndexEnable = cso_fb->layers <= 1;
5889 cl.MaximumVPIndex = ice->state.num_viewports - 1;
5890 }
5891 iris_emit_merge(batch, cso_rast->clip, dynamic_clip,
5892 ARRAY_SIZE(cso_rast->clip));
5893 }
5894
5895 if (dirty & (IRIS_DIRTY_RASTER | IRIS_DIRTY_URB)) {
5896 struct iris_rasterizer_state *cso = ice->state.cso_rast;
5897 iris_batch_emit(batch, cso->raster, sizeof(cso->raster));
5898
5899 uint32_t dynamic_sf[GENX(3DSTATE_SF_length)];
5900 iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) {
5901 sf.ViewportTransformEnable = !ice->state.window_space_position;
5902
5903 #if GEN_GEN >= 12
5904 sf.DerefBlockSize = ice->state.urb_deref_block_size;
5905 #endif
5906 }
5907 iris_emit_merge(batch, cso->sf, dynamic_sf,
5908 ARRAY_SIZE(dynamic_sf));
5909 }
5910
5911 if (dirty & IRIS_DIRTY_WM) {
5912 struct iris_rasterizer_state *cso = ice->state.cso_rast;
5913 uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];
5914
5915 iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
5916 wm.StatisticsEnable = ice->state.statistics_counters_enabled;
5917
5918 wm.BarycentricInterpolationMode =
5919 wm_prog_data->barycentric_interp_modes;
5920
5921 if (wm_prog_data->early_fragment_tests)
5922 wm.EarlyDepthStencilControl = EDSC_PREPS;
5923 else if (wm_prog_data->has_side_effects)
5924 wm.EarlyDepthStencilControl = EDSC_PSEXEC;
5925
5926 /* We could skip this bit if color writes are enabled. */
5927 if (wm_prog_data->has_side_effects || wm_prog_data->uses_kill)
5928 wm.ForceThreadDispatchEnable = ForceON;
5929 }
5930 iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
5931 }
5932
5933 if (dirty & IRIS_DIRTY_SBE) {
5934 iris_emit_sbe(batch, ice);
5935 }
5936
5937 if (dirty & IRIS_DIRTY_PS_BLEND) {
5938 struct iris_blend_state *cso_blend = ice->state.cso_blend;
5939 struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
5940 const struct shader_info *fs_info =
5941 iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
5942
5943 uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
5944 iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
5945 pb.HasWriteableRT = has_writeable_rt(cso_blend, fs_info);
5946 pb.AlphaTestEnable = cso_zsa->alpha.enabled;
5947
5948 /* The dual source blending docs caution against using SRC1 factors
5949 * when the shader doesn't use a dual source render target write.
5950 * Empirically, this can lead to GPU hangs, and the results are
5951 * undefined anyway, so simply disable blending to avoid the hang.
5952 */
5953 pb.ColorBufferBlendEnable = (cso_blend->blend_enables & 1) &&
5954 (!cso_blend->dual_color_blending || wm_prog_data->dual_src_blend);
5955 }
5956
5957 iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
5958 ARRAY_SIZE(cso_blend->ps_blend));
5959 }
5960
5961 if (dirty & IRIS_DIRTY_WM_DEPTH_STENCIL) {
5962 struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
5963 #if GEN_GEN >= 9 && GEN_GEN < 12
5964 struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
5965 uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
5966 iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
5967 wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
5968 wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
5969 }
5970 iris_emit_merge(batch, cso->wmds, stencil_refs, ARRAY_SIZE(cso->wmds));
5971 #else
5972 /* Use modify disable fields which allow us to emit packets
5973 * directly instead of merging them later.
5974 */
5975 iris_batch_emit(batch, cso->wmds, sizeof(cso->wmds));
5976 #endif
5977
5978 #if GEN_GEN >= 12
5979 iris_batch_emit(batch, cso->depth_bounds, sizeof(cso->depth_bounds));
5980 #endif
5981 }
5982
5983 if (dirty & IRIS_DIRTY_STENCIL_REF) {
5984 #if GEN_GEN >= 12
5985 /* Use modify disable fields which allow us to emit packets
5986 * directly instead of merging them later.
5987 */
5988 struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
5989 uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
5990 iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
5991 wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
5992 wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
5993 wmds.StencilTestMaskModifyDisable = true;
5994 wmds.StencilWriteMaskModifyDisable = true;
5995 wmds.StencilStateModifyDisable = true;
5996 wmds.DepthStateModifyDisable = true;
5997 }
5998 iris_batch_emit(batch, stencil_refs, sizeof(stencil_refs));
5999 #endif
6000 }
6001
6002 if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
6003 uint32_t scissor_offset =
6004 emit_state(batch, ice->state.dynamic_uploader,
6005 &ice->state.last_res.scissor,
6006 ice->state.scissors,
6007 sizeof(struct pipe_scissor_state) *
6008 ice->state.num_viewports, 32);
6009
6010 iris_emit_cmd(batch, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
6011 ptr.ScissorRectPointer = scissor_offset;
6012 }
6013 }
6014
6015 if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
6016 struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
6017
6018 /* Do not emit the clear params yet. We need to update the clear value
6019 * first.
6020 */
6021 uint32_t clear_length = GENX(3DSTATE_CLEAR_PARAMS_length) * 4;
6022 uint32_t cso_z_size = batch->screen->isl_dev.ds.size - clear_length;
6023
6024 #if GEN_GEN == 12
6025 /* GEN:BUG:14010455700
6026 *
6027 * ISL will change some CHICKEN registers depending on the depth surface
6028 * format, along with emitting the depth and stencil packets. In that
6029 * case, we want to do a depth flush and stall, so the pipeline is not
6030 * using these settings while we change the registers.
6031 */
6032 iris_emit_end_of_pipe_sync(batch,
6033 "Workaround: Stop pipeline for 14010455700",
6034 PIPE_CONTROL_DEPTH_STALL |
6035 PIPE_CONTROL_DEPTH_CACHE_FLUSH);
6036 #endif
6037
6038 iris_batch_emit(batch, cso_z->packets, cso_z_size);
6039 if (GEN_GEN >= 12) {
6040 /* GEN:BUG:1408224581
6041 *
6042 * Workaround (Gen12LP A-step only): an additional pipe control with
6043 * post-sync = store dword operation is required. (The workaround is to
6044 * emit an additional pipe control after the stencil state whenever the
6045 * surface state bits of this state change.)
6046 */
6047 iris_emit_pipe_control_write(batch, "WA for stencil state",
6048 PIPE_CONTROL_WRITE_IMMEDIATE,
6049 batch->screen->workaround_address.bo,
6050 batch->screen->workaround_address.offset, 0);
6051 }
6052
6053 union isl_color_value clear_value = { .f32 = { 0, } };
6054
6055 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
6056 if (cso_fb->zsbuf) {
6057 struct iris_resource *zres, *sres;
6058 iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
6059 &zres, &sres);
6060 if (zres && zres->aux.bo)
6061 clear_value = iris_resource_get_clear_color(zres, NULL, NULL);
6062 }
6063
6064 uint32_t clear_params[GENX(3DSTATE_CLEAR_PARAMS_length)];
6065 iris_pack_command(GENX(3DSTATE_CLEAR_PARAMS), clear_params, clear) {
6066 clear.DepthClearValueValid = true;
6067 clear.DepthClearValue = clear_value.f32[0];
6068 }
6069 iris_batch_emit(batch, clear_params, clear_length);
6070 }
6071
6072 if (dirty & (IRIS_DIRTY_DEPTH_BUFFER | IRIS_DIRTY_WM_DEPTH_STENCIL)) {
6073 /* Listen for buffer changes, and also write-enable changes. */
6074 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
6075 pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
6076 }
6077
6078 if (dirty & IRIS_DIRTY_POLYGON_STIPPLE) {
6079 iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
6080 for (int i = 0; i < 32; i++) {
6081 poly.PatternRow[i] = ice->state.poly_stipple.stipple[i];
6082 }
6083 }
6084 }
6085
6086 if (dirty & IRIS_DIRTY_LINE_STIPPLE) {
6087 struct iris_rasterizer_state *cso = ice->state.cso_rast;
6088 iris_batch_emit(batch, cso->line_stipple, sizeof(cso->line_stipple));
6089 }
6090
6091 if (dirty & IRIS_DIRTY_VF_TOPOLOGY) {
6092 iris_emit_cmd(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
6093 topo.PrimitiveTopologyType =
6094 translate_prim_type(draw->mode, draw->vertices_per_patch);
6095 }
6096 }
6097
6098 if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
6099 int count = util_bitcount64(ice->state.bound_vertex_buffers);
6100 uint64_t dynamic_bound = ice->state.bound_vertex_buffers;
6101
6102 if (ice->state.vs_uses_draw_params) {
6103 assert(ice->draw.draw_params.res);
6104
6105 struct iris_vertex_buffer_state *state =
6106 &(ice->state.genx->vertex_buffers[count]);
6107 pipe_resource_reference(&state->resource, ice->draw.draw_params.res);
6108 struct iris_resource *res = (void *) state->resource;
6109
6110 iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
6111 vb.VertexBufferIndex = count;
6112 vb.AddressModifyEnable = true;
6113 vb.BufferPitch = 0;
6114 vb.BufferSize = res->bo->size - ice->draw.draw_params.offset;
6115 vb.BufferStartingAddress =
6116 ro_bo(NULL, res->bo->gtt_offset +
6117 (int) ice->draw.draw_params.offset);
6118 vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev);
6119 }
6120 dynamic_bound |= 1ull << count;
6121 count++;
6122 }
6123
6124 if (ice->state.vs_uses_derived_draw_params) {
6125 struct iris_vertex_buffer_state *state =
6126 &(ice->state.genx->vertex_buffers[count]);
6127 pipe_resource_reference(&state->resource,
6128 ice->draw.derived_draw_params.res);
6129 struct iris_resource *res = (void *) ice->draw.derived_draw_params.res;
6130
6131 iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
6132 vb.VertexBufferIndex = count;
6133 vb.AddressModifyEnable = true;
6134 vb.BufferPitch = 0;
6135 vb.BufferSize =
6136 res->bo->size - ice->draw.derived_draw_params.offset;
6137 vb.BufferStartingAddress =
6138 ro_bo(NULL, res->bo->gtt_offset +
6139 (int) ice->draw.derived_draw_params.offset);
6140 vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev);
6141 }
6142 dynamic_bound |= 1ull << count;
6143 count++;
6144 }
6145
6146 if (count) {
6147 #if GEN_GEN >= 11
6148 /* Gen11+ doesn't need the cache workaround below */
6149 uint64_t bound = dynamic_bound;
6150 while (bound) {
6151 const int i = u_bit_scan64(&bound);
6152 iris_use_optional_res(batch, genx->vertex_buffers[i].resource,
6153 false, IRIS_DOMAIN_OTHER_READ);
6154 }
6155 #else
6156 /* The VF cache designers cut corners, and made the cache key's
6157 * <VertexBufferIndex, Memory Address> tuple only consider the bottom
6158 * 32 bits of the address. If you have two vertex buffers which get
6159 * placed exactly 4 GiB apart and use them in back-to-back draw calls,
6160 * you can get collisions (even within a single batch).
6161 *
6162 * So, we need to do a VF cache invalidate if the buffer for a VB
6163 * slot changes [48:32] address bits from the previous time.
6164 */
6165 unsigned flush_flags = 0;
6166
6167 uint64_t bound = dynamic_bound;
6168 while (bound) {
6169 const int i = u_bit_scan64(&bound);
6170 uint16_t high_bits = 0;
6171
6172 struct iris_resource *res =
6173 (void *) genx->vertex_buffers[i].resource;
6174 if (res) {
6175 iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_OTHER_READ);
6176
6177 high_bits = res->bo->gtt_offset >> 32ull;
6178 if (high_bits != ice->state.last_vbo_high_bits[i]) {
6179 flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE |
6180 PIPE_CONTROL_CS_STALL;
6181 ice->state.last_vbo_high_bits[i] = high_bits;
6182 }
6183 }
6184 }
6185
6186 if (flush_flags) {
6187 iris_emit_pipe_control_flush(batch,
6188 "workaround: VF cache 32-bit key [VB]",
6189 flush_flags);
6190 }
6191 #endif
6192
6193 const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
6194
6195 uint32_t *map =
6196 iris_get_command_space(batch, 4 * (1 + vb_dwords * count));
6197 _iris_pack_command(batch, GENX(3DSTATE_VERTEX_BUFFERS), map, vb) {
6198 vb.DWordLength = (vb_dwords * count + 1) - 2;
6199 }
6200 map += 1;
6201
6202 bound = dynamic_bound;
6203 while (bound) {
6204 const int i = u_bit_scan64(&bound);
6205 memcpy(map, genx->vertex_buffers[i].state,
6206 sizeof(uint32_t) * vb_dwords);
6207 map += vb_dwords;
6208 }
6209 }
6210 }
6211
6212 if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) {
6213 struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
6214 const unsigned entries = MAX2(cso->count, 1);
6215 if (!(ice->state.vs_needs_sgvs_element ||
6216 ice->state.vs_uses_derived_draw_params ||
6217 ice->state.vs_needs_edge_flag)) {
6218 iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) *
6219 (1 + entries * GENX(VERTEX_ELEMENT_STATE_length)));
6220 } else {
6221 uint32_t dynamic_ves[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
6222 const unsigned dyn_count = cso->count +
6223 ice->state.vs_needs_sgvs_element +
6224 ice->state.vs_uses_derived_draw_params;
6225
6226 iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS),
6227 &dynamic_ves, ve) {
6228 ve.DWordLength =
6229 1 + GENX(VERTEX_ELEMENT_STATE_length) * dyn_count - 2;
6230 }
6231 memcpy(&dynamic_ves[1], &cso->vertex_elements[1],
6232 (cso->count - ice->state.vs_needs_edge_flag) *
6233 GENX(VERTEX_ELEMENT_STATE_length) * sizeof(uint32_t));
6234 uint32_t *ve_pack_dest =
6235 &dynamic_ves[1 + (cso->count - ice->state.vs_needs_edge_flag) *
6236 GENX(VERTEX_ELEMENT_STATE_length)];
6237
6238 if (ice->state.vs_needs_sgvs_element) {
6239 uint32_t base_ctrl = ice->state.vs_uses_draw_params ?
6240 VFCOMP_STORE_SRC : VFCOMP_STORE_0;
6241 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
6242 ve.Valid = true;
6243 ve.VertexBufferIndex =
6244 util_bitcount64(ice->state.bound_vertex_buffers);
6245 ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
6246 ve.Component0Control = base_ctrl;
6247 ve.Component1Control = base_ctrl;
6248 ve.Component2Control = VFCOMP_STORE_0;
6249 ve.Component3Control = VFCOMP_STORE_0;
6250 }
6251 ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
6252 }
6253 if (ice->state.vs_uses_derived_draw_params) {
6254 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
6255 ve.Valid = true;
6256 ve.VertexBufferIndex =
6257 util_bitcount64(ice->state.bound_vertex_buffers) +
6258 ice->state.vs_uses_draw_params;
6259 ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
6260 ve.Component0Control = VFCOMP_STORE_SRC;
6261 ve.Component1Control = VFCOMP_STORE_SRC;
6262 ve.Component2Control = VFCOMP_STORE_0;
6263 ve.Component3Control = VFCOMP_STORE_0;
6264 }
6265 ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
6266 }
6267 if (ice->state.vs_needs_edge_flag) {
6268 for (int i = 0; i < GENX(VERTEX_ELEMENT_STATE_length); i++)
6269 ve_pack_dest[i] = cso->edgeflag_ve[i];
6270 }
6271
6272 iris_batch_emit(batch, &dynamic_ves, sizeof(uint32_t) *
6273 (1 + dyn_count * GENX(VERTEX_ELEMENT_STATE_length)));
6274 }
6275
6276 if (!ice->state.vs_needs_edge_flag) {
6277 iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) *
6278 entries * GENX(3DSTATE_VF_INSTANCING_length));
6279 } else {
6280 assert(cso->count > 0);
6281 const unsigned edgeflag_index = cso->count - 1;
6282 uint32_t dynamic_vfi[33 * GENX(3DSTATE_VF_INSTANCING_length)];
6283 memcpy(&dynamic_vfi[0], cso->vf_instancing, edgeflag_index *
6284 GENX(3DSTATE_VF_INSTANCING_length) * sizeof(uint32_t));
6285
6286 uint32_t *vfi_pack_dest = &dynamic_vfi[0] +
6287 edgeflag_index * GENX(3DSTATE_VF_INSTANCING_length);
6288 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
6289 vi.VertexElementIndex = edgeflag_index +
6290 ice->state.vs_needs_sgvs_element +
6291 ice->state.vs_uses_derived_draw_params;
6292 }
6293 for (int i = 0; i < GENX(3DSTATE_VF_INSTANCING_length); i++)
6294 vfi_pack_dest[i] |= cso->edgeflag_vfi[i];
6295
6296 iris_batch_emit(batch, &dynamic_vfi[0], sizeof(uint32_t) *
6297 entries * GENX(3DSTATE_VF_INSTANCING_length));
6298 }
6299 }
6300
6301 if (dirty & IRIS_DIRTY_VF_SGVS) {
6302 const struct brw_vs_prog_data *vs_prog_data = (void *)
6303 ice->shaders.prog[MESA_SHADER_VERTEX]->prog_data;
6304 struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
6305
6306 iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgv) {
6307 if (vs_prog_data->uses_vertexid) {
6308 sgv.VertexIDEnable = true;
6309 sgv.VertexIDComponentNumber = 2;
6310 sgv.VertexIDElementOffset =
6311 cso->count - ice->state.vs_needs_edge_flag;
6312 }
6313
6314 if (vs_prog_data->uses_instanceid) {
6315 sgv.InstanceIDEnable = true;
6316 sgv.InstanceIDComponentNumber = 3;
6317 sgv.InstanceIDElementOffset =
6318 cso->count - ice->state.vs_needs_edge_flag;
6319 }
6320 }
6321 }
6322
6323 if (dirty & IRIS_DIRTY_VF) {
6324 iris_emit_cmd(batch, GENX(3DSTATE_VF), vf) {
6325 if (draw->primitive_restart) {
6326 vf.IndexedDrawCutIndexEnable = true;
6327 vf.CutIndex = draw->restart_index;
6328 }
6329 }
6330 }
6331
6332 if (dirty & IRIS_DIRTY_VF_STATISTICS) {
6333 iris_emit_cmd(batch, GENX(3DSTATE_VF_STATISTICS), vf) {
6334 vf.StatisticsEnable = true;
6335 }
6336 }
6337
6338 #if GEN_GEN == 8
6339 if (dirty & IRIS_DIRTY_PMA_FIX) {
6340 bool enable = want_pma_fix(ice);
6341 genX(update_pma_fix)(ice, batch, enable);
6342 }
6343 #endif
6344
6345 if (ice->state.current_hash_scale != 1)
6346 genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1);
6347
6348 #if GEN_GEN >= 12
6349 genX(invalidate_aux_map_state)(batch);
6350 #endif
6351 }
6352
6353 static void
6354 iris_upload_render_state(struct iris_context *ice,
6355 struct iris_batch *batch,
6356 const struct pipe_draw_info *draw)
6357 {
6358 bool use_predicate = ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT;
6359
6360 iris_batch_sync_region_start(batch);
6361
6362 /* Always pin the binder. If we're emitting new binding table pointers,
6363 * we need it. If not, we're probably inheriting old tables via the
6364 * context, and need it anyway. Since true zero-bindings cases are
6365 * practically non-existent, just pin it and avoid last_res tracking.
6366 */
6367 iris_use_pinned_bo(batch, ice->state.binder.bo, false,
6368 IRIS_DOMAIN_NONE);
6369
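/* First draw in this batch section: re-pin any BOs referenced by state
 * we're inheriting rather than re-emitting, so they stay resident.
 */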
6370 if (!batch->contains_draw_with_next_seqno) {
6371 iris_restore_render_saved_bos(ice, batch, draw);
6372 batch->contains_draw_with_next_seqno = batch->contains_draw = true;
6373 }
6374
6375 iris_upload_dirty_render_state(ice, batch, draw);
6376
6377 if (draw->index_size > 0) {
6378 unsigned offset;
6379
6380 if (draw->has_user_indices) {
6381 u_upload_data(ice->ctx.stream_uploader, 0,
6382 draw->count * draw->index_size, 4, draw->index.user,
6383 &offset, &ice->state.last_res.index_buffer);
6384 } else {
6385 struct iris_resource *res = (void *) draw->index.resource;
6386 res->bind_history |= PIPE_BIND_INDEX_BUFFER;
6387
6388 pipe_resource_reference(&ice->state.last_res.index_buffer,
6389 draw->index.resource);
6390 offset = 0;
6391 }
6392
6393 struct iris_genx_state *genx = ice->state.genx;
6394 struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
6395
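/* 3DSTATE_INDEX_BUFFER encodes BYTE/WORD/DWORD index formats as 0/1/2,
 * so (index_size >> 1) maps 1/2/4 bytes to the right value. We pack the
 * command on the CPU and only emit it when it differs from the last one,
 * avoiding redundant re-emission.
 */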
6396 uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)];
6397 iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) {
6398 ib.IndexFormat = draw->index_size >> 1;
6399 ib.MOCS = iris_mocs(bo, &batch->screen->isl_dev);
6400 ib.BufferSize = bo->size - offset;
6401 ib.BufferStartingAddress = ro_bo(NULL, bo->gtt_offset + offset);
6402 }
6403
6404 if (memcmp(genx->last_index_buffer, ib_packet, sizeof(ib_packet)) != 0) {
6405 memcpy(genx->last_index_buffer, ib_packet, sizeof(ib_packet));
6406 iris_batch_emit(batch, ib_packet, sizeof(ib_packet));
6407 iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_OTHER_READ);
6408 }
6409
6410 #if GEN_GEN < 11
6411 /* The VF cache key only uses 32 bits; see the vertex buffer comment above. */
6412 uint16_t high_bits = bo->gtt_offset >> 32ull;
6413 if (high_bits != ice->state.last_index_bo_high_bits) {
6414 iris_emit_pipe_control_flush(batch,
6415 "workaround: VF cache 32-bit key [IB]",
6416 PIPE_CONTROL_VF_CACHE_INVALIDATE |
6417 PIPE_CONTROL_CS_STALL);
6418 ice->state.last_index_bo_high_bits = high_bits;
6419 }
6420 #endif
6421 }
6422
6423 #define _3DPRIM_END_OFFSET 0x2420
6424 #define _3DPRIM_START_VERTEX 0x2430
6425 #define _3DPRIM_VERTEX_COUNT 0x2434
6426 #define _3DPRIM_INSTANCE_COUNT 0x2438
6427 #define _3DPRIM_START_INSTANCE 0x243C
6428 #define _3DPRIM_BASE_VERTEX 0x2440
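/* MI register offsets read by 3DPRIMITIVE when its
 * IndirectParameterEnable bit is set.
 */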
6429
6430 if (draw->indirect) {
6431 if (draw->indirect->indirect_draw_count) {
6432 use_predicate = true;
6433
6434 struct iris_bo *draw_count_bo =
6435 iris_resource_bo(draw->indirect->indirect_draw_count);
6436 unsigned draw_count_offset =
6437 draw->indirect->indirect_draw_count_offset;
6438
6439 iris_emit_pipe_control_flush(batch,
6440 "ensure indirect draw buffer is flushed",
6441 PIPE_CONTROL_FLUSH_ENABLE);
6442
6443 if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
6444 struct gen_mi_builder b;
6445 gen_mi_builder_init(&b, batch);
6446
6447 /* comparison = draw id < draw count */
6448 struct gen_mi_value comparison =
6449 gen_mi_ult(&b, gen_mi_imm(draw->drawid),
6450 gen_mi_mem32(ro_bo(draw_count_bo,
6451 draw_count_offset)));
6452
6453 /* predicate = comparison & conditional rendering predicate */
6454 gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_RESULT),
6455 gen_mi_iand(&b, comparison,
6456 gen_mi_reg32(CS_GPR(15))));
6457 } else {
6458 uint32_t mi_predicate;
6459
6460 /* Upload the id of the current draw to MI_PREDICATE_SRC1. */
6461 iris_load_register_imm64(batch, MI_PREDICATE_SRC1, draw->drawid);
6462 /* Upload the current draw count from the draw parameters buffer
6463 * to MI_PREDICATE_SRC0.
6464 */
6465 iris_load_register_mem32(batch, MI_PREDICATE_SRC0,
6466 draw_count_bo, draw_count_offset);
6467 /* Zero the top 32-bits of MI_PREDICATE_SRC0 */
6468 iris_load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0);
6469
6470 if (draw->drawid == 0) {
6471 mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
6472 MI_PREDICATE_COMBINEOP_SET |
6473 MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
6474 } else {
6475 /* While draw_index < draw_count the predicate's result will be
6476 * (draw_index == draw_count) ^ TRUE = TRUE
6477 * When draw_index == draw_count the result is
6478 * (TRUE) ^ TRUE = FALSE
6479 * After this all results will be:
6480 * (FALSE) ^ FALSE = FALSE
6481 */
6482 mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOAD |
6483 MI_PREDICATE_COMBINEOP_XOR |
6484 MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
6485 }
6486 iris_batch_emit(batch, &mi_predicate, sizeof(uint32_t));
6487 }
6488 }
6489 struct iris_bo *bo = iris_resource_bo(draw->indirect->buffer);
6490 assert(bo);
6491
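/* The indirect parameters follow GL's DrawElementsIndirectCommand
 * layout (count, instance_count, first, base_vertex, base_instance);
 * non-indexed draws simply omit the base_vertex field, which is why
 * the register loads below use different offsets in each case.
 */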
6492 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6493 lrm.RegisterAddress = _3DPRIM_VERTEX_COUNT;
6494 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 0);
6495 }
6496 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6497 lrm.RegisterAddress = _3DPRIM_INSTANCE_COUNT;
6498 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 4);
6499 }
6500 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6501 lrm.RegisterAddress = _3DPRIM_START_VERTEX;
6502 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 8);
6503 }
6504 if (draw->index_size) {
6505 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6506 lrm.RegisterAddress = _3DPRIM_BASE_VERTEX;
6507 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
6508 }
6509 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6510 lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
6511 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 16);
6512 }
6513 } else {
6514 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6515 lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
6516 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
6517 }
6518 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
6519 lri.RegisterOffset = _3DPRIM_BASE_VERTEX;
6520 lri.DataDWord = 0;
6521 }
6522 }
6523 } else if (draw->count_from_stream_output) {
6524 struct iris_stream_output_target *so =
6525 (void *) draw->count_from_stream_output;
6526
6527 /* XXX: Replace with actual cache tracking */
6528 iris_emit_pipe_control_flush(batch,
6529 "draw count from stream output stall",
6530 PIPE_CONTROL_CS_STALL);
6531
6532 struct gen_mi_builder b;
6533 gen_mi_builder_init(&b, batch);
6534
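/* vertex count = (SO write offset - buffer offset) / stride, computed
 * entirely on the GPU with MI math so we never stall on a CPU readback.
 */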
6535 struct iris_address addr =
6536 ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
6537 struct gen_mi_value offset =
6538 gen_mi_iadd_imm(&b, gen_mi_mem32(addr), -so->base.buffer_offset);
6539
6540 gen_mi_store(&b, gen_mi_reg32(_3DPRIM_VERTEX_COUNT),
6541 gen_mi_udiv32_imm(&b, offset, so->stride));
6542
6543 _iris_emit_lri(batch, _3DPRIM_START_VERTEX, 0);
6544 _iris_emit_lri(batch, _3DPRIM_BASE_VERTEX, 0);
6545 _iris_emit_lri(batch, _3DPRIM_START_INSTANCE, 0);
6546 _iris_emit_lri(batch, _3DPRIM_INSTANCE_COUNT, draw->instance_count);
6547 }
6548
6549 iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
6550 prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;
6551 prim.PredicateEnable = use_predicate;
6552
6553 if (draw->indirect || draw->count_from_stream_output) {
6554 prim.IndirectParameterEnable = true;
6555 } else {
6556 prim.StartInstanceLocation = draw->start_instance;
6557 prim.InstanceCount = draw->instance_count;
6558 prim.VertexCountPerInstance = draw->count;
6559
6560 prim.StartVertexLocation = draw->start;
6561
6562 if (draw->index_size) {
6563 prim.BaseVertexLocation += draw->index_bias;
6564 } else {
6565 prim.StartVertexLocation += draw->index_bias;
6566 }
6567 }
6568 }
6569
6570 iris_batch_sync_region_end(batch);
6571 }
6572
6573 static void
6574 iris_load_indirect_location(struct iris_context *ice,
6575 struct iris_batch *batch,
6576 const struct pipe_grid_info *grid)
6577 {
6578 #define GPGPU_DISPATCHDIMX 0x2500
6579 #define GPGPU_DISPATCHDIMY 0x2504
6580 #define GPGPU_DISPATCHDIMZ 0x2508
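/* MI register offsets read by GPGPU_WALKER when IndirectParameterEnable
 * is set; load the X/Y/Z dispatch dimensions from the grid size buffer.
 */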
6581
6582 assert(grid->indirect);
6583
6584 struct iris_state_ref *grid_size = &ice->state.grid_size;
6585 struct iris_bo *bo = iris_resource_bo(grid_size->res);
6586 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6587 lrm.RegisterAddress = GPGPU_DISPATCHDIMX;
6588 lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 0);
6589 }
6590 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6591 lrm.RegisterAddress = GPGPU_DISPATCHDIMY;
6592 lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 4);
6593 }
6594 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6595 lrm.RegisterAddress = GPGPU_DISPATCHDIMZ;
6596 lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 8);
6597 }
6598 }
6599
6600 static void
6601 iris_upload_gpgpu_walker(struct iris_context *ice,
6602 struct iris_batch *batch,
6603 const struct pipe_grid_info *grid)
6604 {
6605 const uint64_t stage_dirty = ice->state.stage_dirty;
6606 struct iris_screen *screen = batch->screen;
6607 const struct gen_device_info *devinfo = &screen->devinfo;
6608 struct iris_binder *binder = &ice->state.binder;
6609 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
6610 struct iris_compiled_shader *shader =
6611 ice->shaders.prog[MESA_SHADER_COMPUTE];
6612 struct brw_stage_prog_data *prog_data = shader->prog_data;
6613 struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
6614 const uint32_t group_size = grid->block[0] * grid->block[1] * grid->block[2];
6615 const unsigned simd_size =
6616 brw_cs_simd_size_for_group_size(devinfo, cs_prog_data, group_size);
6617 const unsigned threads = DIV_ROUND_UP(group_size, simd_size);
6618
6619
6620 if (stage_dirty & IRIS_STAGE_DIRTY_CS) {
6621 /* The MEDIA_VFE_STATE documentation for Gen8+ says:
6622 *
6623 * "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
6624 * the only bits that are changed are scoreboard related: Scoreboard
6625 * Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta. For
6626 * these scoreboard related states, a MEDIA_STATE_FLUSH is
6627 * sufficient."
6628 */
6629 iris_emit_pipe_control_flush(batch,
6630 "workaround: stall before MEDIA_VFE_STATE",
6631 PIPE_CONTROL_CS_STALL);
6632
6633 iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
6634 if (prog_data->total_scratch) {
6635 struct iris_bo *bo =
6636 iris_get_scratch_space(ice, prog_data->total_scratch,
6637 MESA_SHADER_COMPUTE);
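/* PerThreadScratchSpace is a power-of-two encoding where 0 means 1KB
 * per thread and each step doubles it; total_scratch is a power of two
 * (>= 1KB), so ffs(total_scratch) - 11 == log2(total_scratch) - 10.
 */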
6638 vfe.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
6639 vfe.ScratchSpaceBasePointer = rw_bo(bo, 0, IRIS_DOMAIN_NONE);
6640 }
6641
6642 vfe.MaximumNumberofThreads =
6643 devinfo->max_cs_threads * screen->subslice_total - 1;
6644 #if GEN_GEN < 11
6645 vfe.ResetGatewayTimer =
6646 Resettingrelativetimerandlatchingtheglobaltimestamp;
6647 #endif
6648 #if GEN_GEN == 8
6649 vfe.BypassGatewayControl = true;
6650 #endif
6651 vfe.NumberofURBEntries = 2;
6652 vfe.URBEntryAllocationSize = 2;
6653
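/* CURBEAllocationSize is in 256-bit (one GRF) units: per-thread push
 * constants for each thread plus the shared cross-thread block, padded
 * to an even number of registers.
 */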
6654 vfe.CURBEAllocationSize =
6655 ALIGN(cs_prog_data->push.per_thread.regs * threads +
6656 cs_prog_data->push.cross_thread.regs, 2);
6657 }
6658 }
6659
6660 /* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */
6661 if (stage_dirty & IRIS_STAGE_DIRTY_CS) {
6662 uint32_t curbe_data_offset = 0;
6663 assert(cs_prog_data->push.cross_thread.dwords == 0 &&
6664 cs_prog_data->push.per_thread.dwords == 1 &&
6665 cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
6666 const unsigned push_const_size =
6667 brw_cs_push_const_total_size(cs_prog_data, threads);
6668 uint32_t *curbe_data_map =
6669 stream_state(batch, ice->state.dynamic_uploader,
6670 &ice->state.last_res.cs_thread_ids,
6671 ALIGN(push_const_size, 64), 64,
6672 &curbe_data_offset);
6673 assert(curbe_data_map);
6674 memset(curbe_data_map, 0x5a, ALIGN(push_const_size, 64));
6675 iris_fill_cs_push_const_buffer(cs_prog_data, threads, curbe_data_map);
6676
6677 iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
6678 curbe.CURBETotalDataLength = ALIGN(push_const_size, 64);
6679 curbe.CURBEDataStartAddress = curbe_data_offset;
6680 }
6681 }
6682
6683 if (stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_CS |
6684 IRIS_STAGE_DIRTY_BINDINGS_CS |
6685 IRIS_STAGE_DIRTY_CONSTANTS_CS |
6686 IRIS_STAGE_DIRTY_CS)) {
6687 uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
6688
6689 iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
6690 idd.KernelStartPointer =
6691 KSP(shader) + brw_cs_prog_data_prog_offset(cs_prog_data, simd_size);
6692 idd.SamplerStatePointer = shs->sampler_table.offset;
6693 idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
6694 idd.NumberofThreadsinGPGPUThreadGroup = threads;
6695 }
6696
6697 for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
6698 desc[i] |= ((uint32_t *) shader->derived_data)[i];
6699
6700 iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
6701 load.InterfaceDescriptorTotalLength =
6702 GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
6703 load.InterfaceDescriptorDataStartAddress =
6704 emit_state(batch, ice->state.dynamic_uploader,
6705 &ice->state.last_res.cs_desc, desc, sizeof(desc), 64);
6706 }
6707 }
6708
6709 if (grid->indirect)
6710 iris_load_indirect_location(ice, batch, grid);
6711
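/* If group_size isn't a multiple of simd_size, the last thread in the
 * group has fewer active invocations; right_mask disables the unused
 * channels. E.g. a 100-invocation group at SIMD32 runs 4 threads, with
 * only 4 channels live in the last one.
 */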
6712 const uint32_t right_mask = brw_cs_right_mask(group_size, simd_size);
6713
6714 iris_emit_cmd(batch, GENX(GPGPU_WALKER), ggw) {
6715 ggw.IndirectParameterEnable = grid->indirect != NULL;
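/* SIMDSize encodes SIMD8/16/32 as 0/1/2, hence the division by 16. */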
6716 ggw.SIMDSize = simd_size / 16;
6717 ggw.ThreadDepthCounterMaximum = 0;
6718 ggw.ThreadHeightCounterMaximum = 0;
6719 ggw.ThreadWidthCounterMaximum = threads - 1;
6720 ggw.ThreadGroupIDXDimension = grid->grid[0];
6721 ggw.ThreadGroupIDYDimension = grid->grid[1];
6722 ggw.ThreadGroupIDZDimension = grid->grid[2];
6723 ggw.RightExecutionMask = right_mask;
6724 ggw.BottomExecutionMask = 0xffffffff;
6725 }
6726
6727 iris_emit_cmd(batch, GENX(MEDIA_STATE_FLUSH), msf);
6728 }
6729
6730 static void
6731 iris_upload_compute_state(struct iris_context *ice,
6732 struct iris_batch *batch,
6733 const struct pipe_grid_info *grid)
6734 {
6735 const uint64_t stage_dirty = ice->state.stage_dirty;
6736 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
6737 struct iris_compiled_shader *shader =
6738 ice->shaders.prog[MESA_SHADER_COMPUTE];
6739
6740 iris_batch_sync_region_start(batch);
6741
6742 /* Always pin the binder. If we're emitting new binding table pointers,
6743 * we need it. If not, we're probably inheriting old tables via the
6744 * context, and need it anyway. Since true zero-bindings cases are
6745 * practically non-existent, just pin it and avoid last_res tracking.
6746 */
6747 iris_use_pinned_bo(batch, ice->state.binder.bo, false, IRIS_DOMAIN_NONE);
6748
6749 if ((stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
6750 shs->sysvals_need_upload)
6751 upload_sysvals(ice, MESA_SHADER_COMPUTE);
6752
6753 if (stage_dirty & IRIS_STAGE_DIRTY_BINDINGS_CS)
6754 iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);
6755
6756 if (stage_dirty & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS)
6757 iris_upload_sampler_states(ice, MESA_SHADER_COMPUTE);
6758
6759 iris_use_optional_res(batch, shs->sampler_table.res, false,
6760 IRIS_DOMAIN_NONE);
6761 iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false,
6762 IRIS_DOMAIN_NONE);
6763
6764 if (ice->state.need_border_colors)
6765 iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false,
6766 IRIS_DOMAIN_NONE);
6767
6768 #if GEN_GEN >= 12
6769 genX(invalidate_aux_map_state)(batch);
6770 #endif
6771
6772 iris_upload_gpgpu_walker(ice, batch, grid);
6773
6774 if (!batch->contains_draw_with_next_seqno) {
6775 iris_restore_compute_saved_bos(ice, batch, grid);
6776 batch->contains_draw_with_next_seqno = batch->contains_draw = true;
6777 }
6778
6779 iris_batch_sync_region_end(batch);
6780 }
6781
6782 /**
6783 * State module teardown.
6784 */
6785 static void
6786 iris_destroy_state(struct iris_context *ice)
6787 {
6788 struct iris_genx_state *genx = ice->state.genx;
6789
6790 pipe_resource_reference(&ice->draw.draw_params.res, NULL);
6791 pipe_resource_reference(&ice->draw.derived_draw_params.res, NULL);
6792
6793 /* Loop over all VBOs, including ones for draw parameters */
6794 for (unsigned i = 0; i < ARRAY_SIZE(genx->vertex_buffers); i++) {
6795 pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL);
6796 }
6797
6798 free(ice->state.genx);
6799
6800 for (int i = 0; i < 4; i++) {
6801 pipe_so_target_reference(&ice->state.so_target[i], NULL);
6802 }
6803
6804 for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
6805 pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
6806 }
6807 pipe_surface_reference(&ice->state.framebuffer.zsbuf, NULL);
6808
6809 for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
6810 struct iris_shader_state *shs = &ice->state.shaders[stage];
6811 pipe_resource_reference(&shs->sampler_table.res, NULL);
6812 for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
6813 pipe_resource_reference(&shs->constbuf[i].buffer, NULL);
6814 pipe_resource_reference(&shs->constbuf_surf_state[i].res, NULL);
6815 }
6816 for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
6817 pipe_resource_reference(&shs->image[i].base.resource, NULL);
6818 pipe_resource_reference(&shs->image[i].surface_state.ref.res, NULL);
6819 free(shs->image[i].surface_state.cpu);
6820 }
6821 for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
6822 pipe_resource_reference(&shs->ssbo[i].buffer, NULL);
6823 pipe_resource_reference(&shs->ssbo_surf_state[i].res, NULL);
6824 }
6825 for (int i = 0; i < IRIS_MAX_TEXTURE_SAMPLERS; i++) {
6826 pipe_sampler_view_reference((struct pipe_sampler_view **)
6827 &shs->textures[i], NULL);
6828 }
6829 }
6830
6831 pipe_resource_reference(&ice->state.grid_size.res, NULL);
6832 pipe_resource_reference(&ice->state.grid_surf_state.res, NULL);
6833
6834 pipe_resource_reference(&ice->state.null_fb.res, NULL);
6835 pipe_resource_reference(&ice->state.unbound_tex.res, NULL);
6836
6837 pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
6838 pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
6839 pipe_resource_reference(&ice->state.last_res.color_calc, NULL);
6840 pipe_resource_reference(&ice->state.last_res.scissor, NULL);
6841 pipe_resource_reference(&ice->state.last_res.blend, NULL);
6842 pipe_resource_reference(&ice->state.last_res.index_buffer, NULL);
6843 pipe_resource_reference(&ice->state.last_res.cs_thread_ids, NULL);
6844 pipe_resource_reference(&ice->state.last_res.cs_desc, NULL);
6845 }
6846
6847 /* ------------------------------------------------------------------- */
6848
6849 static void
6850 iris_rebind_buffer(struct iris_context *ice,
6851 struct iris_resource *res)
6852 {
6853 struct pipe_context *ctx = &ice->ctx;
6854 struct iris_genx_state *genx = ice->state.genx;
6855
6856 assert(res->base.target == PIPE_BUFFER);
6857
6858 /* Buffers can't be framebuffer attachments, nor display related,
6859 * and we don't have upstream Clover support.
6860 */
6861 assert(!(res->bind_history & (PIPE_BIND_DEPTH_STENCIL |
6862 PIPE_BIND_RENDER_TARGET |
6863 PIPE_BIND_BLENDABLE |
6864 PIPE_BIND_DISPLAY_TARGET |
6865 PIPE_BIND_CURSOR |
6866 PIPE_BIND_COMPUTE_RESOURCE |
6867 PIPE_BIND_GLOBAL)));
6868
6869 if (res->bind_history & PIPE_BIND_VERTEX_BUFFER) {
6870 uint64_t bound_vbs = ice->state.bound_vertex_buffers;
6871 while (bound_vbs) {
6872 const int i = u_bit_scan64(&bound_vbs);
6873 struct iris_vertex_buffer_state *state = &genx->vertex_buffers[i];
6874
6875 /* Update the CPU struct */
6876 STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32);
6877 STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64);
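/* The STATIC_ASSERTs confirm that BufferStartingAddress occupies
 * DWords 1-2 of the packed VERTEX_BUFFER_STATE, so we can patch the
 * 64-bit address in place.
 */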
6878 uint64_t *addr = (uint64_t *) &state->state[1];
6879 struct iris_bo *bo = iris_resource_bo(state->resource);
6880
6881 if (*addr != bo->gtt_offset + state->offset) {
6882 *addr = bo->gtt_offset + state->offset;
6883 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
6884 }
6885 }
6886 }
6887
6888 /* We don't need to handle PIPE_BIND_INDEX_BUFFER here: we re-emit
6889 * the 3DSTATE_INDEX_BUFFER packet whenever the address changes.
6890 *
6891 * There is also no need to handle these:
6892 * - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw)
6893 * - PIPE_BIND_QUERY_BUFFER (no persistent state references)
6894 */
6895
6896 if (res->bind_history & PIPE_BIND_STREAM_OUTPUT) {
6897 /* XXX: be careful about resetting vs appending... */
6898 assert(false);
6899 }
6900
6901 for (int s = MESA_SHADER_VERTEX; s < MESA_SHADER_STAGES; s++) {
6902 struct iris_shader_state *shs = &ice->state.shaders[s];
6903 enum pipe_shader_type p_stage = stage_to_pipe(s);
6904
6905 if (!(res->bind_stages & (1 << s)))
6906 continue;
6907
6908 if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
6909 /* Skip constant buffer 0, it's for regular uniforms, not UBOs */
6910 uint32_t bound_cbufs = shs->bound_cbufs & ~1u;
6911 while (bound_cbufs) {
6912 const int i = u_bit_scan(&bound_cbufs);
6913 struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
6914 struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
6915
6916 if (res->bo == iris_resource_bo(cbuf->buffer)) {
6917 pipe_resource_reference(&surf_state->res, NULL);
6918 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << s;
6919 }
6920 }
6921 }
6922
6923 if (res->bind_history & PIPE_BIND_SHADER_BUFFER) {
6924 uint32_t bound_ssbos = shs->bound_ssbos;
6925 while (bound_ssbos) {
6926 const int i = u_bit_scan(&bound_ssbos);
6927 struct pipe_shader_buffer *ssbo = &shs->ssbo[i];
6928
6929 if (res->bo == iris_resource_bo(ssbo->buffer)) {
6930 struct pipe_shader_buffer buf = {
6931 .buffer = &res->base,
6932 .buffer_offset = ssbo->buffer_offset,
6933 .buffer_size = ssbo->buffer_size,
6934 };
6935 iris_set_shader_buffers(ctx, p_stage, i, 1, &buf,
6936 (shs->writable_ssbos >> i) & 1);
6937 }
6938 }
6939 }
6940
6941 if (res->bind_history & PIPE_BIND_SAMPLER_VIEW) {
6942 uint32_t bound_sampler_views = shs->bound_sampler_views;
6943 while (bound_sampler_views) {
6944 const int i = u_bit_scan(&bound_sampler_views);
6945 struct iris_sampler_view *isv = shs->textures[i];
6946 struct iris_bo *bo = isv->res->bo;
6947
6948 if (update_surface_state_addrs(ice->state.surface_uploader,
6949 &isv->surface_state, bo)) {
6950 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
6951 }
6952 }
6953 }
6954
6955 if (res->bind_history & PIPE_BIND_SHADER_IMAGE) {
6956 uint32_t bound_image_views = shs->bound_image_views;
6957 while (bound_image_views) {
6958 const int i = u_bit_scan(&bound_image_views);
6959 struct iris_image_view *iv = &shs->image[i];
6960 struct iris_bo *bo = iris_resource_bo(iv->base.resource);
6961
6962 if (update_surface_state_addrs(ice->state.surface_uploader,
6963 &iv->surface_state, bo)) {
6964 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
6965 }
6966 }
6967 }
6968 }
6969 }
6970
6971 /* ------------------------------------------------------------------- */
6972
6973 /**
6974 * Introduce a batch synchronization boundary, and update its cache coherency
6975 * status to reflect the execution of a PIPE_CONTROL command with the
6976 * specified flags.
6977 */
6978 static void
6979 batch_mark_sync_for_pipe_control(struct iris_batch *batch, uint32_t flags)
6980 {
6981 iris_batch_sync_boundary(batch);
6982
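/* Roughly: with a CS stall, a cache-flush bit guarantees that prior
 * writes from the matching domain have completed, so it's marked as a
 * flush-sync; the flush and invalidate bits also refresh the
 * corresponding caches, which is marked as an invalidate-sync.
 */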
6983 if ((flags & PIPE_CONTROL_CS_STALL)) {
6984 if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
6985 iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_RENDER_WRITE);
6986
6987 if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
6988 iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);
6989
6990 if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
6991 iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_WRITE);
6992
6993 if ((flags & (PIPE_CONTROL_CACHE_FLUSH_BITS |
6994 PIPE_CONTROL_STALL_AT_SCOREBOARD)))
6995 iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_READ);
6996 }
6997
6998 if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
6999 iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_RENDER_WRITE);
7000
7001 if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
7002 iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);
7003
7004 if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
7005 iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_WRITE);
7006
7007 if ((flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) &&
7008 (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE))
7009 iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_READ);
7010 }
7011
7012 static unsigned
7013 flags_to_post_sync_op(uint32_t flags)
7014 {
7015 if (flags & PIPE_CONTROL_WRITE_IMMEDIATE)
7016 return WriteImmediateData;
7017
7018 if (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT)
7019 return WritePSDepthCount;
7020
7021 if (flags & PIPE_CONTROL_WRITE_TIMESTAMP)
7022 return WriteTimestamp;
7023
7024 return 0;
7025 }
7026
7027 /**
7028 * Do the given flags have a Post Sync or LRI Post Sync operation?
7029 */
7030 static enum pipe_control_flags
7031 get_post_sync_flags(enum pipe_control_flags flags)
7032 {
7033 flags &= PIPE_CONTROL_WRITE_IMMEDIATE |
7034 PIPE_CONTROL_WRITE_DEPTH_COUNT |
7035 PIPE_CONTROL_WRITE_TIMESTAMP |
7036 PIPE_CONTROL_LRI_POST_SYNC_OP;
7037
7038 /* Only one "Post Sync Op" is allowed, and it's mutually exclusive with
7039 * "LRI Post Sync Operation". So more than one bit set would be illegal.
7040 */
7041 assert(util_bitcount(flags) <= 1);
7042
7043 return flags;
7044 }
7045
7046 #define IS_COMPUTE_PIPELINE(batch) (batch->name == IRIS_BATCH_COMPUTE)
7047
7048 /**
7049 * Emit a series of PIPE_CONTROL commands, taking into account any
7050 * workarounds necessary to actually accomplish the caller's request.
7051 *
7052 * Unless otherwise noted, spec quotations in this function come from:
7053 *
7054 * Synchronization of the 3D Pipeline > PIPE_CONTROL Command > Programming
7055 * Restrictions for PIPE_CONTROL.
7056 *
7057 * You should not use this function directly. Use the helpers in
7058 * iris_pipe_control.c instead, which may split the pipe control further.
7059 */
7060 static void
7061 iris_emit_raw_pipe_control(struct iris_batch *batch,
7062 const char *reason,
7063 uint32_t flags,
7064 struct iris_bo *bo,
7065 uint32_t offset,
7066 uint64_t imm)
7067 {
7068 UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
7069 enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
7070 enum pipe_control_flags non_lri_post_sync_flags =
7071 post_sync_flags & ~PIPE_CONTROL_LRI_POST_SYNC_OP;
7072
7073 /* Recursive PIPE_CONTROL workarounds --------------------------------
7074 * (http://knowyourmeme.com/memes/xzibit-yo-dawg)
7075 *
7076 * We do these first because we want to look at the original operation,
7077 * rather than any workarounds we set.
7078 */
7079 if (GEN_GEN == 9 && (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
7080 /* The PIPE_CONTROL "VF Cache Invalidation Enable" bit description
7081 * lists several workarounds:
7082 *
7083 * "Project: SKL, KBL, BXT
7084 *
7085 * If the VF Cache Invalidation Enable is set to a 1 in a
7086 * PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields
7087 * sets to 0, with the VF Cache Invalidation Enable set to 0
7088 * needs to be sent prior to the PIPE_CONTROL with VF Cache
7089 * Invalidation Enable set to a 1."
7090 */
7091 iris_emit_raw_pipe_control(batch,
7092 "workaround: recursive VF cache invalidate",
7093 0, NULL, 0, 0);
7094 }
7095
7096 /* GEN:BUG:1409226450, Wait for EU to be idle before pipe control which
7097 * invalidates the instruction cache
7098 */
7099 if (GEN_GEN == 12 && (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE)) {
7100 iris_emit_raw_pipe_control(batch,
7101 "workaround: CS stall before instruction "
7102 "cache invalidate",
7103 PIPE_CONTROL_CS_STALL |
7104 PIPE_CONTROL_STALL_AT_SCOREBOARD, bo, offset,
7105 imm);
7106 }
7107
7108 if ((GEN_GEN == 9 || (GEN_GEN == 12 && devinfo->revision == 0 /* A0 */)) &&
7109 IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
7110 /* Project: SKL / Argument: LRI Post Sync Operation [23]
7111 *
7112 * "PIPECONTROL command with “Command Streamer Stall Enable” must be
7113 * programmed prior to programming a PIPECONTROL command with "LRI
7114 * Post Sync Operation" in GPGPU mode of operation (i.e when
7115 * PIPELINE_SELECT command is set to GPGPU mode of operation)."
7116 *
7117 * The same text exists a few rows below for Post Sync Op.
7118 *
7119 * On Gen12 this is GEN:BUG:1607156449.
7120 */
7121 iris_emit_raw_pipe_control(batch,
7122 "workaround: CS stall before gpgpu post-sync",
7123 PIPE_CONTROL_CS_STALL, bo, offset, imm);
7124 }
7125
7126 /* "Flush Types" workarounds ---------------------------------------------
7127 * We do these now because they may add post-sync operations or CS stalls.
7128 */
7129
7130 if (GEN_GEN < 11 && flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) {
7131 /* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
7132 *
7133 * "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
7134 * 'Write PS Depth Count' or 'Write Timestamp'."
7135 */
7136 if (!bo) {
7137 flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7138 post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7139 non_lri_post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7140 bo = batch->screen->workaround_address.bo;
7141 offset = batch->screen->workaround_address.offset;
7142 }
7143 }
7144
7145 if (flags & PIPE_CONTROL_DEPTH_STALL) {
7146 /* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
7147 *
7148 * "This bit must be DISABLED for operations other than writing
7149 * PS_DEPTH_COUNT."
7150 *
7151 * This seems like nonsense. An Ivybridge workaround requires us to
7152 * emit a PIPE_CONTROL with a depth stall and write immediate post-sync
7153 * operation. Gen8+ requires us to emit depth stalls and depth cache
7154 * flushes together. So, it's hard to imagine this means anything other
7155 * than "we originally intended this to be used for PS_DEPTH_COUNT".
7156 *
7157 * We ignore the supposed restriction and do nothing.
7158 */
7159 }
7160
7161 if (flags & (PIPE_CONTROL_RENDER_TARGET_FLUSH |
7162 PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
7163 /* From the PIPE_CONTROL instruction table, bit 12 and bit 1:
7164 *
7165 * "This bit must be DISABLED for End-of-pipe (Read) fences,
7166 * PS_DEPTH_COUNT or TIMESTAMP queries."
7167 *
7168 * TODO: Implement end-of-pipe checking.
7169 */
7170 assert(!(post_sync_flags & (PIPE_CONTROL_WRITE_DEPTH_COUNT |
7171 PIPE_CONTROL_WRITE_TIMESTAMP)));
7172 }
7173
7174 if (GEN_GEN < 11 && (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
7175 /* From the PIPE_CONTROL instruction table, bit 1:
7176 *
7177 * "This bit is ignored if Depth Stall Enable is set.
7178 * Further, the render cache is not flushed even if Write Cache
7179 * Flush Enable bit is set."
7180 *
7181 * We assert that the caller doesn't do this combination, to try and
7182 * prevent mistakes. It shouldn't hurt the GPU, though.
7183 *
7184 * We skip this check on Gen11+ as the "Stall at Pixel Scoreboard"
7185 * and "Render Target Flush" combo is explicitly required for BTI
7186 * update workarounds.
7187 */
7188 assert(!(flags & (PIPE_CONTROL_DEPTH_STALL |
7189 PIPE_CONTROL_RENDER_TARGET_FLUSH)));
7190 }
7191
7192 /* PIPE_CONTROL page workarounds ------------------------------------- */
7193
7194 if (GEN_GEN <= 8 && (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE)) {
7195 /* From the PIPE_CONTROL page itself:
7196 *
7197 * "IVB, HSW, BDW
7198 * Restriction: Pipe_control with CS-stall bit set must be issued
7199 * before a pipe-control command that has the State Cache
7200 * Invalidate bit set."
7201 */
7202 flags |= PIPE_CONTROL_CS_STALL;
7203 }
7204
7205 if (flags & PIPE_CONTROL_FLUSH_LLC) {
7206 /* From the PIPE_CONTROL instruction table, bit 26 (Flush LLC):
7207 *
7208 * "Project: ALL
7209 * SW must always program Post-Sync Operation to "Write Immediate
7210 * Data" when Flush LLC is set."
7211 *
7212 * For now, we just require the caller to do it.
7213 */
7214 assert(flags & PIPE_CONTROL_WRITE_IMMEDIATE);
7215 }
7216
7217 /* "Post-Sync Operation" workarounds -------------------------------- */
7218
7219 /* Project: All / Argument: Global Snapshot Count Reset [19]
7220 *
7221 * "This bit must not be exercised on any product.
7222 * Requires stall bit ([20] of DW1) set."
7223 *
7224 * We don't use this, so we just assert that it isn't used. The
7225 * PIPE_CONTROL instruction page indicates that they intended this
7226 * as a debug feature and don't think it is useful in production,
7227 * but it may actually be usable, should we ever want to.
7228 */
7229 assert((flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) == 0);
7230
7231 if (flags & (PIPE_CONTROL_MEDIA_STATE_CLEAR |
7232 PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE)) {
7233 /* Project: All / Arguments:
7234 *
7235 * - Generic Media State Clear [16]
7236 * - Indirect State Pointers Disable [16]
7237 *
7238 * "Requires stall bit ([20] of DW1) set."
7239 *
7240 * Also, the PIPE_CONTROL instruction table, bit 16 (Generic Media
7241 * State Clear) says:
7242 *
7243 * "PIPECONTROL command with “Command Streamer Stall Enable” must be
7244 * programmed prior to programming a PIPECONTROL command with "Media
7245 * State Clear" set in GPGPU mode of operation"
7246 *
7247 * This is a subset of the earlier rule, so there's nothing to do.
7248 */
7249 flags |= PIPE_CONTROL_CS_STALL;
7250 }
7251
7252 if (flags & PIPE_CONTROL_STORE_DATA_INDEX) {
7253 /* Project: All / Argument: Store Data Index
7254 *
7255 * "Post-Sync Operation ([15:14] of DW1) must be set to something other
7256 * than '0'."
7257 *
7258 * For now, we just assert that the caller does this. We might want to
7259 * automatically add a write to the workaround BO...
7260 */
7261 assert(non_lri_post_sync_flags != 0);
7262 }
7263
7264 if (flags & PIPE_CONTROL_SYNC_GFDT) {
7265 /* Project: All / Argument: Sync GFDT
7266 *
7267 * "Post-Sync Operation ([15:14] of DW1) must be set to something other
7268 * than '0' or 0x2520[13] must be set."
7269 *
7270 * For now, we just assert that the caller does this.
7271 */
7272 assert(non_lri_post_sync_flags != 0);
7273 }
7274
7275 if (flags & PIPE_CONTROL_TLB_INVALIDATE) {
7276 /* Project: IVB+ / Argument: TLB inv
7277 *
7278 * "Requires stall bit ([20] of DW1) set."
7279 *
7280 * Also, from the PIPE_CONTROL instruction table:
7281 *
7282 * "Project: SKL+
7283 * Post Sync Operation or CS stall must be set to ensure a TLB
7284 * invalidation occurs. Otherwise no cycle will occur to the TLB
7285 * cache to invalidate."
7286 *
7287 * This is not a subset of the earlier rule, so there's nothing to do.
7288 */
7289 flags |= PIPE_CONTROL_CS_STALL;
7290 }
7291
7292 if (GEN_GEN >= 12 && ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ||
7293 (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))) {
7294 /* From the PIPE_CONTROL instruction table, bit 28 (Tile Cache Flush
7295 * Enable):
7296 *
7297 * Unified Cache (Tile Cache Disabled):
7298 *
7299 * When the Color and Depth (Z) streams are enabled to be cached in
7300 * the DC space of L2, Software must use "Render Target Cache Flush
7301 * Enable" and "Depth Cache Flush Enable" along with "Tile Cache
7302 * Flush" for getting the color and depth (Z) write data to be
7303 * globally observable. In this mode of operation it is not required
7304 * to set "CS Stall" upon setting "Tile Cache Flush" bit.
7305 */
7306 flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
7307 }
7308
7309 if (GEN_GEN == 9 && devinfo->gt == 4) {
7310 /* TODO: The big Skylake GT4 post sync op workaround */
7311 }
7312
7313 /* "GPGPU specific workarounds" (both post-sync and flush) ------------ */
7314
7315 if (IS_COMPUTE_PIPELINE(batch)) {
7316 if (GEN_GEN >= 9 && (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE)) {
7317 /* Project: SKL+ / Argument: Tex Invalidate
7318 * "Requires stall bit ([20] of DW) set for all GPGPU Workloads."
7319 */
7320 flags |= PIPE_CONTROL_CS_STALL;
7321 }
7322
7323 if (GEN_GEN == 8 && (post_sync_flags ||
7324 (flags & (PIPE_CONTROL_NOTIFY_ENABLE |
7325 PIPE_CONTROL_DEPTH_STALL |
7326 PIPE_CONTROL_RENDER_TARGET_FLUSH |
7327 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
7328 PIPE_CONTROL_DATA_CACHE_FLUSH)))) {
7329 /* Project: BDW / Arguments:
7330 *
7331 * - LRI Post Sync Operation [23]
7332 * - Post Sync Op [15:14]
7333 * - Notify En [8]
7334 * - Depth Stall [13]
7335 * - Render Target Cache Flush [12]
7336 * - Depth Cache Flush [0]
7337 * - DC Flush Enable [5]
7338 *
7339 * "Requires stall bit ([20] of DW) set for all GPGPU and Media
7340 * Workloads."
7341 */
7342 flags |= PIPE_CONTROL_CS_STALL;
7343
7344 /* Also, from the PIPE_CONTROL instruction table, bit 20:
7345 *
7346 * "Project: BDW
7347 * This bit must be always set when PIPE_CONTROL command is
7348 * programmed by GPGPU and MEDIA workloads, except for the cases
7349 * when only Read Only Cache Invalidation bits are set (State
7350 * Cache Invalidation Enable, Instruction cache Invalidation
7351 * Enable, Texture Cache Invalidation Enable, Constant Cache
7352 * Invalidation Enable). This is to WA FFDOP CG issue, this WA
7353 * need not implemented when FF_DOP_CG is disable via "Fixed
7354 * Function DOP Clock Gate Disable" bit in RC_PSMI_CTRL register."
7355 *
7356 * It sounds like we could avoid CS stalls in some cases, but we
7357 * don't currently bother. This list isn't exactly the list above,
7358 * either...
7359 */
7360 }
7361 }
7362
7363 /* "Stall" workarounds ----------------------------------------------
7364 * These have to come after the earlier ones because we may have added
7365 * some additional CS stalls above.
7366 */
7367
7368 if (GEN_GEN < 9 && (flags & PIPE_CONTROL_CS_STALL)) {
7369 /* Project: PRE-SKL, VLV, CHV
7370 *
7371 * "[All Stepping][All SKUs]:
7372 *
7373 * One of the following must also be set:
7374 *
7375 * - Render Target Cache Flush Enable ([12] of DW1)
7376 * - Depth Cache Flush Enable ([0] of DW1)
7377 * - Stall at Pixel Scoreboard ([1] of DW1)
7378 * - Depth Stall ([13] of DW1)
7379 * - Post-Sync Operation ([15:14] of DW1)
7380 * - DC Flush Enable ([5] of DW1)"
7381 *
7382 * If we don't already have one of those bits set, we choose to add
7383 * "Stall at Pixel Scoreboard". Some of the other bits require a
7384 * CS stall as a workaround (see above), which would send us into
7385 * an infinite recursion of PIPE_CONTROLs. "Stall at Pixel Scoreboard"
7386 * appears to be safe, so we choose that.
7387 */
7388 const uint32_t wa_bits = PIPE_CONTROL_RENDER_TARGET_FLUSH |
7389 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
7390 PIPE_CONTROL_WRITE_IMMEDIATE |
7391 PIPE_CONTROL_WRITE_DEPTH_COUNT |
7392 PIPE_CONTROL_WRITE_TIMESTAMP |
7393 PIPE_CONTROL_STALL_AT_SCOREBOARD |
7394 PIPE_CONTROL_DEPTH_STALL |
7395 PIPE_CONTROL_DATA_CACHE_FLUSH;
7396 if (!(flags & wa_bits))
7397 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
7398 }
7399
7400 if (GEN_GEN >= 12 && (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH)) {
7401 /* GEN:BUG:1409600907:
7402 *
7403 * "PIPE_CONTROL with Depth Stall Enable bit must be set
7404 * with any PIPE_CONTROL with Depth Flush Enable bit set."
7405 */
7406 flags |= PIPE_CONTROL_DEPTH_STALL;
7407 }
7408
7409 /* Emit --------------------------------------------------------------- */
7410
7411 if (INTEL_DEBUG & DEBUG_PIPE_CONTROL) {
7412 fprintf(stderr,
7413 " PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
7414 (flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
7415 (flags & PIPE_CONTROL_CS_STALL) ? "CS " : "",
7416 (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD) ? "Scoreboard " : "",
7417 (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) ? "VF " : "",
7418 (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ? "RT " : "",
7419 (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE) ? "Const " : "",
7420 (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) ? "TC " : "",
7421 (flags & PIPE_CONTROL_DATA_CACHE_FLUSH) ? "DC " : "",
7422 (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH) ? "ZFlush " : "",
7423 (flags & PIPE_CONTROL_DEPTH_STALL) ? "ZStall " : "",
7424 (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE) ? "State " : "",
7425 (flags & PIPE_CONTROL_TLB_INVALIDATE) ? "TLB " : "",
7426 (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE) ? "Inst " : "",
7427 (flags & PIPE_CONTROL_MEDIA_STATE_CLEAR) ? "MediaClear " : "",
7428 (flags & PIPE_CONTROL_NOTIFY_ENABLE) ? "Notify " : "",
7429 (flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) ?
7430 "SnapRes" : "",
7431 (flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE) ?
7432 "ISPDis" : "",
7433 (flags & PIPE_CONTROL_WRITE_IMMEDIATE) ? "WriteImm " : "",
7434 (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT) ? "WriteZCount " : "",
7435 (flags & PIPE_CONTROL_WRITE_TIMESTAMP) ? "WriteTimestamp " : "",
7436 (flags & PIPE_CONTROL_FLUSH_HDC) ? "HDC " : "",
7437 imm, reason);
7438 }
7439
7440 batch_mark_sync_for_pipe_control(batch, flags);
7441 iris_batch_sync_region_start(batch);
7442
7443 iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
7444 #if GEN_GEN >= 12
7445 pc.TileCacheFlushEnable = flags & PIPE_CONTROL_TILE_CACHE_FLUSH;
7446 #endif
7447 #if GEN_GEN >= 11
7448 pc.HDCPipelineFlushEnable = flags & PIPE_CONTROL_FLUSH_HDC;
7449 #endif
7450 pc.LRIPostSyncOperation = NoLRIOperation;
7451 pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
7452 pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
7453 pc.StoreDataIndex = 0;
7454 pc.CommandStreamerStallEnable = flags & PIPE_CONTROL_CS_STALL;
7455 pc.GlobalSnapshotCountReset =
7456 flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET;
7457 pc.TLBInvalidate = flags & PIPE_CONTROL_TLB_INVALIDATE;
7458 pc.GenericMediaStateClear = flags & PIPE_CONTROL_MEDIA_STATE_CLEAR;
7459 pc.StallAtPixelScoreboard = flags & PIPE_CONTROL_STALL_AT_SCOREBOARD;
7460 pc.RenderTargetCacheFlushEnable =
7461 flags & PIPE_CONTROL_RENDER_TARGET_FLUSH;
7462 pc.DepthCacheFlushEnable = flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH;
7463 pc.StateCacheInvalidationEnable =
7464 flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE;
7465 pc.VFCacheInvalidationEnable = flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
7466 pc.ConstantCacheInvalidationEnable =
7467 flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE;
7468 pc.PostSyncOperation = flags_to_post_sync_op(flags);
7469 pc.DepthStallEnable = flags & PIPE_CONTROL_DEPTH_STALL;
7470 pc.InstructionCacheInvalidateEnable =
7471 flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE;
7472 pc.NotifyEnable = flags & PIPE_CONTROL_NOTIFY_ENABLE;
7473 pc.IndirectStatePointersDisable =
7474 flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE;
7475 pc.TextureCacheInvalidationEnable =
7476 flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
7477 pc.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
7478 pc.ImmediateData = imm;
7479 }
7480
7481 iris_batch_sync_region_end(batch);
7482 }
7483
7484 #if GEN_GEN == 9
7485 /**
7486 * Preemption on Gen9 has to be enabled or disabled in various cases.
7487 *
7488 * See these workarounds for preemption:
7489 * - WaDisableMidObjectPreemptionForGSLineStripAdj
7490 * - WaDisableMidObjectPreemptionForTrifanOrPolygon
7491 * - WaDisableMidObjectPreemptionForLineLoop
7492 * - WA#0798
7493 *
7494 * We don't put this in the vtable because it's only used on Gen9.
7495 */
7496 void
7497 gen9_toggle_preemption(struct iris_context *ice,
7498 struct iris_batch *batch,
7499 const struct pipe_draw_info *draw)
7500 {
7501 struct iris_genx_state *genx = ice->state.genx;
7502 bool object_preemption = true;
7503
7504 /* WaDisableMidObjectPreemptionForGSLineStripAdj
7505 *
7506 * "WA: Disable mid-draw preemption when draw-call is a linestrip_adj
7507 * and GS is enabled."
7508 */
7509 if (draw->mode == PIPE_PRIM_LINE_STRIP_ADJACENCY &&
7510 ice->shaders.prog[MESA_SHADER_GEOMETRY])
7511 object_preemption = false;
7512
7513 /* WaDisableMidObjectPreemptionForTrifanOrPolygon
7514 *
7515 * "TriFan miscompare in Execlist Preemption test. Cut index that is
7516 * on a previous context. End the previous, then resume another context
7517 * with a tri-fan or polygon, and the vertex count is corrupted. If we
7518 * preempt again we will cause corruption.
7519 *
7520 * WA: Disable mid-draw preemption when draw-call has a tri-fan."
7521 */
7522 if (draw->mode == PIPE_PRIM_TRIANGLE_FAN)
7523 object_preemption = false;
7524
7525 /* WaDisableMidObjectPreemptionForLineLoop
7526 *
7527 * "VF Stats Counters Missing a vertex when preemption enabled.
7528 *
7529 * WA: Disable mid-draw preemption when the draw uses a lineloop
7530 * topology."
7531 */
7532 if (draw->mode == PIPE_PRIM_LINE_LOOP)
7533 object_preemption = false;
7534
7535 /* WA#0798
7536 *
7537 * "VF is corrupting GAFS data when preempted on an instance boundary
7538 * and replayed with instancing enabled.
7539 *
7540 * WA: Disable preemption when using instancing."
7541 */
7542 if (draw->instance_count > 1)
7543 object_preemption = false;
7544
7545 if (genx->object_preemption != object_preemption) {
7546 iris_enable_obj_preemption(batch, object_preemption);
7547 genx->object_preemption = object_preemption;
7548 }
7549 }
7550 #endif
7551
7552 static void
7553 iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch)
7554 {
7555 struct iris_genx_state *genx = ice->state.genx;
7556
7557 memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer));
7558 }
7559
7560 static void
7561 iris_emit_mi_report_perf_count(struct iris_batch *batch,
7562 struct iris_bo *bo,
7563 uint32_t offset_in_bytes,
7564 uint32_t report_id)
7565 {
7566 iris_batch_sync_region_start(batch);
7567 iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
7568 mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes,
7569 IRIS_DOMAIN_OTHER_WRITE);
7570 mi_rpc.ReportID = report_id;
7571 }
7572 iris_batch_sync_region_end(batch);
7573 }
7574
7575 /**
7576 * Update the pixel hashing modes that determine the balancing of PS threads
7577 * across subslices and slices.
7578 *
7579 * \param width Width bound of the rendering area (already scaled down if \p
7580 * scale is greater than 1).
7581 * \param height Height bound of the rendering area (already scaled down if \p
7582 * scale is greater than 1).
7583 * \param scale The number of framebuffer samples that could potentially be
7584 * affected by an individual channel of the PS thread. This is
7585 * typically one for single-sampled rendering, but for operations
7586 * like CCS resolves and fast clears a single PS invocation may
7587 * update a huge number of pixels, in which case a finer
7588 * balancing is desirable in order to maximally utilize the
7589 * bandwidth available. UINT_MAX can be used as shorthand for
7590 * "finest hashing mode available".
7591 */
7592 void
7593 genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
7594 unsigned width, unsigned height, unsigned scale)
7595 {
7596 #if GEN_GEN == 9
7597 const struct gen_device_info *devinfo = &batch->screen->devinfo;
7598 const unsigned slice_hashing[] = {
7599 /* Because all Gen9 platforms with more than one slice require
7600 * three-way subslice hashing, a single "normal" 16x16 slice hashing
7601 * block is guaranteed to suffer from substantial imbalance, with one
7602 * subslice receiving twice as much work as the other two in the
7603 * slice.
7604 *
7605 * The performance impact of that would be particularly severe when
7606 * three-way hashing is also in use for slice balancing (which is the
7607 * case for all Gen9 GT4 platforms), because one of the slices
7608 * receives one every three 16x16 blocks in either direction, which
7609 * is roughly the periodicity of the underlying subslice imbalance
7610 * pattern ("roughly" because in reality the hardware's
7611 * implementation of three-way hashing doesn't do exact modulo 3
7612 * arithmetic, which somewhat decreases the magnitude of this effect
7613 * in practice). This leads to a systematic subslice imbalance
7614 * within that slice regardless of the size of the primitive. The
7615 * 32x32 hashing mode guarantees that the subslice imbalance within a
7616 * single slice hashing block is minimal, largely eliminating this
7617 * effect.
7618 */
7619 _32x32,
7620 /* Finest slice hashing mode available. */
7621 NORMAL
7622 };
7623 const unsigned subslice_hashing[] = {
7624 /* 16x16 would provide a slight cache locality benefit especially
7625 * visible in the sampler L1 cache efficiency of low-bandwidth
7626 * non-LLC platforms, but it comes at the cost of greater subslice
7627 * imbalance for primitives of dimensions approximately intermediate
7628 * between 16x4 and 16x16.
7629 */
7630 _16x4,
7631 /* Finest subslice hashing mode available. */
7632 _8x4
7633 };
7634 /* Dimensions of the smallest hashing block of a given hashing mode. If
7635 * the rendering area is smaller than this there can't possibly be any
7636 * benefit from switching to this mode, so we optimize out the
7637 * transition.
7638 */
7639 const unsigned min_size[][2] = {
7640 { 16, 4 },
7641 { 8, 4 }
7642 };
7643 const unsigned idx = scale > 1;
7644
7645 if (width > min_size[idx][0] || height > min_size[idx][1]) {
7646 uint32_t gt_mode;
7647
7648 iris_pack_state(GENX(GT_MODE), &gt_mode, reg) {
7649 reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
7650 reg.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0);
7651 reg.SubsliceHashing = subslice_hashing[idx];
7652 reg.SubsliceHashingMask = -1;
7653 };
7654
7655 iris_emit_raw_pipe_control(batch,
7656 "workaround: CS stall before GT_MODE LRI",
7657 PIPE_CONTROL_STALL_AT_SCOREBOARD |
7658 PIPE_CONTROL_CS_STALL,
7659 NULL, 0, 0);
7660
7661 iris_emit_lri(batch, GT_MODE, gt_mode);
7662
7663 ice->state.current_hash_scale = scale;
7664 }
7665 #endif
7666 }
7667
7668 static void
7669 iris_set_frontend_noop(struct pipe_context *ctx, bool enable)
7670 {
7671 struct iris_context *ice = (struct iris_context *) ctx;
7672
7673 if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER], enable)) {
7674 ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER;
7675 ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
7676 }
7677
7678 if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE], enable)) {
7679 ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
7680 ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE;
7681 }
7682 }
7683
7684 void
7685 genX(init_state)(struct iris_context *ice)
7686 {
7687 struct pipe_context *ctx = &ice->ctx;
7688 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
7689
7690 ctx->create_blend_state = iris_create_blend_state;
7691 ctx->create_depth_stencil_alpha_state = iris_create_zsa_state;
7692 ctx->create_rasterizer_state = iris_create_rasterizer_state;
7693 ctx->create_sampler_state = iris_create_sampler_state;
7694 ctx->create_sampler_view = iris_create_sampler_view;
7695 ctx->create_surface = iris_create_surface;
7696 ctx->create_vertex_elements_state = iris_create_vertex_elements;
7697 ctx->bind_blend_state = iris_bind_blend_state;
7698 ctx->bind_depth_stencil_alpha_state = iris_bind_zsa_state;
7699 ctx->bind_sampler_states = iris_bind_sampler_states;
7700 ctx->bind_rasterizer_state = iris_bind_rasterizer_state;
7701 ctx->bind_vertex_elements_state = iris_bind_vertex_elements_state;
7702 ctx->delete_blend_state = iris_delete_state;
7703 ctx->delete_depth_stencil_alpha_state = iris_delete_state;
7704 ctx->delete_rasterizer_state = iris_delete_state;
7705 ctx->delete_sampler_state = iris_delete_state;
7706 ctx->delete_vertex_elements_state = iris_delete_state;
7707 ctx->set_blend_color = iris_set_blend_color;
7708 ctx->set_clip_state = iris_set_clip_state;
7709 ctx->set_constant_buffer = iris_set_constant_buffer;
7710 ctx->set_shader_buffers = iris_set_shader_buffers;
7711 ctx->set_shader_images = iris_set_shader_images;
7712 ctx->set_sampler_views = iris_set_sampler_views;
7713 ctx->set_tess_state = iris_set_tess_state;
7714 ctx->set_framebuffer_state = iris_set_framebuffer_state;
7715 ctx->set_polygon_stipple = iris_set_polygon_stipple;
7716 ctx->set_sample_mask = iris_set_sample_mask;
7717 ctx->set_scissor_states = iris_set_scissor_states;
7718 ctx->set_stencil_ref = iris_set_stencil_ref;
7719 ctx->set_vertex_buffers = iris_set_vertex_buffers;
7720 ctx->set_viewport_states = iris_set_viewport_states;
7721 ctx->sampler_view_destroy = iris_sampler_view_destroy;
7722 ctx->surface_destroy = iris_surface_destroy;
7723 ctx->draw_vbo = iris_draw_vbo;
7724 ctx->launch_grid = iris_launch_grid;
7725 ctx->create_stream_output_target = iris_create_stream_output_target;
7726 ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
7727 ctx->set_stream_output_targets = iris_set_stream_output_targets;
7728 ctx->set_frontend_noop = iris_set_frontend_noop;
7729
7730 screen->vtbl.destroy_state = iris_destroy_state;
7731 screen->vtbl.init_render_context = iris_init_render_context;
7732 screen->vtbl.init_compute_context = iris_init_compute_context;
7733 screen->vtbl.upload_render_state = iris_upload_render_state;
7734 screen->vtbl.update_surface_base_address = iris_update_surface_base_address;
7735 screen->vtbl.upload_compute_state = iris_upload_compute_state;
7736 screen->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
7737 screen->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
7738 screen->vtbl.rebind_buffer = iris_rebind_buffer;
7739 screen->vtbl.load_register_reg32 = iris_load_register_reg32;
7740 screen->vtbl.load_register_reg64 = iris_load_register_reg64;
7741 screen->vtbl.load_register_imm32 = iris_load_register_imm32;
7742 screen->vtbl.load_register_imm64 = iris_load_register_imm64;
7743 screen->vtbl.load_register_mem32 = iris_load_register_mem32;
7744 screen->vtbl.load_register_mem64 = iris_load_register_mem64;
7745 screen->vtbl.store_register_mem32 = iris_store_register_mem32;
7746 screen->vtbl.store_register_mem64 = iris_store_register_mem64;
7747 screen->vtbl.store_data_imm32 = iris_store_data_imm32;
7748 screen->vtbl.store_data_imm64 = iris_store_data_imm64;
7749 screen->vtbl.copy_mem_mem = iris_copy_mem_mem;
7750 screen->vtbl.derived_program_state_size = iris_derived_program_state_size;
7751 screen->vtbl.store_derived_program_state = iris_store_derived_program_state;
7752 screen->vtbl.create_so_decl_list = iris_create_so_decl_list;
7753 screen->vtbl.populate_vs_key = iris_populate_vs_key;
7754 screen->vtbl.populate_tcs_key = iris_populate_tcs_key;
7755 screen->vtbl.populate_tes_key = iris_populate_tes_key;
7756 screen->vtbl.populate_gs_key = iris_populate_gs_key;
7757 screen->vtbl.populate_fs_key = iris_populate_fs_key;
7758 screen->vtbl.populate_cs_key = iris_populate_cs_key;
7759 screen->vtbl.lost_genx_state = iris_lost_genx_state;
7760
7761 ice->state.dirty = ~0ull;
7762 ice->state.stage_dirty = ~0ull;
7763
7764 ice->state.statistics_counters_enabled = true;
7765
7766 ice->state.sample_mask = 0xffff;
7767 ice->state.num_viewports = 1;
7768 ice->state.prim_mode = PIPE_PRIM_MAX;
7769 ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
7770 ice->draw.derived_params.drawid = -1;
7771
7772 /* Make a 1x1x1 null surface for unbound textures */
7773 void *null_surf_map =
7774 upload_state(ice->state.surface_uploader, &ice->state.unbound_tex,
7775 4 * GENX(RENDER_SURFACE_STATE_length), 64);
7776 isl_null_fill_state(&screen->isl_dev, null_surf_map, isl_extent3d(1, 1, 1));
7777 ice->state.unbound_tex.offset +=
7778 iris_bo_offset_from_base_address(iris_resource_bo(ice->state.unbound_tex.res));
7779
7780 /* Default all scissor rectangles to be empty regions. */
7781 for (int i = 0; i < IRIS_MAX_VIEWPORTS; i++) {
7782 ice->state.scissors[i] = (struct pipe_scissor_state) {
7783 .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
7784 };
7785 }
7786 }