/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_state.c
 *
 * ============================= GENXML CODE =============================
 *              [This file is compiled once per generation.]
 * =======================================================================
 *
 * This is the main state upload code.
 *
 * OpenGL involves frequently mutating context state, which is mirrored in
 * core Mesa by highly mutable data structures.  However, most applications
 * typically draw the same things over and over - from frame to frame, most
 * of the same objects are still visible and need to be redrawn.  So, rather
 * than inventing new state all the time, applications usually mutate to swap
 * between known states that we've seen before.
 *
 * Gallium isolates us from this mutation by tracking API state, and
 * distilling it into a set of Constant State Objects, or CSOs.  Large,
 * complex, or typically reusable state can be created once, then reused
 * multiple times.  Drivers can create and store their own associated data.
 * This create/bind model corresponds to the pipe->create_*_state() and
 * pipe->bind_*_state() driver hooks.
 *
 * Some state is cheap to create, or expected to be highly dynamic.  Rather
 * than creating and caching piles of CSOs for these, Gallium simply streams
 * them out, via the pipe->set_*_state() driver hooks.
 *
 * To reduce draw time overhead, we try to compute as much state at create
 * time as possible.  Wherever possible, we translate the Gallium pipe state
 * to 3DSTATE commands, and store those commands in the CSO.  At draw time,
 * we can simply memcpy them into a batch buffer.
 *
 * No hardware matches the abstraction perfectly, so some commands require
 * information from multiple CSOs.  In this case, we can store two copies
 * of the packet (one in each CSO), and simply | together their DWords at
 * draw time.  Sometimes the second set is trivial (one or two fields), so
 * we simply pack it at draw time.
 *
 * There are two main components in the file below.  First, the CSO hooks
 * create/bind/track state.  Second, the draw-time upload functions,
 * iris_upload_render_state() and iris_upload_compute_state(), read the
 * context state and emit the commands into the actual batch.
 */

#include <stdio.h>
#include <errno.h>

#if HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#ifdef DEBUG
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#endif
#else
#define VG(x)
#endif

#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_dual_blend.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "util/u_viewport.h"
#include "util/u_memory.h"
#include "drm-uapi/i915_drm.h"
#include "nir.h"
#include "intel/compiler/brw_compiler.h"
#include "intel/common/gen_aux_map.h"
#include "intel/common/gen_l3_config.h"
#include "intel/common/gen_sample_positions.h"
#include "iris_batch.h"
#include "iris_context.h"
#include "iris_defines.h"
#include "iris_pipe.h"
#include "iris_resource.h"

#include "iris_genx_macros.h"
#include "intel/common/gen_guardband.h"

/**
 * Statically assert that PIPE_* enums match the hardware packets.
 * (As long as they match, we don't need to translate them.)
 */
UNUSED static void pipe_asserts()
{
#define PIPE_ASSERT(x) STATIC_ASSERT((int)x)

   /* pipe_logicop happens to match the hardware. */
   PIPE_ASSERT(PIPE_LOGICOP_CLEAR == LOGICOP_CLEAR);
   PIPE_ASSERT(PIPE_LOGICOP_NOR == LOGICOP_NOR);
   PIPE_ASSERT(PIPE_LOGICOP_AND_INVERTED == LOGICOP_AND_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_COPY_INVERTED == LOGICOP_COPY_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_AND_REVERSE == LOGICOP_AND_REVERSE);
   PIPE_ASSERT(PIPE_LOGICOP_INVERT == LOGICOP_INVERT);
   PIPE_ASSERT(PIPE_LOGICOP_XOR == LOGICOP_XOR);
   PIPE_ASSERT(PIPE_LOGICOP_NAND == LOGICOP_NAND);
   PIPE_ASSERT(PIPE_LOGICOP_AND == LOGICOP_AND);
   PIPE_ASSERT(PIPE_LOGICOP_EQUIV == LOGICOP_EQUIV);
   PIPE_ASSERT(PIPE_LOGICOP_NOOP == LOGICOP_NOOP);
   PIPE_ASSERT(PIPE_LOGICOP_OR_INVERTED == LOGICOP_OR_INVERTED);
   PIPE_ASSERT(PIPE_LOGICOP_COPY == LOGICOP_COPY);
   PIPE_ASSERT(PIPE_LOGICOP_OR_REVERSE == LOGICOP_OR_REVERSE);
   PIPE_ASSERT(PIPE_LOGICOP_OR == LOGICOP_OR);
   PIPE_ASSERT(PIPE_LOGICOP_SET == LOGICOP_SET);

   /* pipe_blendfactor happens to match the hardware. */
   PIPE_ASSERT(PIPE_BLENDFACTOR_ONE == BLENDFACTOR_ONE);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_COLOR == BLENDFACTOR_SRC_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA == BLENDFACTOR_SRC_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_ALPHA == BLENDFACTOR_DST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_DST_COLOR == BLENDFACTOR_DST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE == BLENDFACTOR_SRC_ALPHA_SATURATE);
   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_COLOR == BLENDFACTOR_CONST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_ALPHA == BLENDFACTOR_CONST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_COLOR == BLENDFACTOR_SRC1_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_ALPHA == BLENDFACTOR_SRC1_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_ZERO == BLENDFACTOR_ZERO);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_COLOR == BLENDFACTOR_INV_SRC_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_ALPHA == BLENDFACTOR_INV_SRC_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_ALPHA == BLENDFACTOR_INV_DST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_COLOR == BLENDFACTOR_INV_DST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_COLOR == BLENDFACTOR_INV_CONST_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_ALPHA == BLENDFACTOR_INV_CONST_ALPHA);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_COLOR == BLENDFACTOR_INV_SRC1_COLOR);
   PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_ALPHA == BLENDFACTOR_INV_SRC1_ALPHA);

   /* pipe_blend_func happens to match the hardware. */
   PIPE_ASSERT(PIPE_BLEND_ADD == BLENDFUNCTION_ADD);
   PIPE_ASSERT(PIPE_BLEND_SUBTRACT == BLENDFUNCTION_SUBTRACT);
   PIPE_ASSERT(PIPE_BLEND_REVERSE_SUBTRACT == BLENDFUNCTION_REVERSE_SUBTRACT);
   PIPE_ASSERT(PIPE_BLEND_MIN == BLENDFUNCTION_MIN);
   PIPE_ASSERT(PIPE_BLEND_MAX == BLENDFUNCTION_MAX);

   /* pipe_stencil_op happens to match the hardware. */
   PIPE_ASSERT(PIPE_STENCIL_OP_KEEP == STENCILOP_KEEP);
   PIPE_ASSERT(PIPE_STENCIL_OP_ZERO == STENCILOP_ZERO);
   PIPE_ASSERT(PIPE_STENCIL_OP_REPLACE == STENCILOP_REPLACE);
   PIPE_ASSERT(PIPE_STENCIL_OP_INCR == STENCILOP_INCRSAT);
   PIPE_ASSERT(PIPE_STENCIL_OP_DECR == STENCILOP_DECRSAT);
   PIPE_ASSERT(PIPE_STENCIL_OP_INCR_WRAP == STENCILOP_INCR);
   PIPE_ASSERT(PIPE_STENCIL_OP_DECR_WRAP == STENCILOP_DECR);
   PIPE_ASSERT(PIPE_STENCIL_OP_INVERT == STENCILOP_INVERT);

   /* pipe_sprite_coord_mode happens to match 3DSTATE_SBE */
   PIPE_ASSERT(PIPE_SPRITE_COORD_UPPER_LEFT == UPPERLEFT);
   PIPE_ASSERT(PIPE_SPRITE_COORD_LOWER_LEFT == LOWERLEFT);
#undef PIPE_ASSERT
}

static unsigned
translate_prim_type(enum pipe_prim_type prim, uint8_t verts_per_patch)
{
   static const unsigned map[] = {
      [PIPE_PRIM_POINTS] = _3DPRIM_POINTLIST,
      [PIPE_PRIM_LINES] = _3DPRIM_LINELIST,
      [PIPE_PRIM_LINE_LOOP] = _3DPRIM_LINELOOP,
      [PIPE_PRIM_LINE_STRIP] = _3DPRIM_LINESTRIP,
      [PIPE_PRIM_TRIANGLES] = _3DPRIM_TRILIST,
      [PIPE_PRIM_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
      [PIPE_PRIM_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
      [PIPE_PRIM_QUADS] = _3DPRIM_QUADLIST,
      [PIPE_PRIM_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
      [PIPE_PRIM_POLYGON] = _3DPRIM_POLYGON,
      [PIPE_PRIM_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
      [PIPE_PRIM_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
      [PIPE_PRIM_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
      [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
      [PIPE_PRIM_PATCHES] = _3DPRIM_PATCHLIST_1 - 1,
   };

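   /* The _3DPRIM_PATCHLIST_n topologies are sequential enum values, so
    * adding the patch vertex count to (_3DPRIM_PATCHLIST_1 - 1) selects
    * the right PATCHLIST_n.
    */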
   return map[prim] + (prim == PIPE_PRIM_PATCHES ? verts_per_patch : 0);
}

static unsigned
translate_compare_func(enum pipe_compare_func pipe_func)
{
   static const unsigned map[] = {
      [PIPE_FUNC_NEVER] = COMPAREFUNCTION_NEVER,
      [PIPE_FUNC_LESS] = COMPAREFUNCTION_LESS,
      [PIPE_FUNC_EQUAL] = COMPAREFUNCTION_EQUAL,
      [PIPE_FUNC_LEQUAL] = COMPAREFUNCTION_LEQUAL,
      [PIPE_FUNC_GREATER] = COMPAREFUNCTION_GREATER,
      [PIPE_FUNC_NOTEQUAL] = COMPAREFUNCTION_NOTEQUAL,
      [PIPE_FUNC_GEQUAL] = COMPAREFUNCTION_GEQUAL,
      [PIPE_FUNC_ALWAYS] = COMPAREFUNCTION_ALWAYS,
   };
   return map[pipe_func];
}

static unsigned
translate_shadow_func(enum pipe_compare_func pipe_func)
{
   /* Gallium specifies the result of shadow comparisons as:
    *
    *    1 if ref <op> texel,
    *    0 otherwise.
    *
    * The hardware does:
    *
    *    0 if texel <op> ref,
    *    1 otherwise.
    *
    * So we need to flip the operator and also negate.
    */
   static const unsigned map[] = {
      [PIPE_FUNC_NEVER] = PREFILTEROPALWAYS,
      [PIPE_FUNC_LESS] = PREFILTEROPLEQUAL,
      [PIPE_FUNC_EQUAL] = PREFILTEROPNOTEQUAL,
      [PIPE_FUNC_LEQUAL] = PREFILTEROPLESS,
      [PIPE_FUNC_GREATER] = PREFILTEROPGEQUAL,
      [PIPE_FUNC_NOTEQUAL] = PREFILTEROPEQUAL,
      [PIPE_FUNC_GEQUAL] = PREFILTEROPGREATER,
      [PIPE_FUNC_ALWAYS] = PREFILTEROPNEVER,
   };
   return map[pipe_func];
}

static unsigned
translate_cull_mode(unsigned pipe_face)
{
   static const unsigned map[4] = {
      [PIPE_FACE_NONE] = CULLMODE_NONE,
      [PIPE_FACE_FRONT] = CULLMODE_FRONT,
      [PIPE_FACE_BACK] = CULLMODE_BACK,
      [PIPE_FACE_FRONT_AND_BACK] = CULLMODE_BOTH,
   };
   return map[pipe_face];
}

static unsigned
translate_fill_mode(unsigned pipe_polymode)
{
   static const unsigned map[4] = {
      [PIPE_POLYGON_MODE_FILL] = FILL_MODE_SOLID,
      [PIPE_POLYGON_MODE_LINE] = FILL_MODE_WIREFRAME,
      [PIPE_POLYGON_MODE_POINT] = FILL_MODE_POINT,
      [PIPE_POLYGON_MODE_FILL_RECTANGLE] = FILL_MODE_SOLID,
   };
   return map[pipe_polymode];
}

static unsigned
translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
{
   static const unsigned map[] = {
      [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
      [PIPE_TEX_MIPFILTER_LINEAR] = MIPFILTER_LINEAR,
      [PIPE_TEX_MIPFILTER_NONE] = MIPFILTER_NONE,
   };
   return map[pipe_mip];
}

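/**
 * Translate a pipe texture wrap mode to a hardware Texture Coordinate Mode.
 * Note that legacy GL_CLAMP maps to TCM_HALF_BORDER, which (unlike
 * CLAMP_TO_EDGE) can still blend in the border color when the filter
 * footprint straddles the texture edge.
 */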
static uint32_t
translate_wrap(unsigned pipe_wrap)
{
   static const unsigned map[] = {
      [PIPE_TEX_WRAP_REPEAT] = TCM_WRAP,
      [PIPE_TEX_WRAP_CLAMP] = TCM_HALF_BORDER,
      [PIPE_TEX_WRAP_CLAMP_TO_EDGE] = TCM_CLAMP,
      [PIPE_TEX_WRAP_CLAMP_TO_BORDER] = TCM_CLAMP_BORDER,
      [PIPE_TEX_WRAP_MIRROR_REPEAT] = TCM_MIRROR,
      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,

      /* These are unsupported. */
      [PIPE_TEX_WRAP_MIRROR_CLAMP] = -1,
      [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
   };
   return map[pipe_wrap];
}

/**
 * Allocate space for some indirect state.
 *
 * Return a pointer to the map (to fill it out) and a state ref (for
 * referring to the state in GPU commands).
 */
static void *
upload_state(struct u_upload_mgr *uploader,
             struct iris_state_ref *ref,
             unsigned size,
             unsigned alignment)
{
   void *p = NULL;
   u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
   return p;
}

/**
 * Stream out temporary/short-lived state.
 *
 * This allocates space, pins the BO, and includes the BO address in the
 * returned offset (which works because all state lives in 32-bit memory
 * zones).
 */
static uint32_t *
stream_state(struct iris_batch *batch,
             struct u_upload_mgr *uploader,
             struct pipe_resource **out_res,
             unsigned size,
             unsigned alignment,
             uint32_t *out_offset)
{
   void *ptr = NULL;

   u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);

   struct iris_bo *bo = iris_resource_bo(*out_res);
   iris_use_pinned_bo(batch, bo, false);

   iris_record_state_size(batch->state_sizes,
                          bo->gtt_offset + *out_offset, size);

   *out_offset += iris_bo_offset_from_base_address(bo);

   return ptr;
}

/**
 * stream_state() + memcpy.
 */
static uint32_t
emit_state(struct iris_batch *batch,
           struct u_upload_mgr *uploader,
           struct pipe_resource **out_res,
           const void *data,
           unsigned size,
           unsigned alignment)
{
   unsigned offset = 0;
   uint32_t *map =
      stream_state(batch, uploader, out_res, size, alignment, &offset);

   if (map)
      memcpy(map, data, size);

   return offset;
}
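
/* A typical (hypothetical) caller packs a GENX structure on the stack and
 * hands it to emit_state().  For example:
 *
 *    struct pipe_resource *res = NULL;
 *    uint32_t vp[GENX(CC_VIEWPORT_length)];
 *    iris_pack_state(GENX(CC_VIEWPORT), vp, cc) {
 *       cc.MinimumDepth = 0.0f;
 *       cc.MaximumDepth = 1.0f;
 *    }
 *    uint32_t offset = emit_state(batch, ice->state.dynamic_uploader,
 *                                 &res, vp, sizeof(vp), 32);
 *
 * The returned offset already includes the BO's offset from the base
 * address, so it can be written directly into a packet's pointer field.
 */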

/**
 * Did field 'x' change between 'old_cso' and 'new_cso'?
 *
 * (If so, we may want to set some dirty flags.)
 */
#define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
#define cso_changed_memcmp(x) \
   (!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
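/* Note: both macros expect `old_cso` and `new_cso` pointers to be in scope
 * at the call site; a NULL old_cso (nothing previously bound) counts as
 * changed.
 */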

static void
flush_before_state_base_change(struct iris_batch *batch)
{
   const struct gen_device_info *devinfo = &batch->screen->devinfo;

   /* Flush before emitting STATE_BASE_ADDRESS.
    *
    * This isn't documented anywhere in the PRM.  However, it seems to be
    * necessary prior to changing the surface state base address.  We've
    * seen issues in Vulkan where we get GPU hangs when using multi-level
    * command buffers which clear depth, reset state base address, and then
    * go render stuff.
    *
    * Normally, in GL, we would trust the kernel to do sufficient stalls
    * and flushes prior to executing our batch.  However, it doesn't seem
    * as if the kernel's flushing is always sufficient and we don't want to
    * rely on it.
    *
    * We make this an end-of-pipe sync instead of a normal flush because we
    * do not know the current status of the GPU.  On Haswell at least,
    * having a fast-clear operation in flight at the same time as a normal
    * rendering operation can cause hangs.  Since the kernel's flushing is
    * insufficient, we need to ensure that any rendering operations from
    * other processes are definitely complete before we try to do our own
    * rendering.  It's a bit of a big hammer but it appears to work.
    */
   iris_emit_end_of_pipe_sync(batch,
                              "change STATE_BASE_ADDRESS (flushes)",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH |
                              PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                              PIPE_CONTROL_DATA_CACHE_FLUSH |
                              /* GEN:BUG:1606662791:
                               *
                               *   Software must program PIPE_CONTROL command
                               *   with "HDC Pipeline Flush" prior to
                               *   programming of the below two non-pipeline
                               *   state :
                               *      * STATE_BASE_ADDRESS
                               *      * 3DSTATE_BINDING_TABLE_POOL_ALLOC
                               */
                              ((GEN_GEN == 12 && devinfo->revision == 0 /* A0 */ ?
                                PIPE_CONTROL_FLUSH_HDC : 0)));
}

static void
flush_after_state_base_change(struct iris_batch *batch)
{
   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables.  From the Broadwell PRM,
    * Shared Function > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software.  It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    [...]
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * Broadwell PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever in
    * regards to surface state and binding tables.  Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX: As far as we've been able to determine through experimentation,
    * flushing the texture cache appears to be sufficient.  The theory here
    * is that all of the sampling/rendering units cache the binding table
    * in the texture cache.  However, we have yet to actually confirm this.
    */
   iris_emit_end_of_pipe_sync(batch,
                              "change STATE_BASE_ADDRESS (invalidates)",
                              PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                              PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                              PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}

static void
_iris_emit_lri(struct iris_batch *batch, uint32_t reg, uint32_t val)
{
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset = reg;
      lri.DataDWord = val;
   }
}
#define iris_emit_lri(b, r, v) _iris_emit_lri(b, GENX(r##_num), v)

static void
_iris_emit_lrr(struct iris_batch *batch, uint32_t dst, uint32_t src)
{
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
      lrr.SourceRegisterAddress = src;
      lrr.DestinationRegisterAddress = dst;
   }
}

static void
iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
                         uint32_t src)
{
   _iris_emit_lrr(batch, dst, src);
}

static void
iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
                         uint32_t src)
{
   _iris_emit_lrr(batch, dst, src);
   _iris_emit_lrr(batch, dst + 4, src + 4);
}

static void
iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
                         uint32_t val)
{
   _iris_emit_lri(batch, reg, val);
}

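/**
 * Load a 64-bit value into a pair of MMIO registers via two
 * MI_LOAD_REGISTER_IMM commands, low DWord first.
 */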
static void
iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
                         uint64_t val)
{
   _iris_emit_lri(batch, reg + 0, val & 0xffffffff);
   _iris_emit_lri(batch, reg + 4, val >> 32);
}

/**
 * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
 */
static void
iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
                         struct iris_bo *bo, uint32_t offset)
{
   iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = reg;
      lrm.MemoryAddress = ro_bo(bo, offset);
   }
}

/**
 * Load a 64-bit value from a buffer into a MMIO register via
 * two MI_LOAD_REGISTER_MEM commands.
 */
static void
iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
                         struct iris_bo *bo, uint32_t offset)
{
   iris_load_register_mem32(batch, reg + 0, bo, offset + 0);
   iris_load_register_mem32(batch, reg + 4, bo, offset + 4);
}

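/**
 * Store a 32-bit MMIO register to a buffer via MI_STORE_REGISTER_MEM,
 * optionally predicated on the MI predicate state.
 */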
static void
iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
                          struct iris_bo *bo, uint32_t offset,
                          bool predicated)
{
   iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
      srm.RegisterAddress = reg;
      srm.MemoryAddress = rw_bo(bo, offset);
      srm.PredicateEnable = predicated;
   }
}

static void
iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
                          struct iris_bo *bo, uint32_t offset,
                          bool predicated)
{
   iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated);
   iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated);
}

static void
iris_store_data_imm32(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint32_t imm)
{
   iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) {
      sdi.Address = rw_bo(bo, offset);
      sdi.ImmediateData = imm;
   }
}

static void
iris_store_data_imm64(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint64_t imm)
{
   /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of
    * 2 in genxml but it's actually variable length and we need 5 DWords.
    */
   void *map = iris_get_command_space(batch, 4 * 5);
   _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) {
      sdi.DWordLength = 5 - 2;
      sdi.Address = rw_bo(bo, offset);
      sdi.ImmediateData = imm;
   }
}

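/**
 * Copy `bytes` of data between buffer objects, one DWord at a time
 * (each MI_COPY_MEM_MEM command copies exactly one DWord).
 */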
static void
iris_copy_mem_mem(struct iris_batch *batch,
                  struct iris_bo *dst_bo, uint32_t dst_offset,
                  struct iris_bo *src_bo, uint32_t src_offset,
                  unsigned bytes)
{
   /* MI_COPY_MEM_MEM operates on DWords. */
   assert(bytes % 4 == 0);
   assert(dst_offset % 4 == 0);
   assert(src_offset % 4 == 0);

   for (unsigned i = 0; i < bytes; i += 4) {
      iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
         cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i);
         cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
      }
   }
}

static void
emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
{
#if GEN_GEN >= 8 && GEN_GEN < 10
   /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
    *
    *   Software must clear the COLOR_CALC_STATE Valid field in
    *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
    *   with Pipeline Select set to GPGPU.
    *
    * The internal hardware docs recommend the same workaround for Gen9
    * hardware too.
    */
   if (pipeline == GPGPU)
      iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
#endif

   /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
    * PIPELINE_SELECT [DevBWR+]":
    *
    *    "Project: DEVSNB+
    *
    *     Software must ensure all the write caches are flushed through a
    *     stalling PIPE_CONTROL command followed by another PIPE_CONTROL
    *     command to invalidate read only caches prior to programming
    *     MI_PIPELINE_SELECT command to change the Pipeline Select Mode."
    */
   iris_emit_pipe_control_flush(batch,
                                "workaround: PIPELINE_SELECT flushes (1/2)",
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_DATA_CACHE_FLUSH |
                                PIPE_CONTROL_CS_STALL);

   iris_emit_pipe_control_flush(batch,
                                "workaround: PIPELINE_SELECT flushes (2/2)",
                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                PIPE_CONTROL_INSTRUCTION_INVALIDATE);

   iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
#if GEN_GEN >= 9
      sel.MaskBits = 3;
#endif
      sel.PipelineSelection = pipeline;
   }
}

UNUSED static void
init_glk_barrier_mode(struct iris_batch *batch, uint32_t value)
{
#if GEN_GEN == 9
   /* Project: DevGLK
    *
    *    "This chicken bit works around a hardware issue with barrier
    *     logic encountered when switching between GPGPU and 3D pipelines.
    *     To workaround the issue, this mode bit should be set after a
    *     pipeline is selected."
    */
   uint32_t reg_val;
   iris_pack_state(GENX(SLICE_COMMON_ECO_CHICKEN1), &reg_val, reg) {
      reg.GLKBarrierMode = value;
      reg.GLKBarrierModeMask = 1;
   }
   iris_emit_lri(batch, SLICE_COMMON_ECO_CHICKEN1, reg_val);
#endif
}

static void
init_state_base_address(struct iris_batch *batch)
{
   uint32_t mocs = batch->screen->isl_dev.mocs.internal;
   flush_before_state_base_change(batch);

   /* We program most base addresses once at context initialization time.
    * Each base address points at a 4GB memory zone, and never needs to
    * change.  See iris_bufmgr.h for a description of the memory zones.
    *
    * The one exception is Surface State Base Address, which needs to be
    * updated occasionally.  See iris_binder.c for the details there.
    */
   iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.GeneralStateMOCS = mocs;
      sba.StatelessDataPortAccessMOCS = mocs;
      sba.DynamicStateMOCS = mocs;
      sba.IndirectObjectMOCS = mocs;
      sba.InstructionMOCS = mocs;
      sba.SurfaceStateMOCS = mocs;

      sba.GeneralStateBaseAddressModifyEnable = true;
      sba.DynamicStateBaseAddressModifyEnable = true;
      sba.IndirectObjectBaseAddressModifyEnable = true;
      sba.InstructionBaseAddressModifyEnable = true;
      sba.GeneralStateBufferSizeModifyEnable = true;
      sba.DynamicStateBufferSizeModifyEnable = true;
#if (GEN_GEN >= 9)
      sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
      sba.BindlessSurfaceStateMOCS = mocs;
#endif
      sba.IndirectObjectBufferSizeModifyEnable = true;
      sba.InstructionBuffersizeModifyEnable = true;

      sba.InstructionBaseAddress = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
      sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);

      sba.GeneralStateBufferSize = 0xfffff;
      sba.IndirectObjectBufferSize = 0xfffff;
      sba.InstructionBufferSize = 0xfffff;
      sba.DynamicStateBufferSize = 0xfffff;
   }

   flush_after_state_base_change(batch);
}

static void
iris_emit_l3_config(struct iris_batch *batch,
                    const struct gen_l3_config *cfg)
{
   uint32_t reg_val;

#if GEN_GEN >= 12
#define L3_ALLOCATION_REG GENX(L3ALLOC)
#define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
#else
#define L3_ALLOCATION_REG GENX(L3CNTLREG)
#define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
#endif

   iris_pack_state(L3_ALLOCATION_REG, &reg_val, reg) {
#if GEN_GEN < 11
      reg.SLMEnable = cfg->n[GEN_L3P_SLM] > 0;
#endif
#if GEN_GEN == 11
      /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
       * in L3CNTLREG register.  The default setting of the bit is not the
       * desirable behavior.
       */
      reg.ErrorDetectionBehaviorControl = true;
      reg.UseFullWays = true;
#endif
      reg.URBAllocation = cfg->n[GEN_L3P_URB];
      reg.ROAllocation = cfg->n[GEN_L3P_RO];
      reg.DCAllocation = cfg->n[GEN_L3P_DC];
      reg.AllAllocation = cfg->n[GEN_L3P_ALL];
   }
   _iris_emit_lri(batch, L3_ALLOCATION_REG_num, reg_val);
}

#if GEN_GEN == 9
static void
iris_enable_obj_preemption(struct iris_batch *batch, bool enable)
{
   uint32_t reg_val;

   /* A fixed function pipe flush is required before modifying this field */
   iris_emit_end_of_pipe_sync(batch, enable ? "enable preemption"
                                            : "disable preemption",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* enable object level preemption */
   iris_pack_state(GENX(CS_CHICKEN1), &reg_val, reg) {
      reg.ReplayMode = enable;
      reg.ReplayModeMask = true;
   }
   iris_emit_lri(batch, CS_CHICKEN1, reg_val);
}
#endif

#if GEN_GEN == 11
static void
iris_upload_slice_hashing_state(struct iris_batch *batch)
{
   const struct gen_device_info *devinfo = &batch->screen->devinfo;
   int subslices_delta =
      devinfo->ppipe_subslices[0] - devinfo->ppipe_subslices[1];
   if (subslices_delta == 0)
      return;

   struct iris_context *ice = NULL;
   ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]);
   assert(&ice->batches[IRIS_BATCH_RENDER] == batch);

   unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
   uint32_t hash_address;
   struct pipe_resource *tmp = NULL;
   uint32_t *map =
      stream_state(batch, ice->state.dynamic_uploader, &tmp,
                   size, 64, &hash_address);
   pipe_resource_reference(&tmp, NULL);

   struct GENX(SLICE_HASH_TABLE) table0 = {
      .Entry = {
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
         { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
         { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
         { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }
      }
   };

   struct GENX(SLICE_HASH_TABLE) table1 = {
      .Entry = {
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
         { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
         { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
         { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }
      }
   };

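   /* Pick the table that skews hashing toward the pixel pipe with more
    * enabled subslices, so an asymmetric fusing doesn't bottleneck on the
    * smaller pipe.
    */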
   const struct GENX(SLICE_HASH_TABLE) *table =
      subslices_delta < 0 ? &table0 : &table1;
   GENX(SLICE_HASH_TABLE_pack)(NULL, map, table);

   iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
      ptr.SliceHashStatePointerValid = true;
      ptr.SliceHashTableStatePointer = hash_address;
   }

   iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) {
      mode.SliceHashingTableEnable = true;
   }
}
#endif

static void
iris_alloc_push_constants(struct iris_batch *batch)
{
   /* For now, we set a static partitioning of the push constant area,
    * assuming that all stages could be in use.
    *
    * TODO: Try lazily allocating the HS/DS/GS sections as needed, and
    *       see if that improves performance by offering more space to
    *       the VS/FS when those aren't in use.  Also, try dynamically
    *       enabling/disabling it like i965 does.  This would be more
    *       stalls and may not actually help; we don't know yet.
    */
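
   /* The 3DSTATE_PUSH_CONSTANT_ALLOC_{VS,HS,DS,GS,PS} commands have
    * sequential 3D command sub-opcodes (18 through 22), so we can emit the
    * VS packet and simply patch the sub-opcode for each stage.
    */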
   for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
      iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
         alloc._3DCommandSubOpcode = 18 + i;
         alloc.ConstantBufferOffset = 6 * i;
         alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
      }
   }
}

#if GEN_GEN >= 12
static void
init_aux_map_state(struct iris_batch *batch);
#endif

/**
 * Upload the initial GPU state for a render context.
 *
 * This sets some invariant state that needs to be programmed a particular
 * way, but which we never actually change.
 */
static void
iris_init_render_context(struct iris_batch *batch)
{
   UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
   uint32_t reg_val;

   emit_pipeline_select(batch, _3D);

   iris_emit_l3_config(batch, batch->screen->l3_config_3d);

   init_state_base_address(batch);

#if GEN_GEN >= 9
   iris_pack_state(GENX(CS_DEBUG_MODE2), &reg_val, reg) {
      reg.CONSTANT_BUFFERAddressOffsetDisable = true;
      reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
   }
   iris_emit_lri(batch, CS_DEBUG_MODE2, reg_val);
#else
   iris_pack_state(GENX(INSTPM), &reg_val, reg) {
      reg.CONSTANT_BUFFERAddressOffsetDisable = true;
      reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
   }
   iris_emit_lri(batch, INSTPM, reg_val);
#endif

#if GEN_GEN == 9
   iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
      reg.FloatBlendOptimizationEnable = true;
      reg.FloatBlendOptimizationEnableMask = true;
      reg.PartialResolveDisableInVC = true;
      reg.PartialResolveDisableInVCMask = true;
   }
   iris_emit_lri(batch, CACHE_MODE_1, reg_val);

   if (devinfo->is_geminilake)
      init_glk_barrier_mode(batch, GLK_BARRIER_MODE_3D_HULL);
#endif

#if GEN_GEN == 11
   iris_pack_state(GENX(TCCNTLREG), &reg_val, reg) {
      reg.L3DataPartialWriteMergingEnable = true;
      reg.ColorZPartialWriteMergingEnable = true;
      reg.URBPartialWriteMergingEnable = true;
      reg.TCDisable = true;
   }
   iris_emit_lri(batch, TCCNTLREG, reg_val);

   iris_pack_state(GENX(SAMPLER_MODE), &reg_val, reg) {
      reg.HeaderlessMessageforPreemptableContexts = 1;
      reg.HeaderlessMessageforPreemptableContextsMask = 1;
   }
   iris_emit_lri(batch, SAMPLER_MODE, reg_val);

   /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */
   iris_pack_state(GENX(HALF_SLICE_CHICKEN7), &reg_val, reg) {
      reg.EnabledTexelOffsetPrecisionFix = 1;
      reg.EnabledTexelOffsetPrecisionFixMask = 1;
   }
   iris_emit_lri(batch, HALF_SLICE_CHICKEN7, reg_val);

   /* The hardware specification recommends disabling repacking for
    * compatibility with the decompression mechanism in the display
    * controller.
    */
   if (devinfo->disable_ccs_repack) {
      iris_pack_state(GENX(CACHE_MODE_0), &reg_val, reg) {
         reg.DisableRepackingforCompression = true;
         reg.DisableRepackingforCompressionMask = true;
      }
      iris_emit_lri(batch, CACHE_MODE_0, reg_val);
   }

   iris_upload_slice_hashing_state(batch);
#endif

   /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
    * changing it dynamically.  We set it to the maximum size here, and
    * instead include the render target dimensions in the viewport, so
    * viewport extents clipping takes care of pruning stray geometry.
    */
   iris_emit_cmd(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleXMax = UINT16_MAX;
      rect.ClippedDrawingRectangleYMax = UINT16_MAX;
   }

   /* Set the initial MSAA sample positions. */
   iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_PATTERN), pat) {
      GEN_SAMPLE_POS_1X(pat._1xSample);
      GEN_SAMPLE_POS_2X(pat._2xSample);
      GEN_SAMPLE_POS_4X(pat._4xSample);
      GEN_SAMPLE_POS_8X(pat._8xSample);
#if GEN_GEN >= 9
      GEN_SAMPLE_POS_16X(pat._16xSample);
#endif
   }

   /* Use the legacy AA line coverage computation. */
   iris_emit_cmd(batch, GENX(3DSTATE_AA_LINE_PARAMETERS), foo);

   /* Disable chromakeying (it's for media) */
   iris_emit_cmd(batch, GENX(3DSTATE_WM_CHROMAKEY), foo);

   /* We want regular rendering, not special HiZ operations. */
   iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);

   /* No polygon stippling offsets are necessary. */
   /* TODO: may need to set an offset for origin-UL framebuffers */
   iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);

   iris_alloc_push_constants(batch);

#if GEN_GEN >= 12
   init_aux_map_state(batch);
#endif
}

static void
iris_init_compute_context(struct iris_batch *batch)
{
   UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;

   /* GEN:BUG:1607854226:
    *
    * Start with pipeline in 3D mode to set the STATE_BASE_ADDRESS.
    */
#if GEN_GEN == 12
   emit_pipeline_select(batch, _3D);
#else
   emit_pipeline_select(batch, GPGPU);
#endif

   iris_emit_l3_config(batch, batch->screen->l3_config_cs);

   init_state_base_address(batch);

#if GEN_GEN == 12
   emit_pipeline_select(batch, GPGPU);
#endif

#if GEN_GEN == 9
   if (devinfo->is_geminilake)
      init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
#endif

#if GEN_GEN >= 12
   init_aux_map_state(batch);
#endif
}

struct iris_vertex_buffer_state {
   /** The VERTEX_BUFFER_STATE hardware structure. */
   uint32_t state[GENX(VERTEX_BUFFER_STATE_length)];

   /** The resource to source vertex data from. */
   struct pipe_resource *resource;

   int offset;
};

struct iris_depth_buffer_state {
   /* Depth/HiZ/Stencil related hardware packets. */
   uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
                    GENX(3DSTATE_STENCIL_BUFFER_length) +
                    GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
                    GENX(3DSTATE_CLEAR_PARAMS_length) +
                    GENX(MI_LOAD_REGISTER_IMM_length) * 2];
};

/**
 * Generation-specific context state (ice->state.genx->...).
 *
 * Most state can go in iris_context directly, but these encode hardware
 * packets which vary by generation.
 */
struct iris_genx_state {
   struct iris_vertex_buffer_state vertex_buffers[33];
   uint32_t last_index_buffer[GENX(3DSTATE_INDEX_BUFFER_length)];

   struct iris_depth_buffer_state depth_buffer;

   uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];

#if GEN_GEN == 8
   bool pma_fix_enabled;
#endif

#if GEN_GEN == 9
   /* Is object level preemption enabled? */
   bool object_preemption;
#endif

   struct {
#if GEN_GEN == 8
      struct brw_image_param image_param[PIPE_MAX_SHADER_IMAGES];
#endif
   } shaders[MESA_SHADER_STAGES];
};

/**
 * The pipe->set_blend_color() driver hook.
 *
 * This corresponds to our COLOR_CALC_STATE.
 */
static void
iris_set_blend_color(struct pipe_context *ctx,
                     const struct pipe_blend_color *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;

   /* Our COLOR_CALC_STATE is exactly pipe_blend_color, so just memcpy */
   memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
   ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
}

/**
 * Gallium CSO for blend state (see pipe_blend_state).
 */
struct iris_blend_state {
   /** Partial 3DSTATE_PS_BLEND */
   uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];

   /** Partial BLEND_STATE */
   uint32_t blend_state[GENX(BLEND_STATE_length) +
                        BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)];

   bool alpha_to_coverage; /* for shader key */

   /** Bitfield of whether blending is enabled for RT[i] - for aux resolves */
   uint8_t blend_enables;

   /** Bitfield of whether color writes are enabled for RT[i] */
   uint8_t color_write_enables;

   /** Does RT[0] use dual color blending? */
   bool dual_color_blending;
};

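/**
 * The hardware's AlphaToOne replaces the fragment's source alpha with 1.0,
 * but (as far as we know) it does not cover the second source used by
 * dual-source blending.  So fold alpha-to-one into any blend factors that
 * read SRC1 alpha, replacing them with their constant equivalents.
 */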
static enum pipe_blendfactor
fix_blendfactor(enum pipe_blendfactor f, bool alpha_to_one)
{
   if (alpha_to_one) {
      if (f == PIPE_BLENDFACTOR_SRC1_ALPHA)
         return PIPE_BLENDFACTOR_ONE;

      if (f == PIPE_BLENDFACTOR_INV_SRC1_ALPHA)
         return PIPE_BLENDFACTOR_ZERO;
   }

   return f;
}

/**
 * The pipe->create_blend_state() driver hook.
 *
 * Translates a pipe_blend_state into iris_blend_state.
 */
static void *
iris_create_blend_state(struct pipe_context *ctx,
                        const struct pipe_blend_state *state)
{
   struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
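   /* cso->blend_state holds the BLEND_STATE header followed by one
    * BLEND_STATE_ENTRY per render target; blend_entry walks those entries.
    */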
   uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);

   cso->blend_enables = 0;
   cso->color_write_enables = 0;
   STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS <= 8);

   cso->alpha_to_coverage = state->alpha_to_coverage;

   bool indep_alpha_blend = false;

   for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
      const struct pipe_rt_blend_state *rt =
         &state->rt[state->independent_blend_enable ? i : 0];

      enum pipe_blendfactor src_rgb =
         fix_blendfactor(rt->rgb_src_factor, state->alpha_to_one);
      enum pipe_blendfactor src_alpha =
         fix_blendfactor(rt->alpha_src_factor, state->alpha_to_one);
      enum pipe_blendfactor dst_rgb =
         fix_blendfactor(rt->rgb_dst_factor, state->alpha_to_one);
      enum pipe_blendfactor dst_alpha =
         fix_blendfactor(rt->alpha_dst_factor, state->alpha_to_one);

      if (rt->rgb_func != rt->alpha_func ||
          src_rgb != src_alpha || dst_rgb != dst_alpha)
         indep_alpha_blend = true;

      if (rt->blend_enable)
         cso->blend_enables |= 1u << i;

      if (rt->colormask)
         cso->color_write_enables |= 1u << i;

      iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
         be.LogicOpEnable = state->logicop_enable;
         be.LogicOpFunction = state->logicop_func;

         be.PreBlendSourceOnlyClampEnable = false;
         be.ColorClampRange = COLORCLAMP_RTFORMAT;
         be.PreBlendColorClampEnable = true;
         be.PostBlendColorClampEnable = true;

         be.ColorBufferBlendEnable = rt->blend_enable;

         be.ColorBlendFunction = rt->rgb_func;
         be.AlphaBlendFunction = rt->alpha_func;
         be.SourceBlendFactor = src_rgb;
         be.SourceAlphaBlendFactor = src_alpha;
         be.DestinationBlendFactor = dst_rgb;
         be.DestinationAlphaBlendFactor = dst_alpha;

         be.WriteDisableRed = !(rt->colormask & PIPE_MASK_R);
         be.WriteDisableGreen = !(rt->colormask & PIPE_MASK_G);
         be.WriteDisableBlue = !(rt->colormask & PIPE_MASK_B);
         be.WriteDisableAlpha = !(rt->colormask & PIPE_MASK_A);
      }
      blend_entry += GENX(BLEND_STATE_ENTRY_length);
   }

   iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
      /* pb.HasWriteableRT is filled in at draw time.
       * pb.AlphaTestEnable is filled in at draw time.
       *
       * pb.ColorBufferBlendEnable is filled in at draw time so we can avoid
       * setting it when dual color blending without an appropriate shader.
       */

      pb.AlphaToCoverageEnable = state->alpha_to_coverage;
      pb.IndependentAlphaBlendEnable = indep_alpha_blend;

      pb.SourceBlendFactor =
         fix_blendfactor(state->rt[0].rgb_src_factor, state->alpha_to_one);
      pb.SourceAlphaBlendFactor =
         fix_blendfactor(state->rt[0].alpha_src_factor, state->alpha_to_one);
      pb.DestinationBlendFactor =
         fix_blendfactor(state->rt[0].rgb_dst_factor, state->alpha_to_one);
      pb.DestinationAlphaBlendFactor =
         fix_blendfactor(state->rt[0].alpha_dst_factor, state->alpha_to_one);
   }

   iris_pack_state(GENX(BLEND_STATE), cso->blend_state, bs) {
      bs.AlphaToCoverageEnable = state->alpha_to_coverage;
      bs.IndependentAlphaBlendEnable = indep_alpha_blend;
      bs.AlphaToOneEnable = state->alpha_to_one;
      bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
      bs.ColorDitherEnable = state->dither;
      /* bs.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
   }

   cso->dual_color_blending = util_blend_state_is_dual(state, 0);

   return cso;
}

/**
 * The pipe->bind_blend_state() driver hook.
 *
 * Bind a blending CSO and flag related dirty bits.
 */
static void
iris_bind_blend_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_blend_state *cso = state;

   ice->state.cso_blend = cso;
   ice->state.blend_enables = cso ? cso->blend_enables : 0;

   ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
   ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
   ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
   ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_BLEND];

   if (GEN_GEN == 8)
      ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}

/**
 * Return true if the FS writes to any color outputs which are not disabled
 * via color masking.
 */
static bool
has_writeable_rt(const struct iris_blend_state *cso_blend,
                 const struct shader_info *fs_info)
{
   if (!fs_info)
      return false;

   unsigned rt_outputs = fs_info->outputs_written >> FRAG_RESULT_DATA0;

   if (fs_info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR))
      rt_outputs = (1 << BRW_MAX_DRAW_BUFFERS) - 1;

   return cso_blend->color_write_enables & rt_outputs;
}

/**
 * Gallium CSO for depth, stencil, and alpha testing state.
 */
struct iris_depth_stencil_alpha_state {
   /** Partial 3DSTATE_WM_DEPTH_STENCIL. */
   uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];

#if GEN_GEN >= 12
   uint32_t depth_bounds[GENX(3DSTATE_DEPTH_BOUNDS_length)];
#endif

   /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
   struct pipe_alpha_state alpha;

   /** Outbound to resolve and cache set tracking. */
   bool depth_writes_enabled;
   bool stencil_writes_enabled;

   /** Outbound to Gen8-9 PMA stall equations */
   bool depth_test_enabled;
};

/**
 * The pipe->create_depth_stencil_alpha_state() driver hook.
 *
 * We encode most of 3DSTATE_WM_DEPTH_STENCIL, and just save off the alpha
 * testing state since we need pieces of it in a variety of places.
 */
static void *
iris_create_zsa_state(struct pipe_context *ctx,
                      const struct pipe_depth_stencil_alpha_state *state)
{
   struct iris_depth_stencil_alpha_state *cso =
      malloc(sizeof(struct iris_depth_stencil_alpha_state));

   bool two_sided_stencil = state->stencil[1].enabled;

   cso->alpha = state->alpha;
   cso->depth_writes_enabled = state->depth.writemask;
   cso->depth_test_enabled = state->depth.enabled;
   cso->stencil_writes_enabled =
      state->stencil[0].writemask != 0 ||
      (two_sided_stencil && state->stencil[1].writemask != 0);

   /* The state tracker needs to optimize away EQUAL writes for us. */
   assert(!(state->depth.func == PIPE_FUNC_EQUAL && state->depth.writemask));

   iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), cso->wmds, wmds) {
      wmds.StencilFailOp = state->stencil[0].fail_op;
      wmds.StencilPassDepthFailOp = state->stencil[0].zfail_op;
      wmds.StencilPassDepthPassOp = state->stencil[0].zpass_op;
      wmds.StencilTestFunction =
         translate_compare_func(state->stencil[0].func);
      wmds.BackfaceStencilFailOp = state->stencil[1].fail_op;
      wmds.BackfaceStencilPassDepthFailOp = state->stencil[1].zfail_op;
      wmds.BackfaceStencilPassDepthPassOp = state->stencil[1].zpass_op;
      wmds.BackfaceStencilTestFunction =
         translate_compare_func(state->stencil[1].func);
      wmds.DepthTestFunction = translate_compare_func(state->depth.func);
      wmds.DoubleSidedStencilEnable = two_sided_stencil;
      wmds.StencilTestEnable = state->stencil[0].enabled;
      wmds.StencilBufferWriteEnable =
         state->stencil[0].writemask != 0 ||
         (two_sided_stencil && state->stencil[1].writemask != 0);
      wmds.DepthTestEnable = state->depth.enabled;
      wmds.DepthBufferWriteEnable = state->depth.writemask;
      wmds.StencilTestMask = state->stencil[0].valuemask;
      wmds.StencilWriteMask = state->stencil[0].writemask;
      wmds.BackfaceStencilTestMask = state->stencil[1].valuemask;
      wmds.BackfaceStencilWriteMask = state->stencil[1].writemask;
      /* wmds.[Backface]StencilReferenceValue are merged later */
   }

#if GEN_GEN >= 12
   iris_pack_command(GENX(3DSTATE_DEPTH_BOUNDS), cso->depth_bounds, depth_bounds) {
      depth_bounds.DepthBoundsTestValueModifyDisable = false;
      depth_bounds.DepthBoundsTestEnableModifyDisable = false;
      depth_bounds.DepthBoundsTestEnable = state->depth.bounds_test;
      depth_bounds.DepthBoundsTestMinValue = state->depth.bounds_min;
      depth_bounds.DepthBoundsTestMaxValue = state->depth.bounds_max;
   }
#endif

   return cso;
}

/**
 * The pipe->bind_depth_stencil_alpha_state() driver hook.
 *
 * Bind a depth/stencil/alpha CSO and flag related dirty bits.
 */
static void
iris_bind_zsa_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_depth_stencil_alpha_state *old_cso = ice->state.cso_zsa;
   struct iris_depth_stencil_alpha_state *new_cso = state;

   if (new_cso) {
      if (cso_changed(alpha.ref_value))
         ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;

      if (cso_changed(alpha.enabled))
         ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;

      if (cso_changed(alpha.func))
         ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;

      if (cso_changed(depth_writes_enabled))
         ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;

      ice->state.depth_writes_enabled = new_cso->depth_writes_enabled;
      ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled;

#if GEN_GEN >= 12
      /* depth_bounds is an array, so a != comparison would only compare
       * pointers; memcmp the packed DWords instead.
       */
      if (cso_changed_memcmp(depth_bounds))
         ice->state.dirty |= IRIS_DIRTY_DEPTH_BOUNDS;
#endif
   }

   ice->state.cso_zsa = new_cso;
   ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
   ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
   ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];

   if (GEN_GEN == 8)
      ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}

#if GEN_GEN == 8
static bool
want_pma_fix(struct iris_context *ice)
{
   UNUSED struct iris_screen *screen = (void *) ice->ctx.screen;
   UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
   const struct brw_wm_prog_data *wm_prog_data = (void *)
      ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
   const struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   const struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
   const struct iris_blend_state *cso_blend = ice->state.cso_blend;

   /* In very specific combinations of state, we can instruct Gen8-9 hardware
    * to avoid stalling at the pixel mask array.  The state equations are
    * documented in these places:
    *
    * - Gen8 Depth PMA Fix:   CACHE_MODE_1::NP_PMA_FIX_ENABLE
    * - Gen9 Stencil PMA Fix: CACHE_MODE_0::STC PMA Optimization Enable
    *
    * Both equations share some common elements:
    *
    *    no_hiz_op =
    *       !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    *         3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    *         3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    *         3DSTATE_WM_HZ_OP::StencilBufferClear) &&
    *
    *    killpixels =
    *       3DSTATE_WM::ForceKillPix != ForceOff &&
    *       (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
    *        3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
    *        3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
    *        3DSTATE_PS_BLEND::AlphaTestEnable ||
    *        3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
    *
    *    (Technically the stencil PMA treats ForceKillPix differently,
    *     but I think this is a documentation oversight, and we don't
    *     ever use it in this way, so it doesn't matter).
    *
    *    common_pma_fix =
    *       3DSTATE_WM::ForceThreadDispatch != 1 &&
    *       3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0 &&
    *       3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
    *       3DSTATE_DEPTH_BUFFER::HIZ Enable &&
    *       3DSTATE_WM::EDSC_Mode != EDSC_PREPS &&
    *       3DSTATE_PS_EXTRA::PixelShaderValid &&
    *       no_hiz_op
    *
    * These are always true:
    *
    *    3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0
    *    3DSTATE_PS_EXTRA::PixelShaderValid
    *
    * Also, we never use the normal drawing path for HiZ ops; these are true:
    *
    *    !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    *      3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    *      3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    *      3DSTATE_WM_HZ_OP::StencilBufferClear)
    *
    * This happens sometimes:
    *
    *    3DSTATE_WM::ForceThreadDispatch != 1
    *
    * However, we choose to ignore it as it either agrees with the signal
    * (dispatch was already enabled, so nothing out of the ordinary), or
    * there are no framebuffer attachments (so no depth or HiZ anyway,
    * meaning the PMA signal will already be disabled).
    */

   if (!cso_fb->zsbuf)
      return false;

   struct iris_resource *zres, *sres;
   iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, &zres, &sres);

   /* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
    * 3DSTATE_DEPTH_BUFFER::HIZ Enable &&
    */
   if (!zres || !iris_resource_level_has_hiz(zres, cso_fb->zsbuf->u.tex.level))
      return false;

   /* 3DSTATE_WM::EDSC_Mode != EDSC_PREPS */
   if (wm_prog_data->early_fragment_tests)
      return false;

   /* 3DSTATE_WM::ForceKillPix != ForceOff &&
    * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
    *  3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
    *  3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
    *  3DSTATE_PS_BLEND::AlphaTestEnable ||
    *  3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
    */
   bool killpixels = wm_prog_data->uses_kill || wm_prog_data->uses_omask ||
                     cso_blend->alpha_to_coverage || cso_zsa->alpha.enabled;

   /* The Gen8 depth PMA equation becomes:
    *
    *    depth_writes =
    *       3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
    *       3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE
    *
    *    stencil_writes =
    *       3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
    *       3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE &&
    *       3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE
    *
    *    Z_PMA_OPT =
    *       common_pma_fix &&
    *       3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable &&
    *       ((killpixels && (depth_writes || stencil_writes)) ||
    *        3DSTATE_PS_EXTRA::PixelShaderComputedDepthMode != PSCDEPTH_OFF)
    *
    */
   if (!cso_zsa->depth_test_enabled)
      return false;

   return wm_prog_data->computed_depth_mode != PSCDEPTH_OFF ||
          (killpixels && (cso_zsa->depth_writes_enabled ||
                          (sres && cso_zsa->stencil_writes_enabled)));
}
#endif

void
genX(update_pma_fix)(struct iris_context *ice,
                     struct iris_batch *batch,
                     bool enable)
{
#if GEN_GEN == 8
   struct iris_genx_state *genx = ice->state.genx;

   if (genx->pma_fix_enabled == enable)
      return;

   genx->pma_fix_enabled = enable;

   /* According to the Broadwell PIPE_CONTROL documentation, software should
    * emit a PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set
    * prior to the LRI.  If stencil buffer writes are enabled, then a Render
    * Cache Flush is also necessary.
    *
    * The Gen9 docs say to use a depth stall rather than a command streamer
    * stall.  However, the hardware seems to violently disagree.  A full
    * command streamer stall seems to be needed in both cases.
    */
   iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)",
                                PIPE_CONTROL_CS_STALL |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH);

   uint32_t reg_val;
   iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
      reg.NPPMAFixEnable = enable;
      reg.NPEarlyZFailsDisable = enable;
      reg.NPPMAFixEnableMask = true;
      reg.NPEarlyZFailsDisableMask = true;
   }
   iris_emit_lri(batch, CACHE_MODE_1, reg_val);

   /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
    * Flush bits is often necessary.  We do it regardless because it's easier.
    * The render cache flush is also necessary if stencil writes are enabled.
    *
    * Again, the Gen9 docs give a different set of flushes but the Broadwell
    * flushes seem to work just as well.
    */
1587 iris_emit_pipe_control_flush(batch, "PMA fix change (2/2)",
1588 PIPE_CONTROL_DEPTH_STALL |
1589 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1590 PIPE_CONTROL_RENDER_TARGET_FLUSH);
1591 #endif
1592 }
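/* A minimal sketch of a call site (hypothetical; the real caller lives in
 * the draw-time state upload path), relying on the early-out above when
 * nothing changed:
 *
 *    if (ice->state.dirty & IRIS_DIRTY_PMA_FIX)
 *       genX(update_pma_fix)(ice, batch, want_pma_fix(ice));
 *
 * where want_pma_fix() is assumed to be the predicate defined above.
 */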
1593
1594 /**
1595 * Gallium CSO for rasterizer state.
1596 */
1597 struct iris_rasterizer_state {
1598 uint32_t sf[GENX(3DSTATE_SF_length)];
1599 uint32_t clip[GENX(3DSTATE_CLIP_length)];
1600 uint32_t raster[GENX(3DSTATE_RASTER_length)];
1601 uint32_t wm[GENX(3DSTATE_WM_length)];
1602 uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];
1603
1604 uint8_t num_clip_plane_consts;
1605 bool clip_halfz; /* for CC_VIEWPORT */
1606 bool depth_clip_near; /* for CC_VIEWPORT */
1607 bool depth_clip_far; /* for CC_VIEWPORT */
1608 bool flatshade; /* for shader state */
1609 bool flatshade_first; /* for stream output */
1610 bool clamp_fragment_color; /* for shader state */
1611 bool light_twoside; /* for shader state */
1612 bool rasterizer_discard; /* for 3DSTATE_STREAMOUT and 3DSTATE_CLIP */
1613 bool half_pixel_center; /* for 3DSTATE_MULTISAMPLE */
1614 bool line_stipple_enable;
1615 bool poly_stipple_enable;
1616 bool multisample;
1617 bool force_persample_interp;
1618 bool conservative_rasterization;
1619 bool fill_mode_point_or_line;
1620 enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
1621 uint16_t sprite_coord_enable;
1622 };
1623
1624 static float
1625 get_line_width(const struct pipe_rasterizer_state *state)
1626 {
1627 float line_width = state->line_width;
1628
1629 /* From the OpenGL 4.4 spec:
1630 *
1631 * "The actual width of non-antialiased lines is determined by rounding
1632 * the supplied width to the nearest integer, then clamping it to the
1633 * implementation-dependent maximum non-antialiased line width."
1634 */
1635 if (!state->multisample && !state->line_smooth)
1636 line_width = roundf(state->line_width);
1637
1638 if (!state->multisample && state->line_smooth && line_width < 1.5f) {
1639 /* For 1 pixel line thickness or less, the general anti-aliasing
1640 * algorithm gives up, and a garbage line is generated. Setting a
1641 * Line Width of 0.0 specifies the rasterization of the "thinnest"
1642 * (one-pixel-wide), non-antialiased lines.
1643 *
1644 * Lines rendered with zero Line Width are rasterized using the
1645 * "Grid Intersection Quantization" rules as specified by the
1646 * "Zero-Width (Cosmetic) Line Rasterization" section of the docs.
1647 */
1648 line_width = 0.0f;
1649 }
1650
1651 return line_width;
1652 }
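/* Worked examples (illustrative values): an aliased, non-multisampled line
 * of width 1.4 rounds to 1.0 per the GL rule above, while a smoothed line
 * of width 1.2 falls under the 1.5 cutoff and is demoted to a zero-width
 * "cosmetic" line:
 *
 *    { .line_width = 1.4f }                      -> 1.0f
 *    { .line_width = 1.2f, .line_smooth = true } -> 0.0f
 */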
1653
1654 /**
1655 * The pipe->create_rasterizer_state() driver hook.
1656 */
1657 static void *
1658 iris_create_rasterizer_state(struct pipe_context *ctx,
1659 const struct pipe_rasterizer_state *state)
1660 {
1661 struct iris_rasterizer_state *cso =
1662 malloc(sizeof(struct iris_rasterizer_state));
1663
1664 cso->multisample = state->multisample;
1665 cso->force_persample_interp = state->force_persample_interp;
1666 cso->clip_halfz = state->clip_halfz;
1667 cso->depth_clip_near = state->depth_clip_near;
1668 cso->depth_clip_far = state->depth_clip_far;
1669 cso->flatshade = state->flatshade;
1670 cso->flatshade_first = state->flatshade_first;
1671 cso->clamp_fragment_color = state->clamp_fragment_color;
1672 cso->light_twoside = state->light_twoside;
1673 cso->rasterizer_discard = state->rasterizer_discard;
1674 cso->half_pixel_center = state->half_pixel_center;
1675 cso->sprite_coord_mode = state->sprite_coord_mode;
1676 cso->sprite_coord_enable = state->sprite_coord_enable;
1677 cso->line_stipple_enable = state->line_stipple_enable;
1678 cso->poly_stipple_enable = state->poly_stipple_enable;
1679 cso->conservative_rasterization =
1680 state->conservative_raster_mode == PIPE_CONSERVATIVE_RASTER_POST_SNAP;
1681
1682 cso->fill_mode_point_or_line =
1683 state->fill_front == PIPE_POLYGON_MODE_LINE ||
1684 state->fill_front == PIPE_POLYGON_MODE_POINT ||
1685 state->fill_back == PIPE_POLYGON_MODE_LINE ||
1686 state->fill_back == PIPE_POLYGON_MODE_POINT;
1687
1688 if (state->clip_plane_enable != 0)
1689 cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1;
1690 else
1691 cso->num_clip_plane_consts = 0;
1692
1693 float line_width = get_line_width(state);
1694
1695 iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
1696 sf.StatisticsEnable = true;
1697 sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
1698 sf.LineEndCapAntialiasingRegionWidth =
1699 state->line_smooth ? _10pixels : _05pixels;
1700 sf.LastPixelEnable = state->line_last_pixel;
1701 sf.LineWidth = line_width;
1702 sf.SmoothPointEnable = (state->point_smooth || state->multisample) &&
1703 !state->point_quad_rasterization;
1704 sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
1705 sf.PointWidth = state->point_size;
1706
1707 if (state->flatshade_first) {
1708 sf.TriangleFanProvokingVertexSelect = 1;
1709 } else {
1710 sf.TriangleStripListProvokingVertexSelect = 2;
1711 sf.TriangleFanProvokingVertexSelect = 2;
1712 sf.LineStripListProvokingVertexSelect = 1;
1713 }
1714 }
1715
1716 iris_pack_command(GENX(3DSTATE_RASTER), cso->raster, rr) {
1717 rr.FrontWinding = state->front_ccw ? CounterClockwise : Clockwise;
1718 rr.CullMode = translate_cull_mode(state->cull_face);
1719 rr.FrontFaceFillMode = translate_fill_mode(state->fill_front);
1720 rr.BackFaceFillMode = translate_fill_mode(state->fill_back);
1721 rr.DXMultisampleRasterizationEnable = state->multisample;
1722 rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
1723 rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
1724 rr.GlobalDepthOffsetEnablePoint = state->offset_point;
1725 rr.GlobalDepthOffsetConstant = state->offset_units * 2;
1726 rr.GlobalDepthOffsetScale = state->offset_scale;
1727 rr.GlobalDepthOffsetClamp = state->offset_clamp;
1728 rr.SmoothPointEnable = state->point_smooth;
1729 rr.AntialiasingEnable = state->line_smooth;
1730 rr.ScissorRectangleEnable = state->scissor;
1731 #if GEN_GEN >= 9
1732 rr.ViewportZNearClipTestEnable = state->depth_clip_near;
1733 rr.ViewportZFarClipTestEnable = state->depth_clip_far;
1734 rr.ConservativeRasterizationEnable =
1735 cso->conservative_rasterization;
1736 #else
1737 rr.ViewportZClipTestEnable = (state->depth_clip_near || state->depth_clip_far);
1738 #endif
1739 }
1740
1741 iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
1742 /* cl.NonPerspectiveBarycentricEnable is filled in at draw time from
1743 * the FS program; cl.ForceZeroRTAIndexEnable is filled in from the FB.
1744 */
1745 cl.EarlyCullEnable = true;
1746 cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
1747 cl.ForceUserClipDistanceClipTestEnableBitmask = true;
1748 cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
1749 cl.GuardbandClipTestEnable = true;
1750 cl.ClipEnable = true;
1751 cl.MinimumPointWidth = 0.125;
1752 cl.MaximumPointWidth = 255.875;
1753
1754 if (state->flatshade_first) {
1755 cl.TriangleFanProvokingVertexSelect = 1;
1756 } else {
1757 cl.TriangleStripListProvokingVertexSelect = 2;
1758 cl.TriangleFanProvokingVertexSelect = 2;
1759 cl.LineStripListProvokingVertexSelect = 1;
1760 }
1761 }
1762
1763 iris_pack_command(GENX(3DSTATE_WM), cso->wm, wm) {
1764 /* wm.BarycentricInterpolationMode and wm.EarlyDepthStencilControl are
1765 * filled in at draw time from the FS program.
1766 */
1767 wm.LineAntialiasingRegionWidth = _10pixels;
1768 wm.LineEndCapAntialiasingRegionWidth = _05pixels;
1769 wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
1770 wm.LineStippleEnable = state->line_stipple_enable;
1771 wm.PolygonStippleEnable = state->poly_stipple_enable;
1772 }
1773
1774 /* Remap from 0..255 back to 1..256 */
1775 const unsigned line_stipple_factor = state->line_stipple_factor + 1;
1776
1777 iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
1778 if (state->line_stipple_enable) {
1779 line.LineStipplePattern = state->line_stipple_pattern;
1780 line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
1781 line.LineStippleRepeatCount = line_stipple_factor;
1782 }
1783 }
1784
1785 return cso;
1786 }
1787
1788 /**
1789 * The pipe->bind_rasterizer_state() driver hook.
1790 *
1791 * Bind a rasterizer CSO and flag related dirty bits.
1792 */
1793 static void
1794 iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
1795 {
1796 struct iris_context *ice = (struct iris_context *) ctx;
1797 struct iris_rasterizer_state *old_cso = ice->state.cso_rast;
1798 struct iris_rasterizer_state *new_cso = state;
1799
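/* The cso_changed() / cso_changed_memcmp() helpers below are assumed to
 * compare the old and new CSO field-by-field, roughly:
 *
 *    #define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
 *
 * so every check degrades to "dirty" when no CSO was previously bound.
 */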
1800 if (new_cso) {
1801 /* Try to avoid re-emitting 3DSTATE_LINE_STIPPLE, it's non-pipelined */
1802 if (cso_changed_memcmp(line_stipple))
1803 ice->state.dirty |= IRIS_DIRTY_LINE_STIPPLE;
1804
1805 if (cso_changed(half_pixel_center))
1806 ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
1807
1808 if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
1809 ice->state.dirty |= IRIS_DIRTY_WM;
1810
1811 if (cso_changed(rasterizer_discard))
1812 ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;
1813
1814 if (cso_changed(flatshade_first))
1815 ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
1816
1817 if (cso_changed(depth_clip_near) || cso_changed(depth_clip_far) ||
1818 cso_changed(clip_halfz))
1819 ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
1820
1821 if (cso_changed(sprite_coord_enable) ||
1822 cso_changed(sprite_coord_mode) ||
1823 cso_changed(light_twoside))
1824 ice->state.dirty |= IRIS_DIRTY_SBE;
1825
1826 if (cso_changed(conservative_rasterization))
1827 ice->state.dirty |= IRIS_DIRTY_FS;
1828 }
1829
1830 ice->state.cso_rast = new_cso;
1831 ice->state.dirty |= IRIS_DIRTY_RASTER;
1832 ice->state.dirty |= IRIS_DIRTY_CLIP;
1833 ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_RASTERIZER];
1834 }
1835
1836 /**
1837 * Return true if the given wrap mode requires the border color to exist.
1838 *
1839 * (We can skip uploading it if the sampler isn't going to use it.)
1840 */
1841 static bool
1842 wrap_mode_needs_border_color(unsigned wrap_mode)
1843 {
1844 return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
1845 }
1846
1847 /**
1848 * Gallium CSO for sampler state.
1849 */
1850 struct iris_sampler_state {
1851 union pipe_color_union border_color;
1852 bool needs_border_color;
1853
1854 uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
1855 };
1856
1857 /**
1858 * The pipe->create_sampler_state() driver hook.
1859 *
1860 * We fill out SAMPLER_STATE (except for the border color pointer), and
1861 * store that on the CPU. It doesn't make sense to upload it to a GPU
1862 * buffer object yet, because 3DSTATE_SAMPLER_STATE_POINTERS requires
1863 * all bound sampler states to be in contiguous memory.
1864 */
1865 static void *
1866 iris_create_sampler_state(struct pipe_context *ctx,
1867 const struct pipe_sampler_state *state)
1868 {
1869 struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);
1870
1871 if (!cso)
1872 return NULL;
1873
1874 STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
1875 STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);
1876
1877 unsigned wrap_s = translate_wrap(state->wrap_s);
1878 unsigned wrap_t = translate_wrap(state->wrap_t);
1879 unsigned wrap_r = translate_wrap(state->wrap_r);
1880
1881 memcpy(&cso->border_color, &state->border_color, sizeof(cso->border_color));
1882
1883 cso->needs_border_color = wrap_mode_needs_border_color(wrap_s) ||
1884 wrap_mode_needs_border_color(wrap_t) ||
1885 wrap_mode_needs_border_color(wrap_r);
1886
1887 float min_lod = state->min_lod;
1888 unsigned mag_img_filter = state->mag_img_filter;
1889
1890 // XXX: explain this code ported from ilo...I don't get it at all...
1891 if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE &&
1892 state->min_lod > 0.0f) {
1893 min_lod = 0.0f;
1894 mag_img_filter = state->min_img_filter;
1895 }
1896
1897 iris_pack_state(GENX(SAMPLER_STATE), cso->sampler_state, samp) {
1898 samp.TCXAddressControlMode = wrap_s;
1899 samp.TCYAddressControlMode = wrap_t;
1900 samp.TCZAddressControlMode = wrap_r;
1901 samp.CubeSurfaceControlMode = state->seamless_cube_map;
1902 samp.NonnormalizedCoordinateEnable = !state->normalized_coords;
1903 samp.MinModeFilter = state->min_img_filter;
1904 samp.MagModeFilter = mag_img_filter;
1905 samp.MipModeFilter = translate_mip_filter(state->min_mip_filter);
1906 samp.MaximumAnisotropy = RATIO21;
1907
1908 if (state->max_anisotropy >= 2) {
1909 if (state->min_img_filter == PIPE_TEX_FILTER_LINEAR) {
1910 samp.MinModeFilter = MAPFILTER_ANISOTROPIC;
1911 samp.AnisotropicAlgorithm = EWAApproximation;
1912 }
1913
1914 if (state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)
1915 samp.MagModeFilter = MAPFILTER_ANISOTROPIC;
1916
1917 samp.MaximumAnisotropy =
1918 MIN2((state->max_anisotropy - 2) / 2, RATIO161);
1919 }
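/* Illustration: the RATIO* encodings are assumed to be (N / 2 - 1), so the
 * expression above maps 4x -> RATIO41 (1) and 16x -> RATIO161 (7), with odd
 * requests rounding down.
 */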
1920
1921 /* Set address rounding bits if not using nearest filtering. */
1922 if (state->min_img_filter != PIPE_TEX_FILTER_NEAREST) {
1923 samp.UAddressMinFilterRoundingEnable = true;
1924 samp.VAddressMinFilterRoundingEnable = true;
1925 samp.RAddressMinFilterRoundingEnable = true;
1926 }
1927
1928 if (state->mag_img_filter != PIPE_TEX_FILTER_NEAREST) {
1929 samp.UAddressMagFilterRoundingEnable = true;
1930 samp.VAddressMagFilterRoundingEnable = true;
1931 samp.RAddressMagFilterRoundingEnable = true;
1932 }
1933
1934 if (state->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
1935 samp.ShadowFunction = translate_shadow_func(state->compare_func);
1936
1937 const float hw_max_lod = GEN_GEN >= 7 ? 14 : 13;
1938
1939 samp.LODPreClampMode = CLAMP_MODE_OGL;
1940 samp.MinLOD = CLAMP(min_lod, 0, hw_max_lod);
1941 samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
1942 samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);
1943
1944 /* .BorderColorPointer is filled in by iris_bind_sampler_states. */
1945 }
1946
1947 return cso;
1948 }
1949
1950 /**
1951 * The pipe->bind_sampler_states() driver hook.
1952 */
1953 static void
1954 iris_bind_sampler_states(struct pipe_context *ctx,
1955 enum pipe_shader_type p_stage,
1956 unsigned start, unsigned count,
1957 void **states)
1958 {
1959 struct iris_context *ice = (struct iris_context *) ctx;
1960 gl_shader_stage stage = stage_from_pipe(p_stage);
1961 struct iris_shader_state *shs = &ice->state.shaders[stage];
1962
1963 assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
1964
1965 bool dirty = false;
1966
1967 for (int i = 0; i < count; i++) {
1968 if (shs->samplers[start + i] != states[i]) {
1969 shs->samplers[start + i] = states[i];
1970 dirty = true;
1971 }
1972 }
1973
1974 if (dirty)
1975 ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
1976 }
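/* Note: IRIS_DIRTY_SAMPLER_STATES_VS << stage assumes the per-stage dirty
 * bits are laid out consecutively in gl_shader_stage order (VS, TCS, TES,
 * GS, FS, CS), matching the other "<< stage" uses in this file.
 */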
1977
1978 /**
1979 * Upload the sampler states into a contiguous area of GPU memory,
1980 * for 3DSTATE_SAMPLER_STATE_POINTERS_*.
1981 *
1982 * Also fill out the border color state pointers.
1983 */
1984 static void
1985 iris_upload_sampler_states(struct iris_context *ice, gl_shader_stage stage)
1986 {
1987 struct iris_shader_state *shs = &ice->state.shaders[stage];
1988 const struct shader_info *info = iris_get_shader_info(ice, stage);
1989
1990 /* We assume the state tracker will call pipe->bind_sampler_states()
1991 * if the program's number of textures changes.
1992 */
1993 unsigned count = info ? util_last_bit(info->textures_used) : 0;
1994
1995 if (!count)
1996 return;
1997
1998 /* Assemble the SAMPLER_STATEs into a contiguous table that lives
1999 * in the dynamic state memory zone, so we can point to it via the
2000 * 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
2001 */
2002 unsigned size = count * 4 * GENX(SAMPLER_STATE_length);
2003 uint32_t *map =
2004 upload_state(ice->state.dynamic_uploader, &shs->sampler_table, size, 32);
2005 if (unlikely(!map))
2006 return;
2007
2008 struct pipe_resource *res = shs->sampler_table.res;
2009 struct iris_bo *bo = iris_resource_bo(res);
2010
2011 iris_record_state_size(ice->state.sizes,
2012 bo->gtt_offset + shs->sampler_table.offset, size);
2013
2014 shs->sampler_table.offset += iris_bo_offset_from_base_address(bo);
2015
2016 /* Reserve space up front so every border color we upload lands in the same BO */
2017 iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);
2018
2019 ice->state.need_border_colors &= ~(1 << stage);
2020
2021 for (int i = 0; i < count; i++) {
2022 struct iris_sampler_state *state = shs->samplers[i];
2023 struct iris_sampler_view *tex = shs->textures[i];
2024
2025 if (!state) {
2026 memset(map, 0, 4 * GENX(SAMPLER_STATE_length));
2027 } else if (!state->needs_border_color) {
2028 memcpy(map, state->sampler_state, 4 * GENX(SAMPLER_STATE_length));
2029 } else {
2030 ice->state.need_border_colors |= 1 << stage;
2031
2032 /* We may need to swizzle the border color for format faking.
2033 * A/LA formats are faked as R/RG with 000R or R00G swizzles.
2034 * This means we need to move the border color's A channel into
2035 * the R or G channels so that those read swizzles will move it
2036 * back into A.
2037 */
2038 union pipe_color_union *color = &state->border_color;
2039 union pipe_color_union tmp;
2040 if (tex) {
2041 enum pipe_format internal_format = tex->res->internal_format;
2042
2043 if (util_format_is_alpha(internal_format)) {
2044 unsigned char swz[4] = {
2045 PIPE_SWIZZLE_W, PIPE_SWIZZLE_0,
2046 PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
2047 };
2048 util_format_apply_color_swizzle(&tmp, color, swz, true);
2049 color = &tmp;
2050 } else if (util_format_is_luminance_alpha(internal_format) &&
2051 internal_format != PIPE_FORMAT_L8A8_SRGB) {
2052 unsigned char swz[4] = {
2053 PIPE_SWIZZLE_X, PIPE_SWIZZLE_W,
2054 PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
2055 };
2056 util_format_apply_color_swizzle(&tmp, color, swz, true);
2057 color = &tmp;
2058 }
2059 }
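/* Example of the fix-up above: an A8 border color (0, 0, 0, a) is stored
 * as (a, 0, 0, 0), so the view's 000R read swizzle returns the intended
 * alpha value.
 */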
2060
2061 /* Stream out the border color and merge the pointer. */
2062 uint32_t offset = iris_upload_border_color(ice, color);
2063
2064 uint32_t dynamic[GENX(SAMPLER_STATE_length)];
2065 iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) {
2066 dyns.BorderColorPointer = offset;
2067 }
2068
2069 for (uint32_t j = 0; j < GENX(SAMPLER_STATE_length); j++)
2070 map[j] = state->sampler_state[j] | dynamic[j];
2071 }
2072
2073 map += GENX(SAMPLER_STATE_length);
2074 }
2075 }
2076
2077 static enum isl_channel_select
2078 fmt_swizzle(const struct iris_format_info *fmt, enum pipe_swizzle swz)
2079 {
2080 switch (swz) {
2081 case PIPE_SWIZZLE_X: return fmt->swizzle.r;
2082 case PIPE_SWIZZLE_Y: return fmt->swizzle.g;
2083 case PIPE_SWIZZLE_Z: return fmt->swizzle.b;
2084 case PIPE_SWIZZLE_W: return fmt->swizzle.a;
2085 case PIPE_SWIZZLE_1: return SCS_ONE;
2086 case PIPE_SWIZZLE_0: return SCS_ZERO;
2087 default: unreachable("invalid swizzle");
2088 }
2089 }
2090
2091 static void
2092 fill_buffer_surface_state(struct isl_device *isl_dev,
2093 struct iris_resource *res,
2094 void *map,
2095 enum isl_format format,
2096 struct isl_swizzle swizzle,
2097 unsigned offset,
2098 unsigned size)
2099 {
2100 const struct isl_format_layout *fmtl = isl_format_get_layout(format);
2101 const unsigned cpp = format == ISL_FORMAT_RAW ? 1 : fmtl->bpb / 8;
2102
2103 /* The ARB_texture_buffer_specification says:
2104 *
2105 * "The number of texels in the buffer texture's texel array is given by
2106 *
2107 * floor(<buffer_size> / (<components> * sizeof(<base_type>)),
2108 *
2109 * where <buffer_size> is the size of the buffer object, in basic
2110 * machine units and <components> and <base_type> are the element count
2111 * and base data type for elements, as specified in Table X.1. The
2112 * number of texels in the texel array is then clamped to the
2113 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
2114 *
2115 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
2116 * so that when ISL divides by stride to obtain the number of texels, that
2117 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
2118 */
2119 unsigned final_size =
2120 MIN3(size, res->bo->size - res->offset - offset,
2121 IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);
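/* Worked example (illustrative numbers): an R32G32B32A32_FLOAT view has
 * cpp = 16, so an oversized buffer is clamped to
 * IRIS_MAX_TEXTURE_BUFFER_SIZE * 16 bytes, and ISL's divide-by-stride then
 * yields at most IRIS_MAX_TEXTURE_BUFFER_SIZE texels.
 */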
2122
2123 isl_buffer_fill_state(isl_dev, map,
2124 .address = res->bo->gtt_offset + res->offset + offset,
2125 .size_B = final_size,
2126 .format = format,
2127 .swizzle = swizzle,
2128 .stride_B = cpp,
2129 .mocs = iris_mocs(res->bo, isl_dev));
2130 }
2131
2132 #define SURFACE_STATE_ALIGNMENT 64
2133
2134 /**
2135 * Allocate several contiguous SURFACE_STATE structures, one for each
2136 * supported auxiliary surface mode. This only allocates the CPU-side
2137 * copy, they will need to be uploaded later after they're filled in.
2138 */
2139 static void
2140 alloc_surface_states(struct iris_surface_state *surf_state,
2141 unsigned aux_usages)
2142 {
2143 const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
2144
2145 /* If this changes, update this to explicitly align pointers */
2146 STATIC_ASSERT(surf_size == SURFACE_STATE_ALIGNMENT);
2147
2148 assert(aux_usages != 0);
2149
2150 /* In case we're re-allocating them... */
2151 free(surf_state->cpu);
2152
2153 surf_state->num_states = util_bitcount(aux_usages);
2154 surf_state->cpu = calloc(surf_state->num_states, surf_size);
2155 surf_state->ref.offset = 0;
2156 pipe_resource_reference(&surf_state->ref.res, NULL);
2157
2158 assert(surf_state->cpu);
2159 }
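/* Example: a resource whose aux_usages mask has ISL_AUX_USAGE_NONE and
 * ISL_AUX_USAGE_CCS_E set gets two contiguous SURFACE_STATEs here, one per
 * bit. (Illustrative combination; the real modes come from the resource's
 * aux configuration.)
 */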
2160
2161 /**
2162 * Upload the CPU side SURFACE_STATEs into a GPU buffer.
2163 */
2164 static void
2165 upload_surface_states(struct u_upload_mgr *mgr,
2166 struct iris_surface_state *surf_state)
2167 {
2168 const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
2169 const unsigned bytes = surf_state->num_states * surf_size;
2170
2171 void *map =
2172 upload_state(mgr, &surf_state->ref, bytes, SURFACE_STATE_ALIGNMENT);
2173
2174 surf_state->ref.offset +=
2175 iris_bo_offset_from_base_address(iris_resource_bo(surf_state->ref.res));
2176
2177 if (map)
2178 memcpy(map, surf_state->cpu, bytes);
2179 }
2180
2181 /**
2182 * Update resource addresses in a set of SURFACE_STATE descriptors,
2183 * and re-upload them if necessary.
2184 */
2185 static bool
2186 update_surface_state_addrs(struct u_upload_mgr *mgr,
2187 struct iris_surface_state *surf_state,
2188 struct iris_bo *bo)
2189 {
2190 if (surf_state->bo_address == bo->gtt_offset)
2191 return false;
2192
2193 STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) % 64 == 0);
2194 STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_bits) == 64);
2195
2196 uint64_t *ss_addr = (uint64_t *) &surf_state->cpu[GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) / 32];
2197
2198 /* First, update the CPU copies. We assume no other fields exist in
2199 * the QWord containing Surface Base Address.
2200 */
2201 for (unsigned i = 0; i < surf_state->num_states; i++) {
2202 *ss_addr = *ss_addr - surf_state->bo_address + bo->gtt_offset;
2203 ss_addr = ((void *) ss_addr) + SURFACE_STATE_ALIGNMENT;
2204 }
2205
2206 /* Next, upload the updated copies to a GPU buffer. */
2207 upload_surface_states(mgr, surf_state);
2208
2209 surf_state->bo_address = bo->gtt_offset;
2210
2211 return true;
2212 }
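/* Rebasing sketch (made-up addresses): if these states were built against
 * bo_address 0x100000 and the BO now lives at 0x180000, each stored Surface
 * Base Address has 0x80000 added to it (addr - old_base + new_base) before
 * the whole block is re-uploaded.
 */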
2213
2214 #if GEN_GEN == 8
2215 /**
2216 * Return an ISL surface for use with non-coherent render target reads.
2217 *
2218 * In a few complex cases, we can't use the SURFACE_STATE for normal render
2219 * target writes. We need to make a separate one for sampling which refers
2220 * to the single slice of the texture being read.
2221 */
2222 static void
2223 get_rt_read_isl_surf(const struct gen_device_info *devinfo,
2224 struct iris_resource *res,
2225 enum pipe_texture_target target,
2226 struct isl_view *view,
2227 uint32_t *offset_to_tile,
2228 uint32_t *tile_x_sa,
2229 uint32_t *tile_y_sa,
2230 struct isl_surf *surf)
2231 {
2232 *surf = res->surf;
2233
2234 const enum isl_dim_layout dim_layout =
2235 iris_get_isl_dim_layout(devinfo, res->surf.tiling, target);
2236
2237 surf->dim = target_to_isl_surf_dim(target);
2238
2239 if (surf->dim_layout == dim_layout)
2240 return;
2241
2242 /* The layout of the specified texture target is not compatible with the
2243 * actual layout of the miptree structure in memory -- You're entering
2244 * dangerous territory, this can only possibly work if you only intended
2245 * to access a single level and slice of the texture, and the hardware
2246 * supports the tile offset feature in order to allow non-tile-aligned
2247 * base offsets, since we'll have to point the hardware to the first
2248 * texel of the level instead of relying on the usual base level/layer
2249 * controls.
2250 */
2251 assert(view->levels == 1 && view->array_len == 1);
2252 assert(*tile_x_sa == 0 && *tile_y_sa == 0);
2253
2254 *offset_to_tile = iris_resource_get_tile_offsets(res, view->base_level,
2255 view->base_array_layer,
2256 tile_x_sa, tile_y_sa);
2257 const unsigned l = view->base_level;
2258
2259 surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
2260 surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
2261 minify(surf->logical_level0_px.height, l);
2262 surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
2263 minify(surf->logical_level0_px.depth, l);
2264
2265 surf->logical_level0_px.array_len = 1;
2266 surf->levels = 1;
2267 surf->dim_layout = dim_layout;
2268
2269 view->base_level = 0;
2270 view->base_array_layer = 0;
2271 }
2272 #endif
2273
2274 static void
2275 fill_surface_state(struct isl_device *isl_dev,
2276 void *map,
2277 struct iris_resource *res,
2278 struct isl_surf *surf,
2279 struct isl_view *view,
2280 unsigned aux_usage,
2281 uint32_t extra_main_offset,
2282 uint32_t tile_x_sa,
2283 uint32_t tile_y_sa)
2284 {
2285 struct isl_surf_fill_state_info f = {
2286 .surf = surf,
2287 .view = view,
2288 .mocs = iris_mocs(res->bo, isl_dev),
2289 .address = res->bo->gtt_offset + res->offset + extra_main_offset,
2290 .x_offset_sa = tile_x_sa,
2291 .y_offset_sa = tile_y_sa,
2292 };
2293
2294 assert(!iris_resource_unfinished_aux_import(res));
2295
2296 if (aux_usage != ISL_AUX_USAGE_NONE) {
2297 f.aux_surf = &res->aux.surf;
2298 f.aux_usage = aux_usage;
2299 f.aux_address = res->aux.bo->gtt_offset + res->aux.offset;
2300
2301 struct iris_bo *clear_bo = NULL;
2302 uint64_t clear_offset = 0;
2303 f.clear_color =
2304 iris_resource_get_clear_color(res, &clear_bo, &clear_offset);
2305 if (clear_bo) {
2306 f.clear_address = clear_bo->gtt_offset + clear_offset;
2307 f.use_clear_address = isl_dev->info->gen > 9;
2308 }
2309 }
2310
2311 isl_surf_fill_state_s(isl_dev, map, &f);
2312 }
2313
2314 /**
2315 * The pipe->create_sampler_view() driver hook.
2316 */
2317 static struct pipe_sampler_view *
2318 iris_create_sampler_view(struct pipe_context *ctx,
2319 struct pipe_resource *tex,
2320 const struct pipe_sampler_view *tmpl)
2321 {
2322 struct iris_context *ice = (struct iris_context *) ctx;
2323 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2324 const struct gen_device_info *devinfo = &screen->devinfo;
2325 struct iris_sampler_view *isv = calloc(1, sizeof(struct iris_sampler_view));
2326
2327 if (!isv)
2328 return NULL;
2329
2330 /* initialize base object */
2331 isv->base = *tmpl;
2332 isv->base.context = ctx;
2333 isv->base.texture = NULL;
2334 pipe_reference_init(&isv->base.reference, 1);
2335 pipe_resource_reference(&isv->base.texture, tex);
2336
2337 if (util_format_is_depth_or_stencil(tmpl->format)) {
2338 struct iris_resource *zres, *sres;
2339 const struct util_format_description *desc =
2340 util_format_description(tmpl->format);
2341
2342 iris_get_depth_stencil_resources(tex, &zres, &sres);
2343
2344 tex = util_format_has_depth(desc) ? &zres->base : &sres->base;
2345 }
2346
2347 isv->res = (struct iris_resource *) tex;
2348
2349 alloc_surface_states(&isv->surface_state, isv->res->aux.sampler_usages);
2350
2351 isv->surface_state.bo_address = isv->res->bo->gtt_offset;
2352
2353 isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;
2354
2355 if (isv->base.target == PIPE_TEXTURE_CUBE ||
2356 isv->base.target == PIPE_TEXTURE_CUBE_ARRAY)
2357 usage |= ISL_SURF_USAGE_CUBE_BIT;
2358
2359 const struct iris_format_info fmt =
2360 iris_format_for_usage(devinfo, tmpl->format, usage);
2361
2362 isv->clear_color = isv->res->aux.clear_color;
2363
2364 isv->view = (struct isl_view) {
2365 .format = fmt.fmt,
2366 .swizzle = (struct isl_swizzle) {
2367 .r = fmt_swizzle(&fmt, tmpl->swizzle_r),
2368 .g = fmt_swizzle(&fmt, tmpl->swizzle_g),
2369 .b = fmt_swizzle(&fmt, tmpl->swizzle_b),
2370 .a = fmt_swizzle(&fmt, tmpl->swizzle_a),
2371 },
2372 .usage = usage,
2373 };
2374
2375 void *map = isv->surface_state.cpu;
2376
2377 /* Fill out SURFACE_STATE for this view. */
2378 if (tmpl->target != PIPE_BUFFER) {
2379 isv->view.base_level = tmpl->u.tex.first_level;
2380 isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;
2381 // XXX: do I need to port f9fd0cf4790cb2a530e75d1a2206dbb9d8af7cb2?
2382 isv->view.base_array_layer = tmpl->u.tex.first_layer;
2383 isv->view.array_len =
2384 tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
2385
2386 if (iris_resource_unfinished_aux_import(isv->res))
2387 iris_resource_finish_aux_import(&screen->base, isv->res);
2388
2389 unsigned aux_modes = isv->res->aux.sampler_usages;
2390 while (aux_modes) {
2391 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
2392
2393 /* If we have a multisampled depth buffer, do not create a sampler
2394 * surface state with HiZ.
2395 */
2396 fill_surface_state(&screen->isl_dev, map, isv->res, &isv->res->surf,
2397 &isv->view, aux_usage, 0, 0, 0);
2398
2399 map += SURFACE_STATE_ALIGNMENT;
2400 }
2401 } else {
2402 fill_buffer_surface_state(&screen->isl_dev, isv->res, map,
2403 isv->view.format, isv->view.swizzle,
2404 tmpl->u.buf.offset, tmpl->u.buf.size);
2405 }
2406
2407 upload_surface_states(ice->state.surface_uploader, &isv->surface_state);
2408
2409 return &isv->base;
2410 }
2411
2412 static void
2413 iris_sampler_view_destroy(struct pipe_context *ctx,
2414 struct pipe_sampler_view *state)
2415 {
2416 struct iris_sampler_view *isv = (void *) state;
2417 pipe_resource_reference(&state->texture, NULL);
2418 pipe_resource_reference(&isv->surface_state.ref.res, NULL);
2419 free(isv->surface_state.cpu);
2420 free(isv);
2421 }
2422
2423 /**
2424 * The pipe->create_surface() driver hook.
2425 *
2426 * In Gallium nomenclature, "surfaces" are a view of a resource that
2427 * can be bound as a render target or depth/stencil buffer.
2428 */
2429 static struct pipe_surface *
2430 iris_create_surface(struct pipe_context *ctx,
2431 struct pipe_resource *tex,
2432 const struct pipe_surface *tmpl)
2433 {
2434 struct iris_context *ice = (struct iris_context *) ctx;
2435 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2436 const struct gen_device_info *devinfo = &screen->devinfo;
2437
2438 isl_surf_usage_flags_t usage = 0;
2439 if (tmpl->writable)
2440 usage = ISL_SURF_USAGE_STORAGE_BIT;
2441 else if (util_format_is_depth_or_stencil(tmpl->format))
2442 usage = ISL_SURF_USAGE_DEPTH_BIT;
2443 else
2444 usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
2445
2446 const struct iris_format_info fmt =
2447 iris_format_for_usage(devinfo, tmpl->format, usage);
2448
2449 if ((usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
2450 !isl_format_supports_rendering(devinfo, fmt.fmt)) {
2451 /* Framebuffer validation will reject this invalid case, but it
2452 * hasn't had the opportunity yet. In the meantime, we need to
2453 * avoid hitting ISL asserts about unsupported formats below.
2454 */
2455 return NULL;
2456 }
2457
2458 struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
2459 struct pipe_surface *psurf = &surf->base;
2460 struct iris_resource *res = (struct iris_resource *) tex;
2461
2462 if (!surf)
2463 return NULL;
2464
2465 pipe_reference_init(&psurf->reference, 1);
2466 pipe_resource_reference(&psurf->texture, tex);
2467 psurf->context = ctx;
2468 psurf->format = tmpl->format;
2469 psurf->width = tex->width0;
2470 psurf->height = tex->height0;
2471 psurf->texture = tex;
2472 psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
2473 psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
2474 psurf->u.tex.level = tmpl->u.tex.level;
2475
2476 uint32_t array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
2477
2478 struct isl_view *view = &surf->view;
2479 *view = (struct isl_view) {
2480 .format = fmt.fmt,
2481 .base_level = tmpl->u.tex.level,
2482 .levels = 1,
2483 .base_array_layer = tmpl->u.tex.first_layer,
2484 .array_len = array_len,
2485 .swizzle = ISL_SWIZZLE_IDENTITY,
2486 .usage = usage,
2487 };
2488
2489 #if GEN_GEN == 8
2490 enum pipe_texture_target target = (tex->target == PIPE_TEXTURE_3D &&
2491 array_len == 1) ? PIPE_TEXTURE_2D :
2492 tex->target == PIPE_TEXTURE_1D_ARRAY ?
2493 PIPE_TEXTURE_2D_ARRAY : tex->target;
2494
2495 struct isl_view *read_view = &surf->read_view;
2496 *read_view = (struct isl_view) {
2497 .format = fmt.fmt,
2498 .base_level = tmpl->u.tex.level,
2499 .levels = 1,
2500 .base_array_layer = tmpl->u.tex.first_layer,
2501 .array_len = array_len,
2502 .swizzle = ISL_SWIZZLE_IDENTITY,
2503 .usage = ISL_SURF_USAGE_TEXTURE_BIT,
2504 };
2505 #endif
2506
2507 surf->clear_color = res->aux.clear_color;
2508
2509 /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */
2510 if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
2511 ISL_SURF_USAGE_STENCIL_BIT))
2512 return psurf;
2513
2514
2515 alloc_surface_states(&surf->surface_state, res->aux.possible_usages);
2516 surf->surface_state.bo_address = res->bo->gtt_offset;
2517
2518 #if GEN_GEN == 8
2519 alloc_surface_states(&surf->surface_state_read, res->aux.possible_usages);
2520 surf->surface_state_read.bo_address = res->bo->gtt_offset;
2521 #endif
2522
2523 if (!isl_format_is_compressed(res->surf.format)) {
2524 if (iris_resource_unfinished_aux_import(res))
2525 iris_resource_finish_aux_import(&screen->base, res);
2526
2527 void *map = surf->surface_state.cpu;
2528 UNUSED void *map_read = surf->surface_state_read.cpu;
2529
2530 /* This is a normal surface. Fill out a SURFACE_STATE for each possible
2531 * auxiliary surface mode and return the pipe_surface.
2532 */
2533 unsigned aux_modes = res->aux.possible_usages;
2534 while (aux_modes) {
2535 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
2536 fill_surface_state(&screen->isl_dev, map, res, &res->surf,
2537 view, aux_usage, 0, 0, 0);
2538 map += SURFACE_STATE_ALIGNMENT;
2539
2540 #if GEN_GEN == 8
2541 struct isl_surf surf;
2542 uint32_t offset_to_tile = 0, tile_x_sa = 0, tile_y_sa = 0;
2543 get_rt_read_isl_surf(devinfo, res, target, read_view,
2544 &offset_to_tile, &tile_x_sa, &tile_y_sa, &surf);
2545 fill_surface_state(&screen->isl_dev, map_read, res, &surf, read_view,
2546 aux_usage, offset_to_tile, tile_x_sa, tile_y_sa);
2547 map_read += SURFACE_STATE_ALIGNMENT;
2548 #endif
2549 }
2550
2551 upload_surface_states(ice->state.surface_uploader, &surf->surface_state);
2552
2553 #if GEN_GEN == 8
2554 upload_surface_states(ice->state.surface_uploader,
2555 &surf->surface_state_read);
2556 #endif
2557
2558 return psurf;
2559 }
2560
2561 /* The resource has a compressed format, which is not renderable, but we
2562 * have a renderable view format. We must be attempting to upload blocks
2563 * of compressed data via an uncompressed view.
2564 *
2565 * In this case, we can assume there are no auxiliary buffers, a single
2566 * miplevel, and that the resource is single-sampled. Gallium may try
2567 * to create an uncompressed view with multiple layers, however.
2568 */
2569 assert(!isl_format_is_compressed(fmt.fmt));
2570 assert(res->aux.possible_usages == 1 << ISL_AUX_USAGE_NONE);
2571 assert(res->surf.samples == 1);
2572 assert(view->levels == 1);
2573
2574 struct isl_surf isl_surf;
2575 uint32_t offset_B = 0, tile_x_sa = 0, tile_y_sa = 0;
2576
2577 if (view->base_level > 0) {
2578 /* We can't rely on the hardware's miplevel selection with such
2579 * a substantial lie about the format, so we select a single image
2580 * using the Tile X/Y Offset fields. In this case, we can't handle
2581 * multiple array slices.
2582 *
2583 * On Broadwell, HALIGN and VALIGN are specified in pixels and are
2584 * hard-coded to align to exactly the block size of the compressed
2585 * texture. This means that, when reinterpreted as a non-compressed
2586 * texture, the tile offsets may be anything and we can't rely on
2587 * X/Y Offset.
2588 *
2589 * Return NULL to force the state tracker to take fallback paths.
2590 */
2591 if (view->array_len > 1 || GEN_GEN == 8)
2592 return NULL;
2593
2594 const bool is_3d = res->surf.dim == ISL_SURF_DIM_3D;
2595 isl_surf_get_image_surf(&screen->isl_dev, &res->surf,
2596 view->base_level,
2597 is_3d ? 0 : view->base_array_layer,
2598 is_3d ? view->base_array_layer : 0,
2599 &isl_surf,
2600 &offset_B, &tile_x_sa, &tile_y_sa);
2601
2602 /* We use address and tile offsets to access a single level/layer
2603 * as a subimage, so reset level/layer so it doesn't offset again.
2604 */
2605 view->base_array_layer = 0;
2606 view->base_level = 0;
2607 } else {
2608 /* Level 0 doesn't require tile offsets, and the hardware can find
2609 * array slices using QPitch even with the format override, so we
2610 * can allow layers in this case. Copy the original ISL surface.
2611 */
2612 memcpy(&isl_surf, &res->surf, sizeof(isl_surf));
2613 }
2614
2615 /* Scale down the image dimensions by the block size. */
2616 const struct isl_format_layout *fmtl =
2617 isl_format_get_layout(res->surf.format);
2618 isl_surf.format = fmt.fmt;
2619 isl_surf.logical_level0_px = isl_surf_get_logical_level0_el(&isl_surf);
2620 isl_surf.phys_level0_sa = isl_surf_get_phys_level0_el(&isl_surf);
2621 tile_x_sa /= fmtl->bw;
2622 tile_y_sa /= fmtl->bh;
2623
2624 psurf->width = isl_surf.logical_level0_px.width;
2625 psurf->height = isl_surf.logical_level0_px.height;
2626
2627 struct isl_surf_fill_state_info f = {
2628 .surf = &isl_surf,
2629 .view = view,
2630 .mocs = iris_mocs(res->bo, &screen->isl_dev),
2631 .address = res->bo->gtt_offset + offset_B,
2632 .x_offset_sa = tile_x_sa,
2633 .y_offset_sa = tile_y_sa,
2634 };
2635
2636 isl_surf_fill_state_s(&screen->isl_dev, surf->surface_state.cpu, &f);
2637
2638 upload_surface_states(ice->state.surface_uploader, &surf->surface_state);
2639
2640 return psurf;
2641 }
2642
2643 #if GEN_GEN < 9
2644 static void
2645 fill_default_image_param(struct brw_image_param *param)
2646 {
2647 memset(param, 0, sizeof(*param));
2648 /* Set the swizzling shifts to all-ones to effectively disable swizzling --
2649 * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
2650 * detailed explanation of these parameters.
2651 */
2652 param->swizzling[0] = 0xff;
2653 param->swizzling[1] = 0xff;
2654 }
2655
2656 static void
2657 fill_buffer_image_param(struct brw_image_param *param,
2658 enum pipe_format pfmt,
2659 unsigned size)
2660 {
2661 const unsigned cpp = util_format_get_blocksize(pfmt);
2662
2663 fill_default_image_param(param);
2664 param->size[0] = size / cpp;
2665 param->stride[0] = cpp;
2666 }
2667 #else
2668 #define isl_surf_fill_image_param(x, ...)
2669 #define fill_default_image_param(x, ...)
2670 #define fill_buffer_image_param(x, ...)
2671 #endif
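/* On Gen9+ these helpers compile to nothing, so the brw_image_param
 * plumbing below disappears; only Gen8 consumes the image params (see the
 * sysval upload code later in this file).
 */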
2672
2673 /**
2674 * The pipe->set_shader_images() driver hook.
2675 */
2676 static void
2677 iris_set_shader_images(struct pipe_context *ctx,
2678 enum pipe_shader_type p_stage,
2679 unsigned start_slot, unsigned count,
2680 const struct pipe_image_view *p_images)
2681 {
2682 struct iris_context *ice = (struct iris_context *) ctx;
2683 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2684 gl_shader_stage stage = stage_from_pipe(p_stage);
2685 struct iris_shader_state *shs = &ice->state.shaders[stage];
2686 #if GEN_GEN == 8
2687 struct iris_genx_state *genx = ice->state.genx;
2688 struct brw_image_param *image_params = genx->shaders[stage].image_param;
2689 #endif
2690
2691 shs->bound_image_views &= ~u_bit_consecutive(start_slot, count);
2692
2693 for (unsigned i = 0; i < count; i++) {
2694 struct iris_image_view *iv = &shs->image[start_slot + i];
2695
2696 if (p_images && p_images[i].resource) {
2697 const struct pipe_image_view *img = &p_images[i];
2698 struct iris_resource *res = (void *) img->resource;
2699
2700 util_copy_image_view(&iv->base, img);
2701
2702 shs->bound_image_views |= 1 << (start_slot + i);
2703
2704 res->bind_history |= PIPE_BIND_SHADER_IMAGE;
2705 res->bind_stages |= 1 << stage;
2706
2707 enum isl_format isl_fmt = iris_image_view_get_format(ice, img);
2708
2709 /* Render compression with images is only supported on gen12+. */
2710 unsigned aux_usages = GEN_GEN >= 12 ? res->aux.possible_usages :
2711 1 << ISL_AUX_USAGE_NONE;
2712
2713 alloc_surface_states(&iv->surface_state, aux_usages);
2714 iv->surface_state.bo_address = res->bo->gtt_offset;
2715
2716 void *map = iv->surface_state.cpu;
2717
2718 if (res->base.target != PIPE_BUFFER) {
2719 struct isl_view view = {
2720 .format = isl_fmt,
2721 .base_level = img->u.tex.level,
2722 .levels = 1,
2723 .base_array_layer = img->u.tex.first_layer,
2724 .array_len = img->u.tex.last_layer - img->u.tex.first_layer + 1,
2725 .swizzle = ISL_SWIZZLE_IDENTITY,
2726 .usage = ISL_SURF_USAGE_STORAGE_BIT,
2727 };
2728
2729 /* ISL_FORMAT_RAW indicates the untyped read/write fallback. */
2730 if (isl_fmt == ISL_FORMAT_RAW) {
2731 fill_buffer_surface_state(&screen->isl_dev, res, map,
2732 isl_fmt, ISL_SWIZZLE_IDENTITY,
2733 0, res->bo->size);
2734 } else {
2735 unsigned aux_modes = aux_usages;
2736 while (aux_modes) {
2737 enum isl_aux_usage usage = u_bit_scan(&aux_modes);
2738
2739 fill_surface_state(&screen->isl_dev, map, res, &res->surf,
2740 &view, usage, 0, 0, 0);
2741
2742 map += SURFACE_STATE_ALIGNMENT;
2743 }
2744 }
2745
2746 isl_surf_fill_image_param(&screen->isl_dev,
2747 &image_params[start_slot + i],
2748 &res->surf, &view);
2749 } else {
2750 util_range_add(&res->base, &res->valid_buffer_range, img->u.buf.offset,
2751 img->u.buf.offset + img->u.buf.size);
2752
2753 fill_buffer_surface_state(&screen->isl_dev, res, map,
2754 isl_fmt, ISL_SWIZZLE_IDENTITY,
2755 img->u.buf.offset, img->u.buf.size);
2756 fill_buffer_image_param(&image_params[start_slot + i],
2757 img->format, img->u.buf.size);
2758 }
2759
2760 upload_surface_states(ice->state.surface_uploader, &iv->surface_state);
2761 } else {
2762 pipe_resource_reference(&iv->base.resource, NULL);
2763 pipe_resource_reference(&iv->surface_state.ref.res, NULL);
2764 fill_default_image_param(&image_params[start_slot + i]);
2765 }
2766 }
2767
2768 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
2769 ice->state.dirty |=
2770 stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
2771 : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
2772
2773 /* Broadwell also needs brw_image_params re-uploaded */
2774 if (GEN_GEN < 9) {
2775 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
2776 shs->sysvals_need_upload = true;
2777 }
2778 }
2779
2780
2781 /**
2782 * The pipe->set_sampler_views() driver hook.
2783 */
2784 static void
2785 iris_set_sampler_views(struct pipe_context *ctx,
2786 enum pipe_shader_type p_stage,
2787 unsigned start, unsigned count,
2788 struct pipe_sampler_view **views)
2789 {
2790 struct iris_context *ice = (struct iris_context *) ctx;
2791 gl_shader_stage stage = stage_from_pipe(p_stage);
2792 struct iris_shader_state *shs = &ice->state.shaders[stage];
2793
2794 shs->bound_sampler_views &= ~u_bit_consecutive(start, count);
2795
2796 for (unsigned i = 0; i < count; i++) {
2797 struct pipe_sampler_view *pview = views ? views[i] : NULL;
2798 pipe_sampler_view_reference((struct pipe_sampler_view **)
2799 &shs->textures[start + i], pview);
2800 struct iris_sampler_view *view = (void *) pview;
2801 if (view) {
2802 view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
2803 view->res->bind_stages |= 1 << stage;
2804
2805 shs->bound_sampler_views |= 1 << (start + i);
2806
2807 update_surface_state_addrs(ice->state.surface_uploader,
2808 &view->surface_state, view->res->bo);
2809 }
2810 }
2811
2812 ice->state.dirty |= (IRIS_DIRTY_BINDINGS_VS << stage);
2813 ice->state.dirty |=
2814 stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
2815 : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
2816 }
2817
2818 /**
2819 * The pipe->set_tess_state() driver hook.
2820 */
2821 static void
2822 iris_set_tess_state(struct pipe_context *ctx,
2823 const float default_outer_level[4],
2824 const float default_inner_level[2])
2825 {
2826 struct iris_context *ice = (struct iris_context *) ctx;
2827 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
2828
2829 memcpy(&ice->state.default_outer_level[0], &default_outer_level[0], 4 * sizeof(float));
2830 memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float));
2831
2832 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TCS;
2833 shs->sysvals_need_upload = true;
2834 }
2835
2836 static void
2837 iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
2838 {
2839 struct iris_surface *surf = (void *) p_surf;
2840 pipe_resource_reference(&p_surf->texture, NULL);
2841 pipe_resource_reference(&surf->surface_state.ref.res, NULL);
2842 pipe_resource_reference(&surf->surface_state_read.ref.res, NULL);
2843 free(surf->surface_state.cpu);
2844 free(surf);
2845 }
2846
2847 static void
2848 iris_set_clip_state(struct pipe_context *ctx,
2849 const struct pipe_clip_state *state)
2850 {
2851 struct iris_context *ice = (struct iris_context *) ctx;
2852 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
2853 struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
2854 struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
2855
2856 memcpy(&ice->state.clip_planes, state, sizeof(*state));
2857
2858 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS | IRIS_DIRTY_CONSTANTS_GS |
2859 IRIS_DIRTY_CONSTANTS_TES;
2860 shs->sysvals_need_upload = true;
2861 gshs->sysvals_need_upload = true;
2862 tshs->sysvals_need_upload = true;
2863 }
2864
2865 /**
2866 * The pipe->set_polygon_stipple() driver hook.
2867 */
2868 static void
2869 iris_set_polygon_stipple(struct pipe_context *ctx,
2870 const struct pipe_poly_stipple *state)
2871 {
2872 struct iris_context *ice = (struct iris_context *) ctx;
2873 memcpy(&ice->state.poly_stipple, state, sizeof(*state));
2874 ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
2875 }
2876
2877 /**
2878 * The pipe->set_sample_mask() driver hook.
2879 */
2880 static void
2881 iris_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
2882 {
2883 struct iris_context *ice = (struct iris_context *) ctx;
2884
2885 * We only support 16x MSAA, so we have 16 bits of sample mask.
2886 * st/mesa may pass us 0xffffffff though, meaning "enable all samples".
2887 */
2888 ice->state.sample_mask = sample_mask & 0xffff;
2889 ice->state.dirty |= IRIS_DIRTY_SAMPLE_MASK;
2890 }
2891
2892 /**
2893 * The pipe->set_scissor_states() driver hook.
2894 *
2895 * This corresponds to our SCISSOR_RECT state structures. It's an
2896 * exact match, so we just store them, and memcpy them out later.
2897 */
2898 static void
2899 iris_set_scissor_states(struct pipe_context *ctx,
2900 unsigned start_slot,
2901 unsigned num_scissors,
2902 const struct pipe_scissor_state *rects)
2903 {
2904 struct iris_context *ice = (struct iris_context *) ctx;
2905
2906 for (unsigned i = 0; i < num_scissors; i++) {
2907 if (rects[i].minx == rects[i].maxx || rects[i].miny == rects[i].maxy) {
2908 /* If the scissor was out of bounds and got clamped to 0 width/height
2909 * at the bounds, the subtraction of 1 from maximums could produce a
2910 * negative number and thus not clip anything. Instead, just provide
2911 * a min > max scissor inside the bounds, which produces the expected
2912 * no rendering.
2913 */
2914 ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
2915 .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
2916 };
2917 } else {
2918 ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
2919 .minx = rects[i].minx, .miny = rects[i].miny,
2920 .maxx = rects[i].maxx - 1, .maxy = rects[i].maxy - 1,
2921 };
2922 }
2923 }
2924
2925 ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
2926 }
2927
2928 /**
2929 * The pipe->set_stencil_ref() driver hook.
2930 *
2931 * This is added to 3DSTATE_WM_DEPTH_STENCIL dynamically at draw time.
2932 */
2933 static void
2934 iris_set_stencil_ref(struct pipe_context *ctx,
2935 const struct pipe_stencil_ref *state)
2936 {
2937 struct iris_context *ice = (struct iris_context *) ctx;
2938 memcpy(&ice->state.stencil_ref, state, sizeof(*state));
2939 if (GEN_GEN == 8)
2940 ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
2941 else
2942 ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
2943 }
2944
2945 static float
2946 viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
2947 {
2948 return copysignf(state->scale[axis], sign) + state->translate[axis];
2949 }
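/* For the usual GL viewport mapping, scale[axis] = width / 2 and
 * translate[axis] = x + width / 2, so viewport_extent(state, 0, -1.0f)
 * returns the left edge (x) and viewport_extent(state, 0, 1.0f) the right
 * edge (x + width).
 */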
2950
2951 /**
2952 * The pipe->set_viewport_states() driver hook.
2953 *
2954 * This corresponds to our SF_CLIP_VIEWPORT states. We can't calculate
2955 * the guardband yet, as we need the framebuffer dimensions, but we can
2956 * at least fill out the rest.
2957 */
2958 static void
2959 iris_set_viewport_states(struct pipe_context *ctx,
2960 unsigned start_slot,
2961 unsigned count,
2962 const struct pipe_viewport_state *states)
2963 {
2964 struct iris_context *ice = (struct iris_context *) ctx;
2965
2966 memcpy(&ice->state.viewports[start_slot], states, sizeof(*states) * count);
2967
2968 ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
2969
2970 if (ice->state.cso_rast && (!ice->state.cso_rast->depth_clip_near ||
2971 !ice->state.cso_rast->depth_clip_far))
2972 ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
2973 }
2974
2975 /**
2976 * The pipe->set_framebuffer_state() driver hook.
2977 *
2978 * Sets the current draw FBO, including color render targets, depth,
2979 * and stencil buffers.
2980 */
2981 static void
2982 iris_set_framebuffer_state(struct pipe_context *ctx,
2983 const struct pipe_framebuffer_state *state)
2984 {
2985 struct iris_context *ice = (struct iris_context *) ctx;
2986 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2987 struct isl_device *isl_dev = &screen->isl_dev;
2988 struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
2989 struct iris_resource *zres;
2990 struct iris_resource *stencil_res;
2991
2992 unsigned samples = util_framebuffer_get_num_samples(state);
2993 unsigned layers = util_framebuffer_get_num_layers(state);
2994
2995 if (cso->samples != samples) {
2996 ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
2997
2998 /* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */
2999 if (GEN_GEN >= 9 && (cso->samples == 16 || samples == 16))
3000 ice->state.dirty |= IRIS_DIRTY_FS;
3001 }
3002
3003 if (cso->nr_cbufs != state->nr_cbufs) {
3004 ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
3005 }
3006
3007 if ((cso->layers == 0) != (layers == 0)) {
3008 ice->state.dirty |= IRIS_DIRTY_CLIP;
3009 }
3010
3011 if (cso->width != state->width || cso->height != state->height) {
3012 ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
3013 }
3014
3015 if (cso->zsbuf || state->zsbuf) {
3016 ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
3017 }
3018
3019 util_copy_framebuffer_state(cso, state);
3020 cso->samples = samples;
3021 cso->layers = layers;
3022
3023 struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
3024
3025 struct isl_view view = {
3026 .base_level = 0,
3027 .levels = 1,
3028 .base_array_layer = 0,
3029 .array_len = 1,
3030 .swizzle = ISL_SWIZZLE_IDENTITY,
3031 };
3032
3033 struct isl_depth_stencil_hiz_emit_info info = { .view = &view };
3034
3035 if (cso->zsbuf) {
3036 iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres,
3037 &stencil_res);
3038
3039 view.base_level = cso->zsbuf->u.tex.level;
3040 view.base_array_layer = cso->zsbuf->u.tex.first_layer;
3041 view.array_len =
3042 cso->zsbuf->u.tex.last_layer - cso->zsbuf->u.tex.first_layer + 1;
3043
3044 if (zres) {
3045 view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
3046
3047 info.depth_surf = &zres->surf;
3048 info.depth_address = zres->bo->gtt_offset + zres->offset;
3049 info.mocs = iris_mocs(zres->bo, isl_dev);
3050
3051 view.format = zres->surf.format;
3052
3053 if (iris_resource_level_has_hiz(zres, view.base_level)) {
3054 info.hiz_usage = zres->aux.usage;
3055 info.hiz_surf = &zres->aux.surf;
3056 info.hiz_address = zres->aux.bo->gtt_offset + zres->aux.offset;
3057 }
3058 }
3059
3060 if (stencil_res) {
3061 view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
3062 info.stencil_aux_usage = stencil_res->aux.usage;
3063 info.stencil_surf = &stencil_res->surf;
3064 info.stencil_address = stencil_res->bo->gtt_offset + stencil_res->offset;
3065 if (!zres) {
3066 view.format = stencil_res->surf.format;
3067 info.mocs = iris_mocs(stencil_res->bo, isl_dev);
3068 }
3069 }
3070 }
3071
3072 isl_emit_depth_stencil_hiz_s(isl_dev, cso_z->packets, &info);
3073
3074 /* Make a null surface for unbound buffers */
3075 void *null_surf_map =
3076 upload_state(ice->state.surface_uploader, &ice->state.null_fb,
3077 4 * GENX(RENDER_SURFACE_STATE_length), 64);
3078 isl_null_fill_state(&screen->isl_dev, null_surf_map,
3079 isl_extent3d(MAX2(cso->width, 1),
3080 MAX2(cso->height, 1),
3081 cso->layers ? cso->layers : 1));
3082 ice->state.null_fb.offset +=
3083 iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));
3084
3085 /* Render target change */
3086 ice->state.dirty |= IRIS_DIRTY_BINDINGS_FS;
3087
3088 ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
3089
3090 ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
3091
3092 ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_FRAMEBUFFER];
3093
3094 if (GEN_GEN == 8)
3095 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
3096 }
3097
3098 /**
3099 * The pipe->set_constant_buffer() driver hook.
3100 *
3101 * This uploads any constant data in user buffers, and references
3102 * any UBO resources containing constant data.
3103 */
3104 static void
3105 iris_set_constant_buffer(struct pipe_context *ctx,
3106 enum pipe_shader_type p_stage, unsigned index,
3107 const struct pipe_constant_buffer *input)
3108 {
3109 struct iris_context *ice = (struct iris_context *) ctx;
3110 gl_shader_stage stage = stage_from_pipe(p_stage);
3111 struct iris_shader_state *shs = &ice->state.shaders[stage];
3112 struct pipe_shader_buffer *cbuf = &shs->constbuf[index];
3113
3114 /* TODO: Only do this if the buffer changes? */
3115 pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
3116
3117 if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
3118 shs->bound_cbufs |= 1u << index;
3119
3120 if (input->user_buffer) {
3121 void *map = NULL;
3122 pipe_resource_reference(&cbuf->buffer, NULL);
3123 u_upload_alloc(ice->ctx.const_uploader, 0, input->buffer_size, 64,
3124 &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
3125
3126 if (!cbuf->buffer) {
3127 /* Allocation was unsuccessful - just unbind */
3128 iris_set_constant_buffer(ctx, p_stage, index, NULL);
3129 return;
3130 }
3131
3132 assert(map);
3133 memcpy(map, input->user_buffer, input->buffer_size);
3134 } else if (input->buffer) {
3135 pipe_resource_reference(&cbuf->buffer, input->buffer);
3136
3137 cbuf->buffer_offset = input->buffer_offset;
3138 }
3139
3140 cbuf->buffer_size =
3141 MIN2(input->buffer_size,
3142 iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset);
3143
3144 struct iris_resource *res = (void *) cbuf->buffer;
3145 res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
3146 res->bind_stages |= 1 << stage;
3147 } else {
3148 shs->bound_cbufs &= ~(1u << index);
3149 pipe_resource_reference(&cbuf->buffer, NULL);
3150 }
3151
3152 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
3153 }
3154
3155 static void
3156 upload_sysvals(struct iris_context *ice,
3157 gl_shader_stage stage)
3158 {
3159 UNUSED struct iris_genx_state *genx = ice->state.genx;
3160 struct iris_shader_state *shs = &ice->state.shaders[stage];
3161
3162 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
3163 if (!shader || shader->num_system_values == 0)
3164 return;
3165
3166 assert(shader->num_cbufs > 0);
3167
3168 unsigned sysval_cbuf_index = shader->num_cbufs - 1;
3169 struct pipe_shader_buffer *cbuf = &shs->constbuf[sysval_cbuf_index];
3170 unsigned upload_size = shader->num_system_values * sizeof(uint32_t);
3171 uint32_t *map = NULL;
3172
3173 assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS);
3174 u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64,
3175 &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
3176
3177 for (int i = 0; i < shader->num_system_values; i++) {
3178 uint32_t sysval = shader->system_values[i];
3179 uint32_t value = 0;
3180
3181 if (BRW_PARAM_DOMAIN(sysval) == BRW_PARAM_DOMAIN_IMAGE) {
3182 #if GEN_GEN == 8
3183 unsigned img = BRW_PARAM_IMAGE_IDX(sysval);
3184 unsigned offset = BRW_PARAM_IMAGE_OFFSET(sysval);
3185 struct brw_image_param *param =
3186 &genx->shaders[stage].image_param[img];
3187
3188 assert(offset < sizeof(struct brw_image_param));
3189 value = ((uint32_t *) param)[offset];
3190 #endif
3191 } else if (sysval == BRW_PARAM_BUILTIN_ZERO) {
3192 value = 0;
3193 } else if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
3194 int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval);
3195 int comp = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval);
3196 value = fui(ice->state.clip_planes.ucp[plane][comp]);
3197 } else if (sysval == BRW_PARAM_BUILTIN_PATCH_VERTICES_IN) {
3198 if (stage == MESA_SHADER_TESS_CTRL) {
3199 value = ice->state.vertices_per_patch;
3200 } else {
3201 assert(stage == MESA_SHADER_TESS_EVAL);
3202 const struct shader_info *tcs_info =
3203 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
3204 if (tcs_info)
3205 value = tcs_info->tess.tcs_vertices_out;
3206 else
3207 value = ice->state.vertices_per_patch;
3208 }
3209 } else if (sysval >= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X &&
3210 sysval <= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_W) {
3211 unsigned i = sysval - BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
3212 value = fui(ice->state.default_outer_level[i]);
3213 } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X) {
3214 value = fui(ice->state.default_inner_level[0]);
3215 } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y) {
3216 value = fui(ice->state.default_inner_level[1]);
3217 } else {
3218 assert(!"unhandled system value");
3219 }
3220
3221 *map++ = value;
3222 }
3223
3224 cbuf->buffer_size = upload_size;
3225 iris_upload_ubo_ssbo_surf_state(ice, cbuf,
3226 &shs->constbuf_surf_state[sysval_cbuf_index], false);
3227
3228 shs->sysvals_need_upload = false;
3229 }
3230
3231 /**
3232 * The pipe->set_shader_buffers() driver hook.
3233 *
 * This binds SSBOs and ABOs (atomic counter buffers).  Unfortunately, we
 * need to stream out SURFACE_STATE here, as the buffer offset may change
 * each time.
3236 */
3237 static void
3238 iris_set_shader_buffers(struct pipe_context *ctx,
3239 enum pipe_shader_type p_stage,
3240 unsigned start_slot, unsigned count,
3241 const struct pipe_shader_buffer *buffers,
3242 unsigned writable_bitmask)
3243 {
3244 struct iris_context *ice = (struct iris_context *) ctx;
3245 gl_shader_stage stage = stage_from_pipe(p_stage);
3246 struct iris_shader_state *shs = &ice->state.shaders[stage];
3247
3248 unsigned modified_bits = u_bit_consecutive(start_slot, count);
3249
3250 shs->bound_ssbos &= ~modified_bits;
3251 shs->writable_ssbos &= ~modified_bits;
3252 shs->writable_ssbos |= writable_bitmask << start_slot;
3253
3254 for (unsigned i = 0; i < count; i++) {
3255 if (buffers && buffers[i].buffer) {
3256 struct iris_resource *res = (void *) buffers[i].buffer;
3257 struct pipe_shader_buffer *ssbo = &shs->ssbo[start_slot + i];
3258 struct iris_state_ref *surf_state =
3259 &shs->ssbo_surf_state[start_slot + i];
3260 pipe_resource_reference(&ssbo->buffer, &res->base);
3261 ssbo->buffer_offset = buffers[i].buffer_offset;
3262 ssbo->buffer_size =
3263 MIN2(buffers[i].buffer_size, res->bo->size - ssbo->buffer_offset);
3264
3265 shs->bound_ssbos |= 1 << (start_slot + i);
3266
3267 iris_upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, true);
3268
3269 res->bind_history |= PIPE_BIND_SHADER_BUFFER;
3270 res->bind_stages |= 1 << stage;
3271
3272 util_range_add(&res->base, &res->valid_buffer_range, ssbo->buffer_offset,
3273 ssbo->buffer_offset + ssbo->buffer_size);
3274 } else {
3275 pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL);
3276 pipe_resource_reference(&shs->ssbo_surf_state[start_slot + i].res,
3277 NULL);
3278 }
3279 }
3280
3281 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
3282 }
3283
3284 static void
3285 iris_delete_state(struct pipe_context *ctx, void *state)
3286 {
3287 free(state);
3288 }
3289
3290 /**
3291 * The pipe->set_vertex_buffers() driver hook.
3292 *
3293 * This translates pipe_vertex_buffer to our 3DSTATE_VERTEX_BUFFERS packet.
3294 */
3295 static void
3296 iris_set_vertex_buffers(struct pipe_context *ctx,
3297 unsigned start_slot, unsigned count,
3298 const struct pipe_vertex_buffer *buffers)
3299 {
3300 struct iris_context *ice = (struct iris_context *) ctx;
3301 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3302 struct iris_genx_state *genx = ice->state.genx;
3303
3304 ice->state.bound_vertex_buffers &= ~u_bit_consecutive64(start_slot, count);
3305
3306 for (unsigned i = 0; i < count; i++) {
3307 const struct pipe_vertex_buffer *buffer = buffers ? &buffers[i] : NULL;
3308 struct iris_vertex_buffer_state *state =
3309 &genx->vertex_buffers[start_slot + i];
3310
3311 if (!buffer) {
3312 pipe_resource_reference(&state->resource, NULL);
3313 continue;
3314 }
3315
3316 /* We may see user buffers that are NULL bindings. */
3317 assert(!(buffer->is_user_buffer && buffer->buffer.user != NULL));
3318
3319 pipe_resource_reference(&state->resource, buffer->buffer.resource);
3320 struct iris_resource *res = (void *) state->resource;
3321
3322 state->offset = (int) buffer->buffer_offset;
3323
3324 if (res) {
3325 ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
3326 res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
3327 }
3328
3329 iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
3330 vb.VertexBufferIndex = start_slot + i;
3331 vb.AddressModifyEnable = true;
3332 vb.BufferPitch = buffer->stride;
3333 if (res) {
3334 vb.BufferSize = res->base.width0 - (int) buffer->buffer_offset;
3335 vb.BufferStartingAddress =
3336 ro_bo(NULL, res->bo->gtt_offset + (int) buffer->buffer_offset);
3337 vb.MOCS = iris_mocs(res->bo, &screen->isl_dev);
3338 } else {
3339 vb.NullVertexBuffer = true;
3340 }
3341 }
3342 }
3343
3344 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
3345 }
3346
3347 /**
3348 * Gallium CSO for vertex elements.
3349 */
3350 struct iris_vertex_element_state {
3351 uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
3352 uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
3353 uint32_t edgeflag_ve[GENX(VERTEX_ELEMENT_STATE_length)];
3354 uint32_t edgeflag_vfi[GENX(3DSTATE_VF_INSTANCING_length)];
3355 unsigned count;
3356 };
3357
3358 /**
3359 * The pipe->create_vertex_elements() driver hook.
3360 *
3361 * This translates pipe_vertex_element to our 3DSTATE_VERTEX_ELEMENTS
3362 * and 3DSTATE_VF_INSTANCING commands. The vertex_elements and vf_instancing
 * arrays are ready to be emitted at draw time if no EdgeFlag or SGVs are
 * needed; when they are needed, we require information that's only available
 * at draw time.  We set up edgeflag_ve and edgeflag_vfi as alternative
 * versions of the last 3DSTATE_VERTEX_ELEMENT and 3DSTATE_VF_INSTANCING,
 * to be used at draw time if we detect that the vertex shader needs EdgeFlag.
3368 */
3369 static void *
3370 iris_create_vertex_elements(struct pipe_context *ctx,
3371 unsigned count,
3372 const struct pipe_vertex_element *state)
3373 {
3374 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3375 const struct gen_device_info *devinfo = &screen->devinfo;
3376 struct iris_vertex_element_state *cso =
3377 malloc(sizeof(struct iris_vertex_element_state));
3378
3379 cso->count = count;
3380
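   /* The DWordLength field is the total packet length in dwords, minus two:
    * one header dword plus VERTEX_ELEMENT_STATE_length dwords per element,
    * with at least one element even when count == 0.
    */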
3381 iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) {
3382 ve.DWordLength =
3383 1 + GENX(VERTEX_ELEMENT_STATE_length) * MAX2(count, 1) - 2;
3384 }
3385
3386 uint32_t *ve_pack_dest = &cso->vertex_elements[1];
3387 uint32_t *vfi_pack_dest = cso->vf_instancing;
3388
3389 if (count == 0) {
3390 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
3391 ve.Valid = true;
3392 ve.SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT;
3393 ve.Component0Control = VFCOMP_STORE_0;
3394 ve.Component1Control = VFCOMP_STORE_0;
3395 ve.Component2Control = VFCOMP_STORE_0;
3396 ve.Component3Control = VFCOMP_STORE_1_FP;
3397 }
3398
3399 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
3400 }
3401 }
3402
3403 for (int i = 0; i < count; i++) {
3404 const struct iris_format_info fmt =
3405 iris_format_for_usage(devinfo, state[i].src_format, 0);
3406 unsigned comp[4] = { VFCOMP_STORE_SRC, VFCOMP_STORE_SRC,
3407 VFCOMP_STORE_SRC, VFCOMP_STORE_SRC };
3408
3409 switch (isl_format_get_num_channels(fmt.fmt)) {
3410 case 0: comp[0] = VFCOMP_STORE_0; /* fallthrough */
3411 case 1: comp[1] = VFCOMP_STORE_0; /* fallthrough */
3412 case 2: comp[2] = VFCOMP_STORE_0; /* fallthrough */
3413 case 3:
3414 comp[3] = isl_format_has_int_channel(fmt.fmt) ? VFCOMP_STORE_1_INT
3415 : VFCOMP_STORE_1_FP;
3416 break;
3417 }
3418 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
3419 ve.EdgeFlagEnable = false;
3420 ve.VertexBufferIndex = state[i].vertex_buffer_index;
3421 ve.Valid = true;
3422 ve.SourceElementOffset = state[i].src_offset;
3423 ve.SourceElementFormat = fmt.fmt;
3424 ve.Component0Control = comp[0];
3425 ve.Component1Control = comp[1];
3426 ve.Component2Control = comp[2];
3427 ve.Component3Control = comp[3];
3428 }
3429
3430 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
3431 vi.VertexElementIndex = i;
3432 vi.InstancingEnable = state[i].instance_divisor > 0;
3433 vi.InstanceDataStepRate = state[i].instance_divisor;
3434 }
3435
3436 ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
3437 vfi_pack_dest += GENX(3DSTATE_VF_INSTANCING_length);
3438 }
3439
   /* An alternative version of the last VE and VFI is stored, so it can be
    * used at draw time in case the vertex shader uses EdgeFlag.
    */
3443 if (count) {
3444 const unsigned edgeflag_index = count - 1;
3445 const struct iris_format_info fmt =
3446 iris_format_for_usage(devinfo, state[edgeflag_index].src_format, 0);
3447 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), cso->edgeflag_ve, ve) {
         ve.EdgeFlagEnable = true;
3449 ve.VertexBufferIndex = state[edgeflag_index].vertex_buffer_index;
3450 ve.Valid = true;
3451 ve.SourceElementOffset = state[edgeflag_index].src_offset;
3452 ve.SourceElementFormat = fmt.fmt;
3453 ve.Component0Control = VFCOMP_STORE_SRC;
3454 ve.Component1Control = VFCOMP_STORE_0;
3455 ve.Component2Control = VFCOMP_STORE_0;
3456 ve.Component3Control = VFCOMP_STORE_0;
3457 }
3458 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), cso->edgeflag_vfi, vi) {
         /* The vi.VertexElementIndex of the EdgeFlag Vertex Element is
          * filled in at draw time, as it needs to change if SGVs are emitted.
3461 */
3462 vi.InstancingEnable = state[edgeflag_index].instance_divisor > 0;
3463 vi.InstanceDataStepRate = state[edgeflag_index].instance_divisor;
3464 }
3465 }
3466
3467 return cso;
3468 }
3469
3470 /**
3471 * The pipe->bind_vertex_elements_state() driver hook.
3472 */
3473 static void
3474 iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
3475 {
3476 struct iris_context *ice = (struct iris_context *) ctx;
3477 struct iris_vertex_element_state *old_cso = ice->state.cso_vertex_elements;
3478 struct iris_vertex_element_state *new_cso = state;
3479
   /* 3DSTATE_VF_SGVS overrides the last VE, so if the count is changing,
3481 * we need to re-emit it to ensure we're overriding the right one.
3482 */
3483 if (new_cso && cso_changed(count))
3484 ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
3485
3486 ice->state.cso_vertex_elements = state;
3487 ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
3488 }
3489
3490 /**
3491 * The pipe->create_stream_output_target() driver hook.
3492 *
3493 * "Target" here refers to a destination buffer. We translate this into
3494 * a 3DSTATE_SO_BUFFER packet. We can handle most fields, but don't yet
3495 * know which buffer this represents, or whether we ought to zero the
3496 * write-offsets, or append. Those are handled in the set() hook.
3497 */
3498 static struct pipe_stream_output_target *
3499 iris_create_stream_output_target(struct pipe_context *ctx,
3500 struct pipe_resource *p_res,
3501 unsigned buffer_offset,
3502 unsigned buffer_size)
3503 {
3504 struct iris_resource *res = (void *) p_res;
3505 struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
3506 if (!cso)
3507 return NULL;
3508
3509 res->bind_history |= PIPE_BIND_STREAM_OUTPUT;
3510
3511 pipe_reference_init(&cso->base.reference, 1);
3512 pipe_resource_reference(&cso->base.buffer, p_res);
3513 cso->base.buffer_offset = buffer_offset;
3514 cso->base.buffer_size = buffer_size;
3515 cso->base.context = ctx;
3516
3517 util_range_add(&res->base, &res->valid_buffer_range, buffer_offset,
3518 buffer_offset + buffer_size);
3519
3520 upload_state(ctx->stream_uploader, &cso->offset, sizeof(uint32_t), 4);
3521
3522 return &cso->base;
3523 }
3524
3525 static void
3526 iris_stream_output_target_destroy(struct pipe_context *ctx,
3527 struct pipe_stream_output_target *state)
3528 {
3529 struct iris_stream_output_target *cso = (void *) state;
3530
3531 pipe_resource_reference(&cso->base.buffer, NULL);
3532 pipe_resource_reference(&cso->offset.res, NULL);
3533
3534 free(cso);
3535 }
3536
3537 /**
3538 * The pipe->set_stream_output_targets() driver hook.
3539 *
3540 * At this point, we know which targets are bound to a particular index,
3541 * and also whether we want to append or start over. We can finish the
3542 * 3DSTATE_SO_BUFFER packets we started earlier.
3543 */
3544 static void
3545 iris_set_stream_output_targets(struct pipe_context *ctx,
3546 unsigned num_targets,
3547 struct pipe_stream_output_target **targets,
3548 const unsigned *offsets)
3549 {
3550 struct iris_context *ice = (struct iris_context *) ctx;
3551 struct iris_genx_state *genx = ice->state.genx;
3552 uint32_t *so_buffers = genx->so_buffers;
3553 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3554
3555 const bool active = num_targets > 0;
3556 if (ice->state.streamout_active != active) {
3557 ice->state.streamout_active = active;
3558 ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
3559
3560 /* We only emit 3DSTATE_SO_DECL_LIST when streamout is active, because
3561 * it's a non-pipelined command. If we're switching streamout on, we
3562 * may have missed emitting it earlier, so do so now. (We're already
3563 * taking a stall to update 3DSTATE_SO_BUFFERS anyway...)
3564 */
3565 if (active) {
3566 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST;
3567 } else {
3568 uint32_t flush = 0;
3569 for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
3570 struct iris_stream_output_target *tgt =
3571 (void *) ice->state.so_target[i];
3572 if (tgt) {
3573 struct iris_resource *res = (void *) tgt->base.buffer;
3574
3575 flush |= iris_flush_bits_for_history(res);
3576 iris_dirty_for_history(ice, res);
3577 }
3578 }
3579 iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
3580 "make streamout results visible", flush);
3581 }
3582 }
3583
3584 for (int i = 0; i < 4; i++) {
3585 pipe_so_target_reference(&ice->state.so_target[i],
3586 i < num_targets ? targets[i] : NULL);
3587 }
3588
3589 /* No need to update 3DSTATE_SO_BUFFER unless SOL is active. */
3590 if (!active)
3591 return;
3592
3593 for (unsigned i = 0; i < 4; i++,
3594 so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
3595
3596 struct iris_stream_output_target *tgt = (void *) ice->state.so_target[i];
3597 unsigned offset = offsets[i];
3598
3599 if (!tgt) {
3600 iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
3601 #if GEN_GEN < 12
3602 sob.SOBufferIndex = i;
3603 #else
3604 sob._3DCommandOpcode = 0;
3605 sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
3606 #endif
3607 }
3608 continue;
3609 }
3610
3611 struct iris_resource *res = (void *) tgt->base.buffer;
3612
3613 /* Note that offsets[i] will either be 0, causing us to zero
3614 * the value in the buffer, or 0xFFFFFFFF, which happens to mean
3615 * "continue appending at the existing offset."
3616 */
3617 assert(offset == 0 || offset == 0xFFFFFFFF);
3618
3619 /* We might be called by Begin (offset = 0), Pause, then Resume
3620 * (offset = 0xFFFFFFFF) before ever drawing (where these commands
3621 * will actually be sent to the GPU). In this case, we don't want
3622 * to append - we still want to do our initial zeroing.
3623 */
3624 if (!tgt->zeroed)
3625 offset = 0;
3626
3627 iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
3628 #if GEN_GEN < 12
3629 sob.SOBufferIndex = i;
3630 #else
3631 sob._3DCommandOpcode = 0;
3632 sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
3633 #endif
3634 sob.SurfaceBaseAddress =
3635 rw_bo(NULL, res->bo->gtt_offset + tgt->base.buffer_offset);
3636 sob.SOBufferEnable = true;
3637 sob.StreamOffsetWriteEnable = true;
3638 sob.StreamOutputBufferOffsetAddressEnable = true;
3639 sob.MOCS = iris_mocs(res->bo, &screen->isl_dev);
3640
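         /* SurfaceSize is expressed in dwords, minus one. */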
3641 sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
3642 sob.StreamOffset = offset;
3643 sob.StreamOutputBufferOffsetAddress =
3644 rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset +
3645 tgt->offset.offset);
3646 }
3647 }
3648
3649 ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
3650 }
3651
3652 /**
3653 * An iris-vtable helper for encoding the 3DSTATE_SO_DECL_LIST and
3654 * 3DSTATE_STREAMOUT packets.
3655 *
3656 * 3DSTATE_SO_DECL_LIST is a list of shader outputs we want the streamout
3657 * hardware to record. We can create it entirely based on the shader, with
3658 * no dynamic state dependencies.
3659 *
3660 * 3DSTATE_STREAMOUT is an annoying mix of shader-based information and
3661 * state-based settings. We capture the shader-related ones here, and merge
3662 * the rest in at draw time.
3663 */
3664 static uint32_t *
3665 iris_create_so_decl_list(const struct pipe_stream_output_info *info,
3666 const struct brw_vue_map *vue_map)
3667 {
3668 struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
3669 int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3670 int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3671 int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3672 int max_decls = 0;
3673 STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);
3674
3675 memset(so_decl, 0, sizeof(so_decl));
3676
3677 /* Construct the list of SO_DECLs to be emitted. The formatting of the
3678 * command feels strange -- each dword pair contains a SO_DECL per stream.
3679 */
3680 for (unsigned i = 0; i < info->num_outputs; i++) {
3681 const struct pipe_stream_output *output = &info->output[i];
3682 const int buffer = output->output_buffer;
3683 const int varying = output->register_index;
3684 const unsigned stream_id = output->stream;
3685 assert(stream_id < MAX_VERTEX_STREAMS);
3686
3687 buffer_mask[stream_id] |= 1 << buffer;
3688
3689 assert(vue_map->varying_to_slot[varying] >= 0);
3690
3691 /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
3692 * array. Instead, it simply increments DstOffset for the following
3693 * input by the number of components that should be skipped.
3694 *
3695 * Our hardware is unusual in that it requires us to program SO_DECLs
3696 * for fake "hole" components, rather than simply taking the offset
3697 * for each real varying. Each hole can have size 1, 2, 3, or 4; we
3698 * program as many size = 4 holes as we can, then a final hole to
3699 * accommodate the final 1, 2, or 3 remaining.
3700 */
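      /* For example, skipping 7 components produces two holes: one with
       * ComponentMask 0xf (a size-4 hole), then one with ComponentMask 0x7
       * (a size-3 hole).
       */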
3701 int skip_components = output->dst_offset - next_offset[buffer];
3702
3703 while (skip_components > 0) {
3704 so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
3705 .HoleFlag = 1,
3706 .OutputBufferSlot = output->output_buffer,
3707 .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
3708 };
3709 skip_components -= 4;
3710 }
3711
3712 next_offset[buffer] = output->dst_offset + output->num_components;
3713
3714 so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
3715 .OutputBufferSlot = output->output_buffer,
3716 .RegisterIndex = vue_map->varying_to_slot[varying],
3717 .ComponentMask =
3718 ((1 << output->num_components) - 1) << output->start_component,
3719 };
3720
3721 if (decls[stream_id] > max_decls)
3722 max_decls = decls[stream_id];
3723 }
3724
3725 unsigned dwords = GENX(3DSTATE_STREAMOUT_length) + (3 + 2 * max_decls);
3726 uint32_t *map = ralloc_size(NULL, sizeof(uint32_t) * dwords);
3727 uint32_t *so_decl_map = map + GENX(3DSTATE_STREAMOUT_length);
3728
3729 iris_pack_command(GENX(3DSTATE_STREAMOUT), map, sol) {
3730 int urb_entry_read_offset = 0;
3731 int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
3732 urb_entry_read_offset;
3733
3734 /* We always read the whole vertex. This could be reduced at some
3735 * point by reading less and offsetting the register index in the
3736 * SO_DECLs.
3737 */
3738 sol.Stream0VertexReadOffset = urb_entry_read_offset;
3739 sol.Stream0VertexReadLength = urb_entry_read_length - 1;
3740 sol.Stream1VertexReadOffset = urb_entry_read_offset;
3741 sol.Stream1VertexReadLength = urb_entry_read_length - 1;
3742 sol.Stream2VertexReadOffset = urb_entry_read_offset;
3743 sol.Stream2VertexReadLength = urb_entry_read_length - 1;
3744 sol.Stream3VertexReadOffset = urb_entry_read_offset;
3745 sol.Stream3VertexReadLength = urb_entry_read_length - 1;
3746
3747 /* Set buffer pitches; 0 means unbound. */
3748 sol.Buffer0SurfacePitch = 4 * info->stride[0];
3749 sol.Buffer1SurfacePitch = 4 * info->stride[1];
3750 sol.Buffer2SurfacePitch = 4 * info->stride[2];
3751 sol.Buffer3SurfacePitch = 4 * info->stride[3];
3752 }
3753
3754 iris_pack_command(GENX(3DSTATE_SO_DECL_LIST), so_decl_map, list) {
3755 list.DWordLength = 3 + 2 * max_decls - 2;
3756 list.StreamtoBufferSelects0 = buffer_mask[0];
3757 list.StreamtoBufferSelects1 = buffer_mask[1];
3758 list.StreamtoBufferSelects2 = buffer_mask[2];
3759 list.StreamtoBufferSelects3 = buffer_mask[3];
3760 list.NumEntries0 = decls[0];
3761 list.NumEntries1 = decls[1];
3762 list.NumEntries2 = decls[2];
3763 list.NumEntries3 = decls[3];
3764 }
3765
3766 for (int i = 0; i < max_decls; i++) {
3767 iris_pack_state(GENX(SO_DECL_ENTRY), so_decl_map + 3 + i * 2, entry) {
3768 entry.Stream0Decl = so_decl[0][i];
3769 entry.Stream1Decl = so_decl[1][i];
3770 entry.Stream2Decl = so_decl[2][i];
3771 entry.Stream3Decl = so_decl[3][i];
3772 }
3773 }
3774
3775 return map;
3776 }
3777
3778 static void
3779 iris_compute_sbe_urb_read_interval(uint64_t fs_input_slots,
3780 const struct brw_vue_map *last_vue_map,
3781 bool two_sided_color,
3782 unsigned *out_offset,
3783 unsigned *out_length)
3784 {
3785 /* The compiler computes the first URB slot without considering COL/BFC
3786 * swizzling (because it doesn't know whether it's enabled), so we need
3787 * to do that here too. This may result in a smaller offset, which
3788 * should be safe.
3789 */
3790 const unsigned first_slot =
3791 brw_compute_first_urb_slot_required(fs_input_slots, last_vue_map);
3792
3793 /* This becomes the URB read offset (counted in pairs of slots). */
3794 assert(first_slot % 2 == 0);
3795 *out_offset = first_slot / 2;
3796
3797 /* We need to adjust the inputs read to account for front/back color
3798 * swizzling, as it can make the URB length longer.
3799 */
3800 for (int c = 0; c <= 1; c++) {
3801 if (fs_input_slots & (VARYING_BIT_COL0 << c)) {
3802 /* If two sided color is enabled, the fragment shader's gl_Color
3803 * (COL0) input comes from either the gl_FrontColor (COL0) or
3804 * gl_BackColor (BFC0) input varyings. Mark BFC as used, too.
3805 */
3806 if (two_sided_color)
3807 fs_input_slots |= (VARYING_BIT_BFC0 << c);
3808
3809 /* If front color isn't written, we opt to give them back color
3810 * instead of an undefined value. Switch from COL to BFC.
3811 */
3812 if (last_vue_map->varying_to_slot[VARYING_SLOT_COL0 + c] == -1) {
3813 fs_input_slots &= ~(VARYING_BIT_COL0 << c);
3814 fs_input_slots |= (VARYING_BIT_BFC0 << c);
3815 }
3816 }
3817 }
3818
3819 /* Compute the minimum URB Read Length necessary for the FS inputs.
3820 *
3821 * From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
3822 * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
3823 *
3824 * "This field should be set to the minimum length required to read the
3825 * maximum source attribute. The maximum source attribute is indicated
3826 * by the maximum value of the enabled Attribute # Source Attribute if
3827 * Attribute Swizzle Enable is set, Number of Output Attributes-1 if
3828 * enable is not set.
3829 * read_length = ceiling((max_source_attr + 1) / 2)
3830 *
3831 * [errata] Corruption/Hang possible if length programmed larger than
3832 * recommended"
3833 *
3834 * Similar text exists for Ivy Bridge.
3835 *
3836 * We find the last URB slot that's actually read by the FS.
3837 */
3838 unsigned last_read_slot = last_vue_map->num_slots - 1;
3839 while (last_read_slot > first_slot && !(fs_input_slots &
3840 (1ull << last_vue_map->slot_to_varying[last_read_slot])))
3841 --last_read_slot;
3842
3843 /* The URB read length is the difference of the two, counted in pairs. */
3844 *out_length = DIV_ROUND_UP(last_read_slot - first_slot + 1, 2);
3845 }
3846
3847 static void
3848 iris_emit_sbe_swiz(struct iris_batch *batch,
3849 const struct iris_context *ice,
3850 unsigned urb_read_offset,
3851 unsigned sprite_coord_enables)
3852 {
3853 struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = {};
3854 const struct brw_wm_prog_data *wm_prog_data = (void *)
3855 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
3856 const struct brw_vue_map *vue_map = ice->shaders.last_vue_map;
3857 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
3858
3859 /* XXX: this should be generated when putting programs in place */
3860
3861 for (uint8_t idx = 0; idx < wm_prog_data->urb_setup_attribs_count; idx++) {
3862 const uint8_t fs_attr = wm_prog_data->urb_setup_attribs[idx];
3863 const int input_index = wm_prog_data->urb_setup[fs_attr];
3864 if (input_index < 0 || input_index >= 16)
3865 continue;
3866
3867 struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr =
3868 &attr_overrides[input_index];
3869 int slot = vue_map->varying_to_slot[fs_attr];
3870
3871 /* Viewport and Layer are stored in the VUE header. We need to override
3872 * them to zero if earlier stages didn't write them, as GL requires that
3873 * they read back as zero when not explicitly set.
3874 */
3875 switch (fs_attr) {
3876 case VARYING_SLOT_VIEWPORT:
3877 case VARYING_SLOT_LAYER:
3878 attr->ComponentOverrideX = true;
3879 attr->ComponentOverrideW = true;
3880 attr->ConstantSource = CONST_0000;
3881
3882 if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
3883 attr->ComponentOverrideY = true;
3884 if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
3885 attr->ComponentOverrideZ = true;
3886 continue;
3887
3888 case VARYING_SLOT_PRIMITIVE_ID:
3889 /* Override if the previous shader stage didn't write gl_PrimitiveID. */
3890 if (slot == -1) {
3891 attr->ComponentOverrideX = true;
3892 attr->ComponentOverrideY = true;
3893 attr->ComponentOverrideZ = true;
3894 attr->ComponentOverrideW = true;
3895 attr->ConstantSource = PRIM_ID;
3896 continue;
3897 }
3898
3899 default:
3900 break;
3901 }
3902
3903 if (sprite_coord_enables & (1 << input_index))
3904 continue;
3905
      /* If only a back color was written, and no front color, use the back
       * color instead of an undefined value.
       */
3909 if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
3910 slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
3911 if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
3912 slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];
3913
3914 /* Not written by the previous stage - undefined. */
3915 if (slot == -1) {
3916 attr->ComponentOverrideX = true;
3917 attr->ComponentOverrideY = true;
3918 attr->ComponentOverrideZ = true;
3919 attr->ComponentOverrideW = true;
3920 attr->ConstantSource = CONST_0001_FLOAT;
3921 continue;
3922 }
3923
3924 /* Compute the location of the attribute relative to the read offset,
3925 * which is counted in 256-bit increments (two 128-bit VUE slots).
3926 */
3927 const int source_attr = slot - 2 * urb_read_offset;
3928 assert(source_attr >= 0 && source_attr <= 32);
3929 attr->SourceAttribute = source_attr;
3930
3931 /* If we are doing two-sided color, and the VUE slot following this one
3932 * represents a back-facing color, then we need to instruct the SF unit
3933 * to do back-facing swizzling.
3934 */
3935 if (cso_rast->light_twoside &&
3936 ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
3937 vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
3938 (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
3939 vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1)))
3940 attr->SwizzleSelect = INPUTATTR_FACING;
3941 }
3942
3943 iris_emit_cmd(batch, GENX(3DSTATE_SBE_SWIZ), sbes) {
3944 for (int i = 0; i < 16; i++)
3945 sbes.Attribute[i] = attr_overrides[i];
3946 }
3947 }
3948
3949 static unsigned
3950 iris_calculate_point_sprite_overrides(const struct brw_wm_prog_data *prog_data,
3951 const struct iris_rasterizer_state *cso)
3952 {
3953 unsigned overrides = 0;
3954
3955 if (prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
3956 overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_PNTC];
3957
3958 for (int i = 0; i < 8; i++) {
3959 if ((cso->sprite_coord_enable & (1 << i)) &&
3960 prog_data->urb_setup[VARYING_SLOT_TEX0 + i] != -1)
3961 overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_TEX0 + i];
3962 }
3963
3964 return overrides;
3965 }
3966
3967 static void
3968 iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
3969 {
3970 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
3971 const struct brw_wm_prog_data *wm_prog_data = (void *)
3972 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
3973 const struct shader_info *fs_info =
3974 iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
3975
3976 unsigned urb_read_offset, urb_read_length;
3977 iris_compute_sbe_urb_read_interval(fs_info->inputs_read,
3978 ice->shaders.last_vue_map,
3979 cso_rast->light_twoside,
3980 &urb_read_offset, &urb_read_length);
3981
3982 unsigned sprite_coord_overrides =
3983 iris_calculate_point_sprite_overrides(wm_prog_data, cso_rast);
3984
3985 iris_emit_cmd(batch, GENX(3DSTATE_SBE), sbe) {
3986 sbe.AttributeSwizzleEnable = true;
3987 sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
3988 sbe.PointSpriteTextureCoordinateOrigin = cso_rast->sprite_coord_mode;
3989 sbe.VertexURBEntryReadOffset = urb_read_offset;
3990 sbe.VertexURBEntryReadLength = urb_read_length;
3991 sbe.ForceVertexURBEntryReadOffset = true;
3992 sbe.ForceVertexURBEntryReadLength = true;
3993 sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
3994 sbe.PointSpriteTextureCoordinateEnable = sprite_coord_overrides;
3995 #if GEN_GEN >= 9
3996 for (int i = 0; i < 32; i++) {
3997 sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
3998 }
3999 #endif
4000 }
4001
4002 iris_emit_sbe_swiz(batch, ice, urb_read_offset, sprite_coord_overrides);
4003 }
4004
4005 /* ------------------------------------------------------------------- */
4006
4007 /**
4008 * Populate VS program key fields based on the current state.
4009 */
4010 static void
4011 iris_populate_vs_key(const struct iris_context *ice,
4012 const struct shader_info *info,
4013 gl_shader_stage last_stage,
4014 struct iris_vs_prog_key *key)
4015 {
4016 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4017
4018 if (info->clip_distance_array_size == 0 &&
4019 (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4020 last_stage == MESA_SHADER_VERTEX)
4021 key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4022 }
4023
4024 /**
4025 * Populate TCS program key fields based on the current state.
4026 */
4027 static void
4028 iris_populate_tcs_key(const struct iris_context *ice,
4029 struct iris_tcs_prog_key *key)
4030 {
4031 }
4032
4033 /**
4034 * Populate TES program key fields based on the current state.
4035 */
4036 static void
4037 iris_populate_tes_key(const struct iris_context *ice,
4038 const struct shader_info *info,
4039 gl_shader_stage last_stage,
4040 struct iris_tes_prog_key *key)
4041 {
4042 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4043
4044 if (info->clip_distance_array_size == 0 &&
4045 (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4046 last_stage == MESA_SHADER_TESS_EVAL)
4047 key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4048 }
4049
4050 /**
4051 * Populate GS program key fields based on the current state.
4052 */
4053 static void
4054 iris_populate_gs_key(const struct iris_context *ice,
4055 const struct shader_info *info,
4056 gl_shader_stage last_stage,
4057 struct iris_gs_prog_key *key)
4058 {
4059 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4060
4061 if (info->clip_distance_array_size == 0 &&
4062 (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4063 last_stage == MESA_SHADER_GEOMETRY)
4064 key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4065 }
4066
4067 /**
4068 * Populate FS program key fields based on the current state.
4069 */
4070 static void
4071 iris_populate_fs_key(const struct iris_context *ice,
4072 const struct shader_info *info,
4073 struct iris_fs_prog_key *key)
4074 {
4075 struct iris_screen *screen = (void *) ice->ctx.screen;
4076 const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
4077 const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa;
4078 const struct iris_rasterizer_state *rast = ice->state.cso_rast;
4079 const struct iris_blend_state *blend = ice->state.cso_blend;
4080
4081 key->nr_color_regions = fb->nr_cbufs;
4082
4083 key->clamp_fragment_color = rast->clamp_fragment_color;
4084
4085 key->alpha_to_coverage = blend->alpha_to_coverage;
4086
4087 key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha.enabled;
4088
4089 key->flat_shade = rast->flatshade &&
4090 (info->inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1));
4091
4092 key->persample_interp = rast->force_persample_interp;
4093 key->multisample_fbo = rast->multisample && fb->samples > 1;
4094
4095 key->coherent_fb_fetch = GEN_GEN >= 9;
4096
4097 key->force_dual_color_blend =
4098 screen->driconf.dual_color_blend_by_location &&
4099 (blend->blend_enables & 1) && blend->dual_color_blending;
4100
4101 /* TODO: Respect glHint for key->high_quality_derivatives */
4102 }
4103
4104 static void
4105 iris_populate_cs_key(const struct iris_context *ice,
4106 struct iris_cs_prog_key *key)
4107 {
4108 }
4109
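/**
 * Return the Kernel Start Pointer: the offset of the shader's assembly
 * relative to the instruction base address.
 */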
4110 static uint64_t
4111 KSP(const struct iris_compiled_shader *shader)
4112 {
4113 struct iris_resource *res = (void *) shader->assembly.res;
4114 return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
4115 }
4116
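/* Thread dispatch fields shared by 3DSTATE_VS/HS/DS/GS.  Note that
 * PerThreadScratchSpace is a power-of-two encoding where 0 means 1KB, so
 * ffs(total_scratch) - 11 equals log2(total_scratch) - 10 for the
 * power-of-two scratch sizes the compiler reports.
 */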
4117 #define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage) \
4118 pkt.KernelStartPointer = KSP(shader); \
4119 pkt.BindingTableEntryCount = shader->bt.size_bytes / 4; \
4120 pkt.FloatingPointMode = prog_data->use_alt_mode; \
4121 \
4122 pkt.DispatchGRFStartRegisterForURBData = \
4123 prog_data->dispatch_grf_start_reg; \
4124 pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length; \
4125 pkt.prefix##URBEntryReadOffset = 0; \
4126 \
4127 pkt.StatisticsEnable = true; \
4128 pkt.Enable = true; \
4129 \
4130 if (prog_data->total_scratch) { \
4131 struct iris_bo *bo = \
4132 iris_get_scratch_space(ice, prog_data->total_scratch, stage); \
4133 uint32_t scratch_addr = bo->gtt_offset; \
4134 pkt.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11; \
4135 pkt.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr); \
4136 }
4137
4138 /**
4139 * Encode most of 3DSTATE_VS based on the compiled shader.
4140 */
4141 static void
4142 iris_store_vs_state(struct iris_context *ice,
4143 const struct gen_device_info *devinfo,
4144 struct iris_compiled_shader *shader)
4145 {
4146 struct brw_stage_prog_data *prog_data = shader->prog_data;
4147 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4148
4149 iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
4150 INIT_THREAD_DISPATCH_FIELDS(vs, Vertex, MESA_SHADER_VERTEX);
4151 vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
4152 vs.SIMD8DispatchEnable = true;
4153 vs.UserClipDistanceCullTestEnableBitmask =
4154 vue_prog_data->cull_distance_mask;
4155 }
4156 }
4157
4158 /**
4159 * Encode most of 3DSTATE_HS based on the compiled shader.
4160 */
4161 static void
4162 iris_store_tcs_state(struct iris_context *ice,
4163 const struct gen_device_info *devinfo,
4164 struct iris_compiled_shader *shader)
4165 {
4166 struct brw_stage_prog_data *prog_data = shader->prog_data;
4167 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4168 struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;
4169
4170 iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
4171 INIT_THREAD_DISPATCH_FIELDS(hs, Vertex, MESA_SHADER_TESS_CTRL);
4172
4173 #if GEN_GEN >= 12
4174 /* GEN:BUG:1604578095:
4175 *
       * A hang occurs when the maximum number of threads is less than
       * twice the instance count; the maximum must be more than twice the
       * instance count.
4179 */
4180 assert((devinfo->max_tcs_threads / 2) > tcs_prog_data->instances);
4181 hs.DispatchGRFStartRegisterForURBData = prog_data->dispatch_grf_start_reg & 0x1f;
4182 hs.DispatchGRFStartRegisterForURBData5 = prog_data->dispatch_grf_start_reg >> 5;
4183 #endif
4184
4185 hs.InstanceCount = tcs_prog_data->instances - 1;
4186 hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
4187 hs.IncludeVertexHandles = true;
4188
4189 #if GEN_GEN == 12
4190 /* Patch Count threshold specifies the maximum number of patches that
4191 * will be accumulated before a thread dispatch is forced.
4192 */
4193 hs.PatchCountThreshold = tcs_prog_data->patch_count_threshold;
4194 #endif
4195
4196 #if GEN_GEN >= 9
4197 hs.DispatchMode = vue_prog_data->dispatch_mode;
4198 hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
4199 #endif
4200 }
4201 }
4202
4203 /**
4204 * Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
4205 */
4206 static void
4207 iris_store_tes_state(struct iris_context *ice,
4208 const struct gen_device_info *devinfo,
4209 struct iris_compiled_shader *shader)
4210 {
4211 struct brw_stage_prog_data *prog_data = shader->prog_data;
4212 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4213 struct brw_tes_prog_data *tes_prog_data = (void *) prog_data;
4214
4215 uint32_t *te_state = (void *) shader->derived_data;
4216 uint32_t *ds_state = te_state + GENX(3DSTATE_TE_length);
4217
4218 iris_pack_command(GENX(3DSTATE_TE), te_state, te) {
4219 te.Partitioning = tes_prog_data->partitioning;
4220 te.OutputTopology = tes_prog_data->output_topology;
4221 te.TEDomain = tes_prog_data->domain;
4222 te.TEEnable = true;
4223 te.MaximumTessellationFactorOdd = 63.0;
4224 te.MaximumTessellationFactorNotOdd = 64.0;
4225 }
4226
4227 iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
4228 INIT_THREAD_DISPATCH_FIELDS(ds, Patch, MESA_SHADER_TESS_EVAL);
4229
4230 ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
4231 ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
4232 ds.ComputeWCoordinateEnable =
4233 tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;
4234
4235 ds.UserClipDistanceCullTestEnableBitmask =
4236 vue_prog_data->cull_distance_mask;
4237 }
4239 }
4240
4241 /**
4242 * Encode most of 3DSTATE_GS based on the compiled shader.
4243 */
4244 static void
4245 iris_store_gs_state(struct iris_context *ice,
4246 const struct gen_device_info *devinfo,
4247 struct iris_compiled_shader *shader)
4248 {
4249 struct brw_stage_prog_data *prog_data = shader->prog_data;
4250 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4251 struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;
4252
4253 iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
4254 INIT_THREAD_DISPATCH_FIELDS(gs, Vertex, MESA_SHADER_GEOMETRY);
4255
4256 gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
4257 gs.OutputTopology = gs_prog_data->output_topology;
4258 gs.ControlDataHeaderSize =
4259 gs_prog_data->control_data_header_size_hwords;
4260 gs.InstanceControl = gs_prog_data->invocations - 1;
4261 gs.DispatchMode = DISPATCH_MODE_SIMD8;
4262 gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
4263 gs.ControlDataFormat = gs_prog_data->control_data_format;
4264 gs.ReorderMode = TRAILING;
4265 gs.ExpectedVertexCount = gs_prog_data->vertices_in;
4266 gs.MaximumNumberofThreads =
4267 GEN_GEN == 8 ? (devinfo->max_gs_threads / 2 - 1)
4268 : (devinfo->max_gs_threads - 1);
4269
4270 if (gs_prog_data->static_vertex_count != -1) {
4271 gs.StaticOutput = true;
4272 gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
4273 }
4274 gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;
4275
4276 gs.UserClipDistanceCullTestEnableBitmask =
4277 vue_prog_data->cull_distance_mask;
4278
4279 const int urb_entry_write_offset = 1;
4280 const uint32_t urb_entry_output_length =
4281 DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
4282 urb_entry_write_offset;
4283
4284 gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
4285 gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
4286 }
4287 }
4288
4289 /**
4290 * Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
4291 */
4292 static void
4293 iris_store_fs_state(struct iris_context *ice,
4294 const struct gen_device_info *devinfo,
4295 struct iris_compiled_shader *shader)
4296 {
4297 struct brw_stage_prog_data *prog_data = shader->prog_data;
4298 struct brw_wm_prog_data *wm_prog_data = (void *) shader->prog_data;
4299
4300 uint32_t *ps_state = (void *) shader->derived_data;
4301 uint32_t *psx_state = ps_state + GENX(3DSTATE_PS_length);
4302
4303 iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
4304 ps.VectorMaskEnable = true;
4305 ps.BindingTableEntryCount = shader->bt.size_bytes / 4;
4306 ps.FloatingPointMode = prog_data->use_alt_mode;
4307 ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);
4308
4309 ps.PushConstantEnable = prog_data->ubo_ranges[0].length > 0;
4310
4311 /* From the documentation for this packet:
4312 * "If the PS kernel does not need the Position XY Offsets to
4313 * compute a Position Value, then this field should be programmed
4314 * to POSOFFSET_NONE."
4315 *
4316 * "SW Recommendation: If the PS kernel needs the Position Offsets
4317 * to compute a Position XY value, this field should match Position
4318 * ZW Interpolation Mode to ensure a consistent position.xyzw
4319 * computation."
4320 *
       * We only require XY sample offsets, so this recommendation doesn't
       * help us at the moment.  We may need it in the future.
4323 */
4324 ps.PositionXYOffsetSelect =
4325 wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;
4326
4327 if (prog_data->total_scratch) {
4328 struct iris_bo *bo =
4329 iris_get_scratch_space(ice, prog_data->total_scratch,
4330 MESA_SHADER_FRAGMENT);
4331 uint32_t scratch_addr = bo->gtt_offset;
4332 ps.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
4333 ps.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr);
4334 }
4335 }
4336
4337 iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
4338 psx.PixelShaderValid = true;
4339 psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
4340 psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
4341 psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
4342 psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
4343 psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
4344 psx.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;
4345 psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
4346
4347 #if GEN_GEN >= 9
4348 psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
4349 psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
4350 #endif
4351 }
4352 }
4353
/**
 * Encode most of INTERFACE_DESCRIPTOR_DATA based on the compiled shader.
 */
4359 static void
4360 iris_store_cs_state(struct iris_context *ice,
4361 const struct gen_device_info *devinfo,
4362 struct iris_compiled_shader *shader)
4363 {
4364 struct brw_stage_prog_data *prog_data = shader->prog_data;
4365 struct brw_cs_prog_data *cs_prog_data = (void *) shader->prog_data;
4366 void *map = shader->derived_data;
4367
4368 iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
4369 desc.KernelStartPointer = KSP(shader);
4370 desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
4371 desc.SharedLocalMemorySize =
4372 encode_slm_size(GEN_GEN, prog_data->total_shared);
4373 desc.BarrierEnable = cs_prog_data->uses_barrier;
4374 desc.CrossThreadConstantDataReadLength =
4375 cs_prog_data->push.cross_thread.regs;
4376 #if GEN_GEN >= 12
4377 /* TODO: Check if we are missing workarounds and enable mid-thread
4378 * preemption.
4379 *
4380 * We still have issues with mid-thread preemption (it was already
4381 * disabled by the kernel on gen11, due to missing workarounds). It's
4382 * possible that we are just missing some workarounds, and could enable
4383 * it later, but for now let's disable it to fix a GPU in compute in Car
4384 * Chase (and possibly more).
4385 */
4386 desc.ThreadPreemptionDisable = true;
4387 #endif
4388 }
4389 }
4390
/**
 * Compute the size of the derived data (shader command packets).
 *
 * This must match the data written by the iris_store_xs_state() functions.
 */
static unsigned
4392 iris_derived_program_state_size(enum iris_program_cache_id cache_id)
4393 {
4394 assert(cache_id <= IRIS_CACHE_BLORP);
4395
4396 static const unsigned dwords[] = {
4397 [IRIS_CACHE_VS] = GENX(3DSTATE_VS_length),
4398 [IRIS_CACHE_TCS] = GENX(3DSTATE_HS_length),
4399 [IRIS_CACHE_TES] = GENX(3DSTATE_TE_length) + GENX(3DSTATE_DS_length),
4400 [IRIS_CACHE_GS] = GENX(3DSTATE_GS_length),
4401 [IRIS_CACHE_FS] =
4402 GENX(3DSTATE_PS_length) + GENX(3DSTATE_PS_EXTRA_length),
4403 [IRIS_CACHE_CS] = GENX(INTERFACE_DESCRIPTOR_DATA_length),
4404 [IRIS_CACHE_BLORP] = 0,
4405 };
4406
4407 return sizeof(uint32_t) * dwords[cache_id];
4408 }
4409
4410 /**
4411 * Create any state packets corresponding to the given shader stage
4412 * (i.e. 3DSTATE_VS) and save them as "derived data" in the shader variant.
4413 * This means that we can look up a program in the in-memory cache and
4414 * get most of the state packet without having to reconstruct it.
4415 */
4416 static void
4417 iris_store_derived_program_state(struct iris_context *ice,
4418 enum iris_program_cache_id cache_id,
4419 struct iris_compiled_shader *shader)
4420 {
4421 struct iris_screen *screen = (void *) ice->ctx.screen;
4422 const struct gen_device_info *devinfo = &screen->devinfo;
4423
4424 switch (cache_id) {
4425 case IRIS_CACHE_VS:
4426 iris_store_vs_state(ice, devinfo, shader);
4427 break;
4428 case IRIS_CACHE_TCS:
4429 iris_store_tcs_state(ice, devinfo, shader);
4430 break;
4431 case IRIS_CACHE_TES:
4432 iris_store_tes_state(ice, devinfo, shader);
4433 break;
4434 case IRIS_CACHE_GS:
4435 iris_store_gs_state(ice, devinfo, shader);
4436 break;
4437 case IRIS_CACHE_FS:
4438 iris_store_fs_state(ice, devinfo, shader);
4439 break;
4440 case IRIS_CACHE_CS:
      iris_store_cs_state(ice, devinfo, shader);
      break;
   case IRIS_CACHE_BLORP:
4443 break;
4444 default:
4445 break;
4446 }
4447 }
4448
4449 /* ------------------------------------------------------------------- */
4450
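/* The _3DCommandSubOpcode of each stage's 3DSTATE_CONSTANT_XS packet,
 * indexed by shader stage.  Compute push constants don't use these packets,
 * hence the 0.
 */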
4451 static const uint32_t push_constant_opcodes[] = {
4452 [MESA_SHADER_VERTEX] = 21,
4453 [MESA_SHADER_TESS_CTRL] = 25, /* HS */
4454 [MESA_SHADER_TESS_EVAL] = 26, /* DS */
4455 [MESA_SHADER_GEOMETRY] = 22,
4456 [MESA_SHADER_FRAGMENT] = 23,
4457 [MESA_SHADER_COMPUTE] = 0,
4458 };
4459
4460 static uint32_t
4461 use_null_surface(struct iris_batch *batch, struct iris_context *ice)
4462 {
4463 struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);
4464
4465 iris_use_pinned_bo(batch, state_bo, false);
4466
4467 return ice->state.unbound_tex.offset;
4468 }
4469
4470 static uint32_t
4471 use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
4472 {
4473 /* If set_framebuffer_state() was never called, fall back to 1x1x1 */
4474 if (!ice->state.null_fb.res)
4475 return use_null_surface(batch, ice);
4476
4477 struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);
4478
4479 iris_use_pinned_bo(batch, state_bo, false);
4480
4481 return ice->state.null_fb.offset;
4482 }
4483
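/* SURFACE_STATEs for each possible aux usage are stored contiguously,
 * SURFACE_STATE_ALIGNMENT bytes apart; counting the enabled aux modes below
 * aux_usage gives this usage's slot within the group.
 */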
4484 static uint32_t
4485 surf_state_offset_for_aux(struct iris_resource *res,
4486 unsigned aux_modes,
4487 enum isl_aux_usage aux_usage)
4488 {
4489 return SURFACE_STATE_ALIGNMENT *
4490 util_bitcount(aux_modes & ((1 << aux_usage) - 1));
4491 }
4492
4493 #if GEN_GEN == 9
4494 static void
4495 surf_state_update_clear_value(struct iris_batch *batch,
4496 struct iris_resource *res,
4497 struct iris_state_ref *state,
4498 unsigned aux_modes,
4499 enum isl_aux_usage aux_usage)
4500 {
4501 struct isl_device *isl_dev = &batch->screen->isl_dev;
4502 struct iris_bo *state_bo = iris_resource_bo(state->res);
4503 uint64_t real_offset = state->offset + IRIS_MEMZONE_BINDER_START;
4504 uint32_t offset_into_bo = real_offset - state_bo->gtt_offset;
4505 uint32_t clear_offset = offset_into_bo +
4506 isl_dev->ss.clear_value_offset +
4507 surf_state_offset_for_aux(res, aux_modes, aux_usage);
4508 uint32_t *color = res->aux.clear_color.u32;
4509
4510 assert(isl_dev->ss.clear_value_size == 16);
4511
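   /* A PIPE_CONTROL immediate write stores 64 bits at a time, so updating
    * the 16-byte color clear value takes two writes; HiZ needs only the
    * single depth clear value.
    */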
4512 if (aux_usage == ISL_AUX_USAGE_HIZ) {
4513 iris_emit_pipe_control_write(batch, "update fast clear value (Z)",
4514 PIPE_CONTROL_WRITE_IMMEDIATE,
4515 state_bo, clear_offset, color[0]);
4516 } else {
4517 iris_emit_pipe_control_write(batch, "update fast clear color (RG__)",
4518 PIPE_CONTROL_WRITE_IMMEDIATE,
4519 state_bo, clear_offset,
4520 (uint64_t) color[0] |
4521 (uint64_t) color[1] << 32);
4522 iris_emit_pipe_control_write(batch, "update fast clear color (__BA)",
4523 PIPE_CONTROL_WRITE_IMMEDIATE,
4524 state_bo, clear_offset + 8,
4525 (uint64_t) color[2] |
4526 (uint64_t) color[3] << 32);
4527 }
4528
4529 iris_emit_pipe_control_flush(batch,
4530 "update fast clear: state cache invalidate",
4531 PIPE_CONTROL_FLUSH_ENABLE |
4532 PIPE_CONTROL_STATE_CACHE_INVALIDATE);
4533 }
4534 #endif
4535
4536 static void
4537 update_clear_value(struct iris_context *ice,
4538 struct iris_batch *batch,
4539 struct iris_resource *res,
4540 struct iris_surface_state *surf_state,
4541 unsigned all_aux_modes,
4542 struct isl_view *view)
4543 {
4544 UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
4545 UNUSED unsigned aux_modes = all_aux_modes;
4546
4547 /* We only need to update the clear color in the surface state for gen8 and
4548 * gen9. Newer gens can read it directly from the clear color state buffer.
4549 */
4550 #if GEN_GEN == 9
4551 /* Skip updating the ISL_AUX_USAGE_NONE surface state */
4552 aux_modes &= ~(1 << ISL_AUX_USAGE_NONE);
4553
4554 while (aux_modes) {
4555 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
4556
4557 surf_state_update_clear_value(batch, res, &surf_state->ref,
4558 all_aux_modes, aux_usage);
4559 }
4560 #elif GEN_GEN == 8
4561 /* TODO: Could update rather than re-filling */
4562 alloc_surface_states(surf_state, all_aux_modes);
4563
4564 void *map = surf_state->cpu;
4565
4566 while (aux_modes) {
4567 enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
4568 fill_surface_state(isl_dev, map, res, &res->surf, view, aux_usage,
4569 0, 0, 0);
4570 map += SURFACE_STATE_ALIGNMENT;
4571 }
4572
4573 upload_surface_states(ice->state.surface_uploader, surf_state);
4574 #endif
4575 }
4576
4577 /**
4578 * Add a surface to the validation list, as well as the buffer containing
4579 * the corresponding SURFACE_STATE.
4580 *
4581 * Returns the binding table entry (offset to SURFACE_STATE).
4582 */
4583 static uint32_t
4584 use_surface(struct iris_context *ice,
4585 struct iris_batch *batch,
4586 struct pipe_surface *p_surf,
4587 bool writeable,
4588 enum isl_aux_usage aux_usage,
4589 bool is_read_surface)
4590 {
4591 struct iris_surface *surf = (void *) p_surf;
4592 struct iris_resource *res = (void *) p_surf->texture;
4593 uint32_t offset = 0;
4594
4595 iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture), writeable);
4596 if (GEN_GEN == 8 && is_read_surface) {
4597 iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state_read.ref.res), false);
4598 } else {
4599 iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.ref.res), false);
4600 }
4601
4602 if (res->aux.bo) {
4603 iris_use_pinned_bo(batch, res->aux.bo, writeable);
4604 if (res->aux.clear_color_bo)
4605 iris_use_pinned_bo(batch, res->aux.clear_color_bo, false);
4606
4607 if (memcmp(&res->aux.clear_color, &surf->clear_color,
4608 sizeof(surf->clear_color)) != 0) {
4609 update_clear_value(ice, batch, res, &surf->surface_state,
4610 res->aux.possible_usages, &surf->view);
4611 if (GEN_GEN == 8) {
4612 update_clear_value(ice, batch, res, &surf->surface_state_read,
4613 res->aux.possible_usages, &surf->read_view);
4614 }
4615 surf->clear_color = res->aux.clear_color;
4616 }
4617 }
4618
4619 offset = (GEN_GEN == 8 && is_read_surface)
4620 ? surf->surface_state_read.ref.offset
4621 : surf->surface_state.ref.offset;
4622
4623 return offset +
4624 surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
4625 }
4626
4627 static uint32_t
4628 use_sampler_view(struct iris_context *ice,
4629 struct iris_batch *batch,
4630 struct iris_sampler_view *isv)
4631 {
4632 enum isl_aux_usage aux_usage =
4633 iris_resource_texture_aux_usage(ice, isv->res, isv->view.format);
4634
4635 iris_use_pinned_bo(batch, isv->res->bo, false);
4636 iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.ref.res), false);
4637
4638 if (isv->res->aux.bo) {
4639 iris_use_pinned_bo(batch, isv->res->aux.bo, false);
4640 if (isv->res->aux.clear_color_bo)
4641 iris_use_pinned_bo(batch, isv->res->aux.clear_color_bo, false);
4642 if (memcmp(&isv->res->aux.clear_color, &isv->clear_color,
4643 sizeof(isv->clear_color)) != 0) {
4644 update_clear_value(ice, batch, isv->res, &isv->surface_state,
4645 isv->res->aux.sampler_usages, &isv->view);
4646 isv->clear_color = isv->res->aux.clear_color;
4647 }
4648 }
4649
4650 return isv->surface_state.ref.offset +
4651 surf_state_offset_for_aux(isv->res, isv->res->aux.sampler_usages,
4652 aux_usage);
4653 }
4654
4655 static uint32_t
4656 use_ubo_ssbo(struct iris_batch *batch,
4657 struct iris_context *ice,
4658 struct pipe_shader_buffer *buf,
4659 struct iris_state_ref *surf_state,
4660 bool writable)
4661 {
4662 if (!buf->buffer || !surf_state->res)
4663 return use_null_surface(batch, ice);
4664
4665 iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable);
4666 iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false);
4667
4668 return surf_state->offset;
4669 }
4670
4671 static uint32_t
4672 use_image(struct iris_batch *batch, struct iris_context *ice,
4673 struct iris_shader_state *shs, const struct shader_info *info,
4674 int i)
4675 {
4676 struct iris_image_view *iv = &shs->image[i];
4677 struct iris_resource *res = (void *) iv->base.resource;
4678
4679 if (!res)
4680 return use_null_surface(batch, ice);
4681
4682 bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE;
4683
4684 iris_use_pinned_bo(batch, res->bo, write);
4685 iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.ref.res), false);
4686
4687 if (res->aux.bo)
4688 iris_use_pinned_bo(batch, res->aux.bo, write);
4689
4690 enum isl_aux_usage aux_usage =
4691 iris_image_view_aux_usage(ice, &iv->base, info);
4692
4693 return iv->surface_state.ref.offset +
4694 surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
4695 }
4696
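/* Write the next binding table entry: a SURFACE_STATE offset relative to
 * the binder's base address.  In pin_only mode, we only pin the referenced
 * BOs, leaving the existing table contents alone.
 */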
4697 #define push_bt_entry(addr) \
4698 assert(addr >= binder_addr); \
4699 assert(s < shader->bt.size_bytes / sizeof(uint32_t)); \
4700 if (!pin_only) bt_map[s++] = (addr) - binder_addr;
4701
4702 #define bt_assert(section) \
4703 if (!pin_only && shader->bt.used_mask[section] != 0) \
4704 assert(shader->bt.offsets[section] == s);
4705
4706 /**
4707 * Populate the binding table for a given shader stage.
4708 *
4709 * This fills out the table of pointers to surfaces required by the shader,
 * and also adds those buffers to the validation list, so the kernel can
 * make them resident before running our batch.
4712 */
4713 static void
4714 iris_populate_binding_table(struct iris_context *ice,
4715 struct iris_batch *batch,
4716 gl_shader_stage stage,
4717 bool pin_only)
4718 {
4719 const struct iris_binder *binder = &ice->state.binder;
4720 struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
4721 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
4722 if (!shader)
4723 return;
4724
4725 struct iris_binding_table *bt = &shader->bt;
4726 UNUSED struct brw_stage_prog_data *prog_data = shader->prog_data;
4727 struct iris_shader_state *shs = &ice->state.shaders[stage];
4728 uint32_t binder_addr = binder->bo->gtt_offset;
4729
4730 uint32_t *bt_map = binder->map + binder->bt_offset[stage];
4731 int s = 0;
4732
4733 const struct shader_info *info = iris_get_shader_info(ice, stage);
4734 if (!info) {
4735 /* TCS passthrough doesn't need a binding table. */
4736 assert(stage == MESA_SHADER_TESS_CTRL);
4737 return;
4738 }
4739
4740 if (stage == MESA_SHADER_COMPUTE &&
4741 shader->bt.used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS]) {
4742 /* surface for gl_NumWorkGroups */
4743 struct iris_state_ref *grid_data = &ice->state.grid_size;
4744 struct iris_state_ref *grid_state = &ice->state.grid_surf_state;
4745 iris_use_pinned_bo(batch, iris_resource_bo(grid_data->res), false);
4746 iris_use_pinned_bo(batch, iris_resource_bo(grid_state->res), false);
4747 push_bt_entry(grid_state->offset);
4748 }
4749
4750 if (stage == MESA_SHADER_FRAGMENT) {
4751 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4752 /* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
4753 if (cso_fb->nr_cbufs) {
4754 for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
4755 uint32_t addr;
4756 if (cso_fb->cbufs[i]) {
4757 addr = use_surface(ice, batch, cso_fb->cbufs[i], true,
4758 ice->state.draw_aux_usage[i], false);
4759 } else {
4760 addr = use_null_fb_surface(batch, ice);
4761 }
4762 push_bt_entry(addr);
4763 }
4764 } else if (GEN_GEN < 11) {
4765 uint32_t addr = use_null_fb_surface(batch, ice);
4766 push_bt_entry(addr);
4767 }
4768 }
4769
4770 #define foreach_surface_used(index, group) \
4771 bt_assert(group); \
4772 for (int index = 0; index < bt->sizes[group]; index++) \
4773 if (iris_group_index_to_bti(bt, group, index) != \
4774 IRIS_SURFACE_NOT_USED)
4775
4776 foreach_surface_used(i, IRIS_SURFACE_GROUP_RENDER_TARGET_READ) {
4777 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4778 uint32_t addr;
4779 if (cso_fb->cbufs[i]) {
4780 addr = use_surface(ice, batch, cso_fb->cbufs[i],
4781 true, ice->state.draw_aux_usage[i], true);
4782 push_bt_entry(addr);
4783 }
4784 }
4785
4786 foreach_surface_used(i, IRIS_SURFACE_GROUP_TEXTURE) {
4787 struct iris_sampler_view *view = shs->textures[i];
4788 uint32_t addr = view ? use_sampler_view(ice, batch, view)
4789 : use_null_surface(batch, ice);
4790 push_bt_entry(addr);
4791 }
4792
4793 foreach_surface_used(i, IRIS_SURFACE_GROUP_IMAGE) {
4794 uint32_t addr = use_image(batch, ice, shs, info, i);
4795 push_bt_entry(addr);
4796 }
4797
4798 foreach_surface_used(i, IRIS_SURFACE_GROUP_UBO) {
4799 uint32_t addr;
4800
4801 if (i == bt->sizes[IRIS_SURFACE_GROUP_UBO] - 1) {
4802 if (ish->const_data) {
4803 iris_use_pinned_bo(batch, iris_resource_bo(ish->const_data), false);
4804 iris_use_pinned_bo(batch, iris_resource_bo(ish->const_data_state.res),
4805 false);
4806 addr = ish->const_data_state.offset;
4807 } else {
4808 /* This can only happen with INTEL_DISABLE_COMPACT_BINDING_TABLE=1. */
4809 addr = use_null_surface(batch, ice);
4810 }
4811 } else {
4812 addr = use_ubo_ssbo(batch, ice, &shs->constbuf[i],
4813 &shs->constbuf_surf_state[i], false);
4814 }
4815
4816 push_bt_entry(addr);
4817 }
4818
4819 foreach_surface_used(i, IRIS_SURFACE_GROUP_SSBO) {
4820 uint32_t addr =
4821 use_ubo_ssbo(batch, ice, &shs->ssbo[i], &shs->ssbo_surf_state[i],
4822 shs->writable_ssbos & (1u << i));
4823 push_bt_entry(addr);
4824 }
4825
4826 #if 0
4827 /* XXX: YUV surfaces not implemented yet */
4828 bt_assert(plane_start[1], ...);
4829 bt_assert(plane_start[2], ...);
4830 #endif
4831 }
4832
4833 static void
4834 iris_use_optional_res(struct iris_batch *batch,
4835 struct pipe_resource *res,
4836 bool writeable)
4837 {
4838 if (res) {
4839 struct iris_bo *bo = iris_resource_bo(res);
4840 iris_use_pinned_bo(batch, bo, writeable);
4841 }
4842 }
4843
4844 static void
4845 pin_depth_and_stencil_buffers(struct iris_batch *batch,
4846 struct pipe_surface *zsbuf,
4847 struct iris_depth_stencil_alpha_state *cso_zsa)
4848 {
4849 if (!zsbuf)
4850 return;
4851
4852 struct iris_resource *zres, *sres;
4853 iris_get_depth_stencil_resources(zsbuf->texture, &zres, &sres);
4854
4855 if (zres) {
4856 iris_use_pinned_bo(batch, zres->bo, cso_zsa->depth_writes_enabled);
4857 if (zres->aux.bo) {
4858 iris_use_pinned_bo(batch, zres->aux.bo,
4859 cso_zsa->depth_writes_enabled);
4860 }
4861 }
4862
4863 if (sres) {
4864 iris_use_pinned_bo(batch, sres->bo, cso_zsa->stencil_writes_enabled);
4865 }
4866 }
4867
4868 /* ------------------------------------------------------------------- */
4869
4870 /**
4871 * Pin any BOs which were installed by a previous batch, and restored
4872 * via the hardware logical context mechanism.
4873 *
4874 * We don't need to re-emit all state every batch - the hardware context
4875 * mechanism will save and restore it for us. This includes pointers to
4876 * various BOs...which won't exist unless we ask the kernel to pin them
4877 * by adding them to the validation list.
4878 *
4879 * We can skip buffers if we've re-emitted those packets, as we're
4880 * overwriting those stale pointers with new ones, and don't actually
4881 * refer to the old BOs.
4882 */
4883 static void
4884 iris_restore_render_saved_bos(struct iris_context *ice,
4885 struct iris_batch *batch,
4886 const struct pipe_draw_info *draw)
4887 {
4888 struct iris_genx_state *genx = ice->state.genx;
4889
4890 const uint64_t clean = ~ice->state.dirty;
4891
4892 if (clean & IRIS_DIRTY_CC_VIEWPORT) {
4893 iris_use_optional_res(batch, ice->state.last_res.cc_vp, false);
4894 }
4895
4896 if (clean & IRIS_DIRTY_SF_CL_VIEWPORT) {
4897 iris_use_optional_res(batch, ice->state.last_res.sf_cl_vp, false);
4898 }
4899
4900 if (clean & IRIS_DIRTY_BLEND_STATE) {
4901 iris_use_optional_res(batch, ice->state.last_res.blend, false);
4902 }
4903
4904 if (clean & IRIS_DIRTY_COLOR_CALC_STATE) {
4905 iris_use_optional_res(batch, ice->state.last_res.color_calc, false);
4906 }
4907
4908 if (clean & IRIS_DIRTY_SCISSOR_RECT) {
4909 iris_use_optional_res(batch, ice->state.last_res.scissor, false);
4910 }
4911
4912 if (ice->state.streamout_active && (clean & IRIS_DIRTY_SO_BUFFERS)) {
4913 for (int i = 0; i < 4; i++) {
4914 struct iris_stream_output_target *tgt =
4915 (void *) ice->state.so_target[i];
4916 if (tgt) {
4917 iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
4918 true);
4919 iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
4920 true);
4921 }
4922 }
4923 }
4924
4925 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
4926 if (!(clean & (IRIS_DIRTY_CONSTANTS_VS << stage)))
4927 continue;
4928
4929 struct iris_shader_state *shs = &ice->state.shaders[stage];
4930 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
4931
4932 if (!shader)
4933 continue;
4934
4935 struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
4936
4937 for (int i = 0; i < 4; i++) {
4938 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
4939
4940 if (range->length == 0)
4941 continue;
4942
4943 /* Range block is a binding table index, map back to UBO index. */
4944 unsigned block_index = iris_bti_to_group_index(
4945 &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
4946 assert(block_index != IRIS_SURFACE_NOT_USED);
4947
4948 struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
4949 struct iris_resource *res = (void *) cbuf->buffer;
4950
4951 if (res)
4952 iris_use_pinned_bo(batch, res->bo, false);
4953 else
4954 iris_use_pinned_bo(batch, batch->screen->workaround_bo, false);
4955 }
4956 }
4957
4958 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
4959 if (clean & (IRIS_DIRTY_BINDINGS_VS << stage)) {
4960 /* Re-pin any buffers referred to by the binding table. */
4961 iris_populate_binding_table(ice, batch, stage, true);
4962 }
4963 }
4964
4965 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
4966 struct iris_shader_state *shs = &ice->state.shaders[stage];
4967 struct pipe_resource *res = shs->sampler_table.res;
4968 if (res)
4969 iris_use_pinned_bo(batch, iris_resource_bo(res), false);
4970 }
4971
4972 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
4973 if (clean & (IRIS_DIRTY_VS << stage)) {
4974 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
4975
4976 if (shader) {
4977 struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
4978 iris_use_pinned_bo(batch, bo, false);
4979
4980 struct brw_stage_prog_data *prog_data = shader->prog_data;
4981
4982 if (prog_data->total_scratch > 0) {
4983 struct iris_bo *bo =
4984 iris_get_scratch_space(ice, prog_data->total_scratch, stage);
4985 iris_use_pinned_bo(batch, bo, true);
4986 }
4987 }
4988 }
4989 }
4990
4991 if ((clean & IRIS_DIRTY_DEPTH_BUFFER) &&
4992 (clean & IRIS_DIRTY_WM_DEPTH_STENCIL)) {
4993 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4994 pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
4995 }
4996
4997 iris_use_optional_res(batch, ice->state.last_res.index_buffer, false);
4998
4999 if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
5000 uint64_t bound = ice->state.bound_vertex_buffers;
5001 while (bound) {
5002 const int i = u_bit_scan64(&bound);
5003 struct pipe_resource *res = genx->vertex_buffers[i].resource;
5004 iris_use_pinned_bo(batch, iris_resource_bo(res), false);
5005 }
5006 }
5007 }
5008
5009 static void
5010 iris_restore_compute_saved_bos(struct iris_context *ice,
5011 struct iris_batch *batch,
5012 const struct pipe_grid_info *grid)
5013 {
5014 const uint64_t clean = ~ice->state.dirty;
5015
5016 const int stage = MESA_SHADER_COMPUTE;
5017 struct iris_shader_state *shs = &ice->state.shaders[stage];
5018
5019 if (clean & IRIS_DIRTY_BINDINGS_CS) {
5020 /* Re-pin any buffers referred to by the binding table. */
5021 iris_populate_binding_table(ice, batch, stage, true);
5022 }
5023
5024 struct pipe_resource *sampler_res = shs->sampler_table.res;
5025 if (sampler_res)
5026 iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false);
5027
5028 if ((clean & IRIS_DIRTY_SAMPLER_STATES_CS) &&
5029 (clean & IRIS_DIRTY_BINDINGS_CS) &&
5030 (clean & IRIS_DIRTY_CONSTANTS_CS) &&
5031 (clean & IRIS_DIRTY_CS)) {
5032 iris_use_optional_res(batch, ice->state.last_res.cs_desc, false);
5033 }
5034
5035 if (clean & IRIS_DIRTY_CS) {
5036 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5037
5038 if (shader) {
5039 struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
5040 iris_use_pinned_bo(batch, bo, false);
5041
5042 struct iris_bo *curbe_bo =
5043 iris_resource_bo(ice->state.last_res.cs_thread_ids);
5044 iris_use_pinned_bo(batch, curbe_bo, false);
5045
5046 struct brw_stage_prog_data *prog_data = shader->prog_data;
5047
5048 if (prog_data->total_scratch > 0) {
5049 struct iris_bo *bo =
5050 iris_get_scratch_space(ice, prog_data->total_scratch, stage);
5051 iris_use_pinned_bo(batch, bo, true);
5052 }
5053 }
5054 }
5055 }
5056
5057 /**
5058 * Possibly emit STATE_BASE_ADDRESS to update Surface State Base Address.
5059 */
5060 static void
5061 iris_update_surface_base_address(struct iris_batch *batch,
5062 struct iris_binder *binder)
5063 {
5064 if (batch->last_surface_base_address == binder->bo->gtt_offset)
5065 return;
5066
5067 uint32_t mocs = batch->screen->isl_dev.mocs.internal;
5068
5069 flush_before_state_base_change(batch);
5070
5071 #if GEN_GEN == 12
5072 /* GEN:BUG:1607854226:
5073 *
5074        * Workaround the non-pipelined state not applying in MEDIA/GPGPU
5075        * pipeline mode by temporarily putting the pipeline in 3D mode.
5076 */
5077 if (batch->name == IRIS_BATCH_COMPUTE)
5078 emit_pipeline_select(batch, _3D);
5079 #endif
5080
5081 iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
5082 sba.SurfaceStateBaseAddressModifyEnable = true;
5083 sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);
5084
5085 /* The hardware appears to pay attention to the MOCS fields even
5086 * if you don't set the "Address Modify Enable" bit for the base.
5087 */
5088 sba.GeneralStateMOCS = mocs;
5089 sba.StatelessDataPortAccessMOCS = mocs;
5090 sba.DynamicStateMOCS = mocs;
5091 sba.IndirectObjectMOCS = mocs;
5092 sba.InstructionMOCS = mocs;
5093 sba.SurfaceStateMOCS = mocs;
5094 #if GEN_GEN >= 9
5095 sba.BindlessSurfaceStateMOCS = mocs;
5096 #endif
5097 }
5098
5099 #if GEN_GEN == 12
5100 /* GEN:BUG:1607854226:
5101 *
5102 * Put the pipeline back into compute mode.
5103 */
5104 if (batch->name == IRIS_BATCH_COMPUTE)
5105 emit_pipeline_select(batch, GPGPU);
5106 #endif
5107
5108 flush_after_state_base_change(batch);
5109
5110 batch->last_surface_base_address = binder->bo->gtt_offset;
5111 }
5112
5113 static inline void
5114 iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
5115 bool window_space_position, float *zmin, float *zmax)
5116 {
5117 if (window_space_position) {
5118 *zmin = 0.f;
5119 *zmax = 1.f;
5120 return;
5121 }
5122 util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
5123 }
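/* A minimal sketch, assuming util_viewport_zmin_zmax follows Gallium's
 * usual depth-range math: with halfz ([0,1] clip space) the range is
 * translate[2] .. translate[2] + scale[2]; with [-1,1] clip space it is
 * translate[2] +/- scale[2].  Taking min/max handles reversed-Z.
 */
#if 0
static void
sketch_zmin_zmax(float scale_z, float translate_z, bool halfz,
                 float *zmin, float *zmax)
{
   float a = halfz ? translate_z : translate_z - scale_z;
   float b = translate_z + scale_z;
   *zmin = a < b ? a : b;
   *zmax = a < b ? b : a;
}
#endif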
5124
5125 #if GEN_GEN >= 12
5126 void
5127 genX(invalidate_aux_map_state)(struct iris_batch *batch)
5128 {
5129 struct iris_screen *screen = batch->screen;
5130 void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
5131 if (!aux_map_ctx)
5132 return;
5133 uint32_t aux_map_state_num = gen_aux_map_get_state_num(aux_map_ctx);
5134 if (batch->last_aux_map_state != aux_map_state_num) {
5135 /* HSD 1209978178: docs say that before programming the aux table:
5136 *
5137 * "Driver must ensure that the engine is IDLE but ensure it doesn't
5138 * add extra flushes in the case it knows that the engine is already
5139 * IDLE."
5140 *
5141 * An end of pipe sync is needed here, otherwise we see GPU hangs in
5142 * dEQP-GLES31.functional.copy_image.* tests.
5143 */
5144 iris_emit_end_of_pipe_sync(batch, "Invalidate aux map table",
5145 PIPE_CONTROL_CS_STALL);
5146
5147 /* If the aux-map state number increased, then we need to rewrite the
5148 * register. Rewriting the register is used to both set the aux-map
5149 * translation table address, and also to invalidate any previously
5150 * cached translations.
5151 */
5152 iris_load_register_imm32(batch, GENX(GFX_CCS_AUX_INV_num), 1);
5153 batch->last_aux_map_state = aux_map_state_num;
5154 }
5155 }
5156
5157 static void
5158 init_aux_map_state(struct iris_batch *batch)
5159 {
5160 struct iris_screen *screen = batch->screen;
5161 void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
5162 if (!aux_map_ctx)
5163 return;
5164
5165 uint64_t base_addr = gen_aux_map_get_base(aux_map_ctx);
5166 assert(base_addr != 0 && align64(base_addr, 32 * 1024) == base_addr);
5167 iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num),
5168 base_addr);
5169 }
5170 #endif
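/* A sketch of the alignment invariant asserted in init_aux_map_state:
 * the aux translation table base address must be non-zero and 32KB
 * aligned, i.e. its low 15 bits are clear.  Helper name hypothetical.
 */
#if 0
static bool
aux_table_base_is_valid(uint64_t base_addr)
{
   return base_addr != 0 && (base_addr & (32 * 1024 - 1)) == 0;
}
#endif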
5171
5172 struct push_bos {
5173 struct {
5174 struct iris_address addr;
5175 uint32_t length;
5176 } buffers[4];
5177 int buffer_count;
5178 uint32_t max_length;
5179 };
5180
5181 static void
5182 setup_constant_buffers(struct iris_context *ice,
5183 struct iris_batch *batch,
5184 int stage,
5185 struct push_bos *push_bos)
5186 {
5187 struct iris_shader_state *shs = &ice->state.shaders[stage];
5188 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5189 struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5190
5191 uint32_t push_range_sum = 0;
5192
5193 int n = 0;
5194 for (int i = 0; i < 4; i++) {
5195 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
5196
5197 if (range->length == 0)
5198 continue;
5199
5200 push_range_sum += range->length;
5201
5202 if (range->length > push_bos->max_length)
5203 push_bos->max_length = range->length;
5204
5205 /* Range block is a binding table index, map back to UBO index. */
5206 unsigned block_index = iris_bti_to_group_index(
5207 &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
5208 assert(block_index != IRIS_SURFACE_NOT_USED);
5209
5210 struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
5211 struct iris_resource *res = (void *) cbuf->buffer;
5212
5213 assert(cbuf->buffer_offset % 32 == 0);
5214
5215 push_bos->buffers[n].length = range->length;
5216 push_bos->buffers[n].addr =
5217 res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
5218 : ro_bo(batch->screen->workaround_bo, 0);
5219 n++;
5220 }
5221
5222 /* From the 3DSTATE_CONSTANT_XS and 3DSTATE_CONSTANT_ALL programming notes:
5223 *
5224 * "The sum of all four read length fields must be less than or
5225 * equal to the size of 64."
5226 */
5227 assert(push_range_sum <= 64);
5228
5229 push_bos->buffer_count = n;
5230 }
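/* Illustrative only: UBO push ranges are expressed in 32-byte register
 * units, which is why the address math above multiplies range->start by
 * 32 and why buffer_offset must be 32-byte aligned.  The helper name is
 * hypothetical.
 */
#if 0
static uint32_t
push_range_byte_offset(uint32_t range_start_regs,
                       uint32_t cbuf_offset_bytes)
{
   /* e.g. a range starting at register 2 with a 64B buffer offset
    * begins at byte 2 * 32 + 64 = 128 of the BO.
    */
   return range_start_regs * 32 + cbuf_offset_bytes;
}
#endif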
5231
5232 static void
5233 emit_push_constant_packets(struct iris_context *ice,
5234 struct iris_batch *batch,
5235 int stage,
5236 const struct push_bos *push_bos)
5237 {
5238 UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
5239 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5240 struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5241
5242 iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
5243 pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
5244 #if GEN_GEN >= 12
5245 pkt.MOCS = isl_dev->mocs.internal;
5246 #endif
5247 if (prog_data) {
5248 /* The Skylake PRM contains the following restriction:
5249 *
5250 * "The driver must ensure The following case does not occur
5251 * without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
5252 * buffer 3 read length equal to zero committed followed by a
5253 * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to
5254 * zero committed."
5255 *
5256 * To avoid this, we program the buffers in the highest slots.
5257 * This way, slot 0 is only used if slot 3 is also used.
5258 */
5259 int n = push_bos->buffer_count;
5260 assert(n <= 4);
5261 const unsigned shift = 4 - n;
5262 for (int i = 0; i < n; i++) {
5263 pkt.ConstantBody.ReadLength[i + shift] =
5264 push_bos->buffers[i].length;
5265 pkt.ConstantBody.Buffer[i + shift] = push_bos->buffers[i].addr;
5266 }
5267 }
5268 }
5269 }
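/* A worked example of the highest-slot packing above (hypothetical
 * helper, illustrative values): with buffer_count n == 2, shift == 2,
 * so the buffers land in ConstantBody slots 2 and 3.  Slot 3's read
 * length is then nonzero whenever slot 0's is, which is what the
 * Skylake restriction demands.
 */
#if 0
static unsigned
packed_slot_for_buffer(unsigned n_buffers, unsigned i)
{
   return (4 - n_buffers) + i;   /* i in [0, n_buffers) */
}
#endif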
5270
5271 #if GEN_GEN >= 12
5272 static void
5273 emit_push_constant_packet_all(struct iris_context *ice,
5274 struct iris_batch *batch,
5275 uint32_t shader_mask,
5276 const struct push_bos *push_bos)
5277 {
5278 struct isl_device *isl_dev = &batch->screen->isl_dev;
5279
5280 if (!push_bos) {
5281 iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), pc) {
5282 pc.ShaderUpdateEnable = shader_mask;
5283 }
5284 return;
5285 }
5286
5287 const uint32_t n = push_bos->buffer_count;
5288 const uint32_t max_pointers = 4;
5289 const uint32_t num_dwords = 2 + 2 * n;
5290 uint32_t const_all[2 + 2 * max_pointers];
5291 uint32_t *dw = &const_all[0];
5292
5293 assert(n <= max_pointers);
5294 iris_pack_command(GENX(3DSTATE_CONSTANT_ALL), dw, all) {
5295 all.DWordLength = num_dwords - 2;
5296 all.MOCS = isl_dev->mocs.internal;
5297 all.ShaderUpdateEnable = shader_mask;
5298 all.PointerBufferMask = (1 << n) - 1;
5299 }
5300 dw += 2;
5301
5302 for (int i = 0; i < n; i++) {
5303 _iris_pack_state(batch, GENX(3DSTATE_CONSTANT_ALL_DATA),
5304 dw + i * 2, data) {
5305 data.PointerToConstantBuffer = push_bos->buffers[i].addr;
5306 data.ConstantBufferReadLength = push_bos->buffers[i].length;
5307 }
5308 }
5309 iris_batch_emit(batch, const_all, sizeof(uint32_t) * num_dwords);
5310 }
5311 #endif
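/* For reference, a sketch of the sizing used above: command DWordLength
 * fields are biased by 2 (they exclude the two header dwords), so a
 * 3DSTATE_CONSTANT_ALL with n pointer/length pairs occupies 2 + 2n
 * dwords and programs DWordLength = 2n.  Helper name hypothetical.
 */
#if 0
static uint32_t
constant_all_num_dwords(uint32_t n_pointers)
{
   return 2 /* header */ + 2 * n_pointers /* one 64-bit entry each */;
}
#endif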
5312
5313 static void
5314 iris_upload_dirty_render_state(struct iris_context *ice,
5315 struct iris_batch *batch,
5316 const struct pipe_draw_info *draw)
5317 {
5318 const uint64_t dirty = ice->state.dirty;
5319
5320 if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER))
5321 return;
5322
5323 struct iris_genx_state *genx = ice->state.genx;
5324 struct iris_binder *binder = &ice->state.binder;
5325 struct brw_wm_prog_data *wm_prog_data = (void *)
5326 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
5327
5328 if (dirty & IRIS_DIRTY_CC_VIEWPORT) {
5329 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
5330 uint32_t cc_vp_address;
5331
5332 /* XXX: could avoid streaming for depth_clip [0,1] case. */
5333 uint32_t *cc_vp_map =
5334 stream_state(batch, ice->state.dynamic_uploader,
5335 &ice->state.last_res.cc_vp,
5336 4 * ice->state.num_viewports *
5337 GENX(CC_VIEWPORT_length), 32, &cc_vp_address);
5338 for (int i = 0; i < ice->state.num_viewports; i++) {
5339 float zmin, zmax;
5340 iris_viewport_zmin_zmax(&ice->state.viewports[i], cso_rast->clip_halfz,
5341 ice->state.window_space_position,
5342 &zmin, &zmax);
5343 if (cso_rast->depth_clip_near)
5344 zmin = 0.0;
5345 if (cso_rast->depth_clip_far)
5346 zmax = 1.0;
5347
5348 iris_pack_state(GENX(CC_VIEWPORT), cc_vp_map, ccv) {
5349 ccv.MinimumDepth = zmin;
5350 ccv.MaximumDepth = zmax;
5351 }
5352
5353 cc_vp_map += GENX(CC_VIEWPORT_length);
5354 }
5355
5356 iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
5357 ptr.CCViewportPointer = cc_vp_address;
5358 }
5359 }
5360
5361 if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
5362 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5363 uint32_t sf_cl_vp_address;
5364 uint32_t *vp_map =
5365 stream_state(batch, ice->state.dynamic_uploader,
5366 &ice->state.last_res.sf_cl_vp,
5367 4 * ice->state.num_viewports *
5368 GENX(SF_CLIP_VIEWPORT_length), 64, &sf_cl_vp_address);
5369
5370 for (unsigned i = 0; i < ice->state.num_viewports; i++) {
5371 const struct pipe_viewport_state *state = &ice->state.viewports[i];
5372 float gb_xmin, gb_xmax, gb_ymin, gb_ymax;
5373
5374 float vp_xmin = viewport_extent(state, 0, -1.0f);
5375 float vp_xmax = viewport_extent(state, 0, 1.0f);
5376 float vp_ymin = viewport_extent(state, 1, -1.0f);
5377 float vp_ymax = viewport_extent(state, 1, 1.0f);
5378
5379 gen_calculate_guardband_size(cso_fb->width, cso_fb->height,
5380 state->scale[0], state->scale[1],
5381 state->translate[0], state->translate[1],
5382 &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);
5383
5384 iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
5385 vp.ViewportMatrixElementm00 = state->scale[0];
5386 vp.ViewportMatrixElementm11 = state->scale[1];
5387 vp.ViewportMatrixElementm22 = state->scale[2];
5388 vp.ViewportMatrixElementm30 = state->translate[0];
5389 vp.ViewportMatrixElementm31 = state->translate[1];
5390 vp.ViewportMatrixElementm32 = state->translate[2];
5391 vp.XMinClipGuardband = gb_xmin;
5392 vp.XMaxClipGuardband = gb_xmax;
5393 vp.YMinClipGuardband = gb_ymin;
5394 vp.YMaxClipGuardband = gb_ymax;
5395 vp.XMinViewPort = MAX2(vp_xmin, 0);
5396 vp.XMaxViewPort = MIN2(vp_xmax, cso_fb->width) - 1;
5397 vp.YMinViewPort = MAX2(vp_ymin, 0);
5398 vp.YMaxViewPort = MIN2(vp_ymax, cso_fb->height) - 1;
5399 }
5400
5401 vp_map += GENX(SF_CLIP_VIEWPORT_length);
5402 }
5403
5404 iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
5405 ptr.SFClipViewportPointer = sf_cl_vp_address;
5406 }
5407 }
5408
5409 if (dirty & IRIS_DIRTY_URB) {
5410 unsigned size[4];
5411
5412 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
5413 if (!ice->shaders.prog[i]) {
5414 size[i] = 1;
5415 } else {
5416 struct brw_vue_prog_data *vue_prog_data =
5417 (void *) ice->shaders.prog[i]->prog_data;
5418 size[i] = vue_prog_data->urb_entry_size;
5419 }
5420 assert(size[i] != 0);
5421 }
5422
5423 unsigned entries[4], start[4];
5424 gen_get_urb_config(&batch->screen->devinfo,
5425 batch->screen->l3_config_3d,
5426 ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
5427 ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,
5428 size, entries, start,
5429 &ice->state.urb_deref_block_size);
5430
5431 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
5432 iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
5433 urb._3DCommandSubOpcode += i;
5434 urb.VSURBStartingAddress = start[i];
5435 urb.VSURBEntryAllocationSize = size[i] - 1;
5436 urb.VSNumberofURBEntries = entries[i];
5437 }
5438 }
5439 }
5440
5441 if (dirty & IRIS_DIRTY_BLEND_STATE) {
5442 struct iris_blend_state *cso_blend = ice->state.cso_blend;
5443 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5444 struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
5445 const int header_dwords = GENX(BLEND_STATE_length);
5446
5447 /* Always write at least one BLEND_STATE - the final RT message will
5448 * reference BLEND_STATE[0] even if there aren't color writes. There
5449 * may still be alpha testing, computed depth, and so on.
5450 */
5451 const int rt_dwords =
5452 MAX2(cso_fb->nr_cbufs, 1) * GENX(BLEND_STATE_ENTRY_length);
5453
5454 uint32_t blend_offset;
5455 uint32_t *blend_map =
5456 stream_state(batch, ice->state.dynamic_uploader,
5457 &ice->state.last_res.blend,
5458 4 * (header_dwords + rt_dwords), 64, &blend_offset);
5459
5460 uint32_t blend_state_header;
5461 iris_pack_state(GENX(BLEND_STATE), &blend_state_header, bs) {
5462 bs.AlphaTestEnable = cso_zsa->alpha.enabled;
5463 bs.AlphaTestFunction = translate_compare_func(cso_zsa->alpha.func);
5464 }
5465
5466 blend_map[0] = blend_state_header | cso_blend->blend_state[0];
5467 memcpy(&blend_map[1], &cso_blend->blend_state[1], 4 * rt_dwords);
5468
5469 iris_emit_cmd(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
5470 ptr.BlendStatePointer = blend_offset;
5471 ptr.BlendStatePointerValid = true;
5472 }
5473 }
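   /* A minimal sketch of the pack-and-OR merging used for the header
    * above (and by iris_emit_merge elsewhere): two partial packs of the
    * same command, each filling disjoint fields, combine by OR'ing
    * their dwords.  The names a, b, merged, n_dwords are illustrative.
    */
#if 0
   for (int i = 0; i < n_dwords; i++)
      merged[i] = a[i] | b[i];
#endif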
5474
5475 if (dirty & IRIS_DIRTY_COLOR_CALC_STATE) {
5476 struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
5477 #if GEN_GEN == 8
5478 struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
5479 #endif
5480 uint32_t cc_offset;
5481 void *cc_map =
5482 stream_state(batch, ice->state.dynamic_uploader,
5483 &ice->state.last_res.color_calc,
5484 sizeof(uint32_t) * GENX(COLOR_CALC_STATE_length),
5485 64, &cc_offset);
5486 iris_pack_state(GENX(COLOR_CALC_STATE), cc_map, cc) {
5487 cc.AlphaTestFormat = ALPHATEST_FLOAT32;
5488 cc.AlphaReferenceValueAsFLOAT32 = cso->alpha.ref_value;
5489 cc.BlendConstantColorRed = ice->state.blend_color.color[0];
5490 cc.BlendConstantColorGreen = ice->state.blend_color.color[1];
5491 cc.BlendConstantColorBlue = ice->state.blend_color.color[2];
5492 cc.BlendConstantColorAlpha = ice->state.blend_color.color[3];
5493 #if GEN_GEN == 8
5494 cc.StencilReferenceValue = p_stencil_refs->ref_value[0];
5495 cc.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
5496 #endif
5497 }
5498 iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
5499 ptr.ColorCalcStatePointer = cc_offset;
5500 ptr.ColorCalcStatePointerValid = true;
5501 }
5502 }
5503
5504 /* GEN:BUG:1604061319
5505 *
5506 * 3DSTATE_CONSTANT_* needs to be programmed before BTP_*
5507 *
5508 * Testing shows that all the 3DSTATE_CONSTANT_XS need to be emitted if
5509 * any stage has a dirty binding table.
5510 */
5511 const bool emit_const_wa = GEN_GEN >= 11 &&
5512 (dirty & IRIS_ALL_DIRTY_BINDINGS) != 0;
5513
5514 #if GEN_GEN >= 12
5515 uint32_t nobuffer_stages = 0;
5516 #endif
5517
5518 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5519 if (!(dirty & (IRIS_DIRTY_CONSTANTS_VS << stage)) &&
5520 !emit_const_wa)
5521 continue;
5522
5523 struct iris_shader_state *shs = &ice->state.shaders[stage];
5524 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5525
5526 if (!shader)
5527 continue;
5528
5529 if (shs->sysvals_need_upload)
5530 upload_sysvals(ice, stage);
5531
5532 struct push_bos push_bos = {};
5533 setup_constant_buffers(ice, batch, stage, &push_bos);
5534
5535 #if GEN_GEN >= 12
5536 /* If this stage doesn't have any push constants, emit it later in a
5537 * single CONSTANT_ALL packet with all the other stages.
5538 */
5539 if (push_bos.buffer_count == 0) {
5540 nobuffer_stages |= 1 << stage;
5541 continue;
5542 }
5543
5544 /* The Constant Buffer Read Length field from 3DSTATE_CONSTANT_ALL
5545        * contains only 5 bits, so we can only use it for push ranges
5546        * shorter than 32 registers.
5547 */
5548 if (push_bos.max_length < 32) {
5549 emit_push_constant_packet_all(ice, batch, 1 << stage, &push_bos);
5550 continue;
5551 }
5552 #endif
5553 emit_push_constant_packets(ice, batch, stage, &push_bos);
5554 }
5555
5556 #if GEN_GEN >= 12
5557 if (nobuffer_stages)
5558 emit_push_constant_packet_all(ice, batch, nobuffer_stages, NULL);
5559 #endif
5560
5561 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5562 /* Gen9 requires 3DSTATE_BINDING_TABLE_POINTERS_XS to be re-emitted
5563 * in order to commit constants. TODO: Investigate "Disable Gather
5564 * at Set Shader" to go back to legacy mode...
5565 */
5566 if (dirty & ((IRIS_DIRTY_BINDINGS_VS |
5567 (GEN_GEN == 9 ? IRIS_DIRTY_CONSTANTS_VS : 0)) << stage)) {
5568 iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
5569 ptr._3DCommandSubOpcode = 38 + stage;
5570 ptr.PointertoVSBindingTable = binder->bt_offset[stage];
5571 }
5572 }
5573 }
5574
5575 if (GEN_GEN >= 11 && (dirty & IRIS_DIRTY_RENDER_BUFFER)) {
5576 // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
5577 // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6
5578
5579 /* The PIPE_CONTROL command description says:
5580 *
5581 * "Whenever a Binding Table Index (BTI) used by a Render Target
5582 * Message points to a different RENDER_SURFACE_STATE, SW must issue a
5583 * Render Target Cache Flush by enabling this bit. When render target
5584 * flush is set due to new association of BTI, PS Scoreboard Stall bit
5585 * must be set in this packet."
5586 */
5587 // XXX: does this need to happen at 3DSTATE_BTP_PS time?
5588 iris_emit_pipe_control_flush(batch, "workaround: RT BTI change [draw]",
5589 PIPE_CONTROL_RENDER_TARGET_FLUSH |
5590 PIPE_CONTROL_STALL_AT_SCOREBOARD);
5591 }
5592
5593 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5594 if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
5595 iris_populate_binding_table(ice, batch, stage, false);
5596 }
5597 }
5598
5599 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5600 if (!(dirty & (IRIS_DIRTY_SAMPLER_STATES_VS << stage)) ||
5601 !ice->shaders.prog[stage])
5602 continue;
5603
5604 iris_upload_sampler_states(ice, stage);
5605
5606 struct iris_shader_state *shs = &ice->state.shaders[stage];
5607 struct pipe_resource *res = shs->sampler_table.res;
5608 if (res)
5609 iris_use_pinned_bo(batch, iris_resource_bo(res), false);
5610
5611 iris_emit_cmd(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
5612 ptr._3DCommandSubOpcode = 43 + stage;
5613 ptr.PointertoVSSamplerState = shs->sampler_table.offset;
5614 }
5615 }
5616
5617 if (ice->state.need_border_colors)
5618 iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false);
5619
5620 if (dirty & IRIS_DIRTY_MULTISAMPLE) {
5621 iris_emit_cmd(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
5622 ms.PixelLocation =
5623 ice->state.cso_rast->half_pixel_center ? CENTER : UL_CORNER;
5624 if (ice->state.framebuffer.samples > 0)
5625 ms.NumberofMultisamples = ffs(ice->state.framebuffer.samples) - 1;
5626 }
5627 }
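   /* Illustrative: for the power-of-two sample counts the hardware
    * supports, ffs(samples) - 1 == log2(samples), which is the encoding
    * NumberofMultisamples expects (ffs() is from <strings.h>).
    */
#if 0
   assert(ffs(4) - 1 == 2);   /* 4x MSAA encodes as 2 */
   assert(ffs(8) - 1 == 3);   /* 8x MSAA encodes as 3 */
#endif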
5628
5629 if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
5630 iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
5631 ms.SampleMask = ice->state.sample_mask;
5632 }
5633 }
5634
5635 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5636 if (!(dirty & (IRIS_DIRTY_VS << stage)))
5637 continue;
5638
5639 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5640
5641 if (shader) {
5642 struct brw_stage_prog_data *prog_data = shader->prog_data;
5643 struct iris_resource *cache = (void *) shader->assembly.res;
5644 iris_use_pinned_bo(batch, cache->bo, false);
5645
5646 if (prog_data->total_scratch > 0) {
5647 struct iris_bo *bo =
5648 iris_get_scratch_space(ice, prog_data->total_scratch, stage);
5649 iris_use_pinned_bo(batch, bo, true);
5650 }
5651
5652 if (stage == MESA_SHADER_FRAGMENT) {
5653 UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast;
5654 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5655
5656 uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0};
5657 iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
5658 ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
5659 ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
5660 ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
5661
5662 /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say:
5663 *
5664 * "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16,
5665 * SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch
5666 * mode."
5667 *
5668 * 16x MSAA only exists on Gen9+, so we can skip this on Gen8.
5669 */
5670 if (GEN_GEN >= 9 && cso_fb->samples == 16 &&
5671 !wm_prog_data->persample_dispatch) {
5672 assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
5673 ps._32PixelDispatchEnable = false;
5674 }
5675
5676 ps.DispatchGRFStartRegisterForConstantSetupData0 =
5677 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
5678 ps.DispatchGRFStartRegisterForConstantSetupData1 =
5679 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
5680 ps.DispatchGRFStartRegisterForConstantSetupData2 =
5681 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
5682
5683 ps.KernelStartPointer0 = KSP(shader) +
5684 brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
5685 ps.KernelStartPointer1 = KSP(shader) +
5686 brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
5687 ps.KernelStartPointer2 = KSP(shader) +
5688 brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
5689 }
5690
5691 uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
5692 iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
5693 #if GEN_GEN >= 9
5694 if (!wm_prog_data->uses_sample_mask)
5695 psx.InputCoverageMaskState = ICMS_NONE;
5696 else if (wm_prog_data->post_depth_coverage)
5697 psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
5698 else if (wm_prog_data->inner_coverage &&
5699 cso->conservative_rasterization)
5700 psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
5701 else
5702 psx.InputCoverageMaskState = ICMS_NORMAL;
5703 #else
5704 psx.PixelShaderUsesInputCoverageMask =
5705 wm_prog_data->uses_sample_mask;
5706 #endif
5707 }
5708
5709 uint32_t *shader_ps = (uint32_t *) shader->derived_data;
5710 uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length);
5711 iris_emit_merge(batch, shader_ps, ps_state,
5712 GENX(3DSTATE_PS_length));
5713 iris_emit_merge(batch, shader_psx, psx_state,
5714 GENX(3DSTATE_PS_EXTRA_length));
5715 } else {
5716 iris_batch_emit(batch, shader->derived_data,
5717 iris_derived_program_state_size(stage));
5718 }
5719 } else {
5720 if (stage == MESA_SHADER_TESS_EVAL) {
5721 iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
5722 iris_emit_cmd(batch, GENX(3DSTATE_TE), te);
5723 iris_emit_cmd(batch, GENX(3DSTATE_DS), ds);
5724 } else if (stage == MESA_SHADER_GEOMETRY) {
5725 iris_emit_cmd(batch, GENX(3DSTATE_GS), gs);
5726 }
5727 }
5728 }
5729
5730 if (ice->state.streamout_active) {
5731 if (dirty & IRIS_DIRTY_SO_BUFFERS) {
5732 iris_batch_emit(batch, genx->so_buffers,
5733 4 * 4 * GENX(3DSTATE_SO_BUFFER_length));
5734 for (int i = 0; i < 4; i++) {
5735 struct iris_stream_output_target *tgt =
5736 (void *) ice->state.so_target[i];
5737 if (tgt) {
5738 tgt->zeroed = true;
5739 iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
5740 true);
5741 iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
5742 true);
5743 }
5744 }
5745 }
5746
5747 if ((dirty & IRIS_DIRTY_SO_DECL_LIST) && ice->state.streamout) {
5748 uint32_t *decl_list =
5749 ice->state.streamout + GENX(3DSTATE_STREAMOUT_length);
5750 iris_batch_emit(batch, decl_list, 4 * ((decl_list[0] & 0xff) + 2));
5751 }
5752
5753 if (dirty & IRIS_DIRTY_STREAMOUT) {
5754 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
5755
5756 uint32_t dynamic_sol[GENX(3DSTATE_STREAMOUT_length)];
5757 iris_pack_command(GENX(3DSTATE_STREAMOUT), dynamic_sol, sol) {
5758 sol.SOFunctionEnable = true;
5759 sol.SOStatisticsEnable = true;
5760
5761 sol.RenderingDisable = cso_rast->rasterizer_discard &&
5762 !ice->state.prims_generated_query_active;
5763 sol.ReorderMode = cso_rast->flatshade_first ? LEADING : TRAILING;
5764 }
5765
5766 assert(ice->state.streamout);
5767
5768 iris_emit_merge(batch, ice->state.streamout, dynamic_sol,
5769 GENX(3DSTATE_STREAMOUT_length));
5770 }
5771 } else {
5772 if (dirty & IRIS_DIRTY_STREAMOUT) {
5773 iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
5774 }
5775 }
5776
5777 if (dirty & IRIS_DIRTY_CLIP) {
5778 struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
5779 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5780
5781 bool gs_or_tes = ice->shaders.prog[MESA_SHADER_GEOMETRY] ||
5782 ice->shaders.prog[MESA_SHADER_TESS_EVAL];
5783 bool points_or_lines = cso_rast->fill_mode_point_or_line ||
5784 (gs_or_tes ? ice->shaders.output_topology_is_points_or_lines
5785 : ice->state.prim_is_points_or_lines);
5786
5787 uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
5788 iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
5789 cl.StatisticsEnable = ice->state.statistics_counters_enabled;
5790 if (cso_rast->rasterizer_discard)
5791 cl.ClipMode = CLIPMODE_REJECT_ALL;
5792 else if (ice->state.window_space_position)
5793 cl.ClipMode = CLIPMODE_ACCEPT_ALL;
5794 else
5795 cl.ClipMode = CLIPMODE_NORMAL;
5796
5797 cl.PerspectiveDivideDisable = ice->state.window_space_position;
5798 cl.ViewportXYClipTestEnable = !points_or_lines;
5799
5800 if (wm_prog_data->barycentric_interp_modes &
5801 BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
5802 cl.NonPerspectiveBarycentricEnable = true;
5803
5804 cl.ForceZeroRTAIndexEnable = cso_fb->layers <= 1;
5805 cl.MaximumVPIndex = ice->state.num_viewports - 1;
5806 }
5807 iris_emit_merge(batch, cso_rast->clip, dynamic_clip,
5808 ARRAY_SIZE(cso_rast->clip));
5809 }
5810
5811 if (dirty & (IRIS_DIRTY_RASTER | IRIS_DIRTY_URB)) {
5812 struct iris_rasterizer_state *cso = ice->state.cso_rast;
5813 iris_batch_emit(batch, cso->raster, sizeof(cso->raster));
5814
5815 uint32_t dynamic_sf[GENX(3DSTATE_SF_length)];
5816 iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) {
5817 sf.ViewportTransformEnable = !ice->state.window_space_position;
5818
5819 #if GEN_GEN >= 12
5820 sf.DerefBlockSize = ice->state.urb_deref_block_size;
5821 #endif
5822 }
5823 iris_emit_merge(batch, cso->sf, dynamic_sf,
5824 ARRAY_SIZE(dynamic_sf));
5825 }
5826
5827 if (dirty & IRIS_DIRTY_WM) {
5828 struct iris_rasterizer_state *cso = ice->state.cso_rast;
5829 uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];
5830
5831 iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
5832 wm.StatisticsEnable = ice->state.statistics_counters_enabled;
5833
5834 wm.BarycentricInterpolationMode =
5835 wm_prog_data->barycentric_interp_modes;
5836
5837 if (wm_prog_data->early_fragment_tests)
5838 wm.EarlyDepthStencilControl = EDSC_PREPS;
5839 else if (wm_prog_data->has_side_effects)
5840 wm.EarlyDepthStencilControl = EDSC_PSEXEC;
5841
5842 /* We could skip this bit if color writes are enabled. */
5843 if (wm_prog_data->has_side_effects || wm_prog_data->uses_kill)
5844 wm.ForceThreadDispatchEnable = ForceON;
5845 }
5846 iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
5847 }
5848
5849 if (dirty & IRIS_DIRTY_SBE) {
5850 iris_emit_sbe(batch, ice);
5851 }
5852
5853 if (dirty & IRIS_DIRTY_PS_BLEND) {
5854 struct iris_blend_state *cso_blend = ice->state.cso_blend;
5855 struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
5856 const struct shader_info *fs_info =
5857 iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
5858
5859 uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
5860 iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
5861 pb.HasWriteableRT = has_writeable_rt(cso_blend, fs_info);
5862 pb.AlphaTestEnable = cso_zsa->alpha.enabled;
5863
5864 /* The dual source blending docs caution against using SRC1 factors
5865 * when the shader doesn't use a dual source render target write.
5866 * Empirically, this can lead to GPU hangs, and the results are
5867 * undefined anyway, so simply disable blending to avoid the hang.
5868 */
5869 pb.ColorBufferBlendEnable = (cso_blend->blend_enables & 1) &&
5870 (!cso_blend->dual_color_blending || wm_prog_data->dual_src_blend);
5871 }
5872
5873 iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
5874 ARRAY_SIZE(cso_blend->ps_blend));
5875 }
5876
5877 if (dirty & IRIS_DIRTY_WM_DEPTH_STENCIL) {
5878 struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
5879 #if GEN_GEN >= 9
5880 struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
5881 uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
5882 iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
5883 wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
5884 wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
5885 }
5886 iris_emit_merge(batch, cso->wmds, stencil_refs, ARRAY_SIZE(cso->wmds));
5887 #else
5888 iris_batch_emit(batch, cso->wmds, sizeof(cso->wmds));
5889 #endif
5890
5891 #if GEN_GEN >= 12
5892 iris_batch_emit(batch, cso->depth_bounds, sizeof(cso->depth_bounds));
5893 #endif
5894 }
5895
5896 if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
5897 uint32_t scissor_offset =
5898 emit_state(batch, ice->state.dynamic_uploader,
5899 &ice->state.last_res.scissor,
5900 ice->state.scissors,
5901 sizeof(struct pipe_scissor_state) *
5902 ice->state.num_viewports, 32);
5903
5904 iris_emit_cmd(batch, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
5905 ptr.ScissorRectPointer = scissor_offset;
5906 }
5907 }
5908
5909 if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
5910 struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
5911
5912       /* Do not emit the clear params yet.  We need to update the clear value
5913 * first.
5914 */
5915 uint32_t clear_length = GENX(3DSTATE_CLEAR_PARAMS_length) * 4;
5916       uint32_t cso_z_size = batch->screen->isl_dev.ds.size - clear_length;
5917
5918 #if GEN_GEN == 12
5919 /* GEN:BUG:14010455700
5920 *
5921 * ISL will change some CHICKEN registers depending on the depth surface
5922 * format, along with emitting the depth and stencil packets. In that
5923 * case, we want to do a depth flush and stall, so the pipeline is not
5924 * using these settings while we change the registers.
5925 */
5926 iris_emit_end_of_pipe_sync(batch,
5927 "Workaround: Stop pipeline for 14010455700",
5928 PIPE_CONTROL_DEPTH_STALL |
5929 PIPE_CONTROL_DEPTH_CACHE_FLUSH);
5930 #endif
5931
5932 iris_batch_emit(batch, cso_z->packets, cso_z_size);
5933 if (GEN_GEN >= 12) {
5934 /* GEN:BUG:1408224581
5935 *
5936           * Workaround: Gen12LP A-step only.  An additional pipe control with
5937           * post-sync = store dword operation is required; i.e., emit an
5938           * additional pipe control after the stencil state whenever the
5939           * surface state bits of this state change.
5940 */
5941 iris_emit_pipe_control_write(batch, "WA for stencil state",
5942 PIPE_CONTROL_WRITE_IMMEDIATE,
5943 batch->screen->workaround_bo, 0, 0);
5944 }
5945
5946 union isl_color_value clear_value = { .f32 = { 0, } };
5947
5948 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5949 if (cso_fb->zsbuf) {
5950 struct iris_resource *zres, *sres;
5951 iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
5952 &zres, &sres);
5953 if (zres && zres->aux.bo)
5954 clear_value = iris_resource_get_clear_color(zres, NULL, NULL);
5955 }
5956
5957 uint32_t clear_params[GENX(3DSTATE_CLEAR_PARAMS_length)];
5958 iris_pack_command(GENX(3DSTATE_CLEAR_PARAMS), clear_params, clear) {
5959 clear.DepthClearValueValid = true;
5960 clear.DepthClearValue = clear_value.f32[0];
5961 }
5962 iris_batch_emit(batch, clear_params, clear_length);
5963 }
5964
5965 if (dirty & (IRIS_DIRTY_DEPTH_BUFFER | IRIS_DIRTY_WM_DEPTH_STENCIL)) {
5966 /* Listen for buffer changes, and also write enable changes. */
5967 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5968 pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
5969 }
5970
5971 if (dirty & IRIS_DIRTY_POLYGON_STIPPLE) {
5972 iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
5973 for (int i = 0; i < 32; i++) {
5974 poly.PatternRow[i] = ice->state.poly_stipple.stipple[i];
5975 }
5976 }
5977 }
5978
5979 if (dirty & IRIS_DIRTY_LINE_STIPPLE) {
5980 struct iris_rasterizer_state *cso = ice->state.cso_rast;
5981 iris_batch_emit(batch, cso->line_stipple, sizeof(cso->line_stipple));
5982 }
5983
5984 if (dirty & IRIS_DIRTY_VF_TOPOLOGY) {
5985 iris_emit_cmd(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
5986 topo.PrimitiveTopologyType =
5987 translate_prim_type(draw->mode, draw->vertices_per_patch);
5988 }
5989 }
5990
5991 if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
5992 int count = util_bitcount64(ice->state.bound_vertex_buffers);
5993 uint64_t dynamic_bound = ice->state.bound_vertex_buffers;
5994
5995 if (ice->state.vs_uses_draw_params) {
5996 assert(ice->draw.draw_params.res);
5997
5998 struct iris_vertex_buffer_state *state =
5999 &(ice->state.genx->vertex_buffers[count]);
6000 pipe_resource_reference(&state->resource, ice->draw.draw_params.res);
6001 struct iris_resource *res = (void *) state->resource;
6002
6003 iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
6004 vb.VertexBufferIndex = count;
6005 vb.AddressModifyEnable = true;
6006 vb.BufferPitch = 0;
6007 vb.BufferSize = res->bo->size - ice->draw.draw_params.offset;
6008 vb.BufferStartingAddress =
6009 ro_bo(NULL, res->bo->gtt_offset +
6010 (int) ice->draw.draw_params.offset);
6011 vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev);
6012 }
6013 dynamic_bound |= 1ull << count;
6014 count++;
6015 }
6016
6017 if (ice->state.vs_uses_derived_draw_params) {
6018 struct iris_vertex_buffer_state *state =
6019 &(ice->state.genx->vertex_buffers[count]);
6020 pipe_resource_reference(&state->resource,
6021 ice->draw.derived_draw_params.res);
6022 struct iris_resource *res = (void *) ice->draw.derived_draw_params.res;
6023
6024 iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
6025 vb.VertexBufferIndex = count;
6026 vb.AddressModifyEnable = true;
6027 vb.BufferPitch = 0;
6028 vb.BufferSize =
6029 res->bo->size - ice->draw.derived_draw_params.offset;
6030 vb.BufferStartingAddress =
6031 ro_bo(NULL, res->bo->gtt_offset +
6032 (int) ice->draw.derived_draw_params.offset);
6033 vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev);
6034 }
6035 dynamic_bound |= 1ull << count;
6036 count++;
6037 }
6038
6039 if (count) {
6040 #if GEN_GEN >= 11
6041 /* Gen11+ doesn't need the cache workaround below */
6042 uint64_t bound = dynamic_bound;
6043 while (bound) {
6044 const int i = u_bit_scan64(&bound);
6045 iris_use_optional_res(batch, genx->vertex_buffers[i].resource,
6046 false);
6047 }
6048 #else
6049 /* The VF cache designers cut corners, and made the cache key's
6050 * <VertexBufferIndex, Memory Address> tuple only consider the bottom
6051 * 32 bits of the address. If you have two vertex buffers which get
6052 * placed exactly 4 GiB apart and use them in back-to-back draw calls,
6053 * you can get collisions (even within a single batch).
6054 *
6055 * So, we need to do a VF cache invalidate if the buffer for a VB
6056        * slot changes [48:32] address bits from the previous time.
6057 */
6058 unsigned flush_flags = 0;
6059
6060 uint64_t bound = dynamic_bound;
6061 while (bound) {
6062 const int i = u_bit_scan64(&bound);
6063 uint16_t high_bits = 0;
6064
6065 struct iris_resource *res =
6066 (void *) genx->vertex_buffers[i].resource;
6067 if (res) {
6068 iris_use_pinned_bo(batch, res->bo, false);
6069
6070 high_bits = res->bo->gtt_offset >> 32ull;
6071 if (high_bits != ice->state.last_vbo_high_bits[i]) {
6072 flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE |
6073 PIPE_CONTROL_CS_STALL;
6074 ice->state.last_vbo_high_bits[i] = high_bits;
6075 }
6076 }
6077 }
6078
6079 if (flush_flags) {
6080 iris_emit_pipe_control_flush(batch,
6081 "workaround: VF cache 32-bit key [VB]",
6082 flush_flags);
6083 }
6084 #endif
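      /* A standalone illustration of the aliasing described above: any
       * two addresses exactly 4GiB apart truncate to the same 32-bit
       * key, so the VF cache could wrongly hit across them.
       */
#if 0
      uint64_t addr_a = 0x100001000ull;
      uint64_t addr_b = addr_a + (1ull << 32);     /* 4GiB apart */
      assert((uint32_t) addr_a == (uint32_t) addr_b);
#endif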
6085
6086 const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
6087
6088 uint32_t *map =
6089 iris_get_command_space(batch, 4 * (1 + vb_dwords * count));
6090 _iris_pack_command(batch, GENX(3DSTATE_VERTEX_BUFFERS), map, vb) {
6091 vb.DWordLength = (vb_dwords * count + 1) - 2;
6092 }
6093 map += 1;
6094
6095 bound = dynamic_bound;
6096 while (bound) {
6097 const int i = u_bit_scan64(&bound);
6098 memcpy(map, genx->vertex_buffers[i].state,
6099 sizeof(uint32_t) * vb_dwords);
6100 map += vb_dwords;
6101 }
6102 }
6103 }
6104
6105 if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) {
6106 struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
6107 const unsigned entries = MAX2(cso->count, 1);
6108 if (!(ice->state.vs_needs_sgvs_element ||
6109 ice->state.vs_uses_derived_draw_params ||
6110 ice->state.vs_needs_edge_flag)) {
6111 iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) *
6112 (1 + entries * GENX(VERTEX_ELEMENT_STATE_length)));
6113 } else {
6114 uint32_t dynamic_ves[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
6115 const unsigned dyn_count = cso->count +
6116 ice->state.vs_needs_sgvs_element +
6117 ice->state.vs_uses_derived_draw_params;
6118
6119 iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS),
6120 &dynamic_ves, ve) {
6121 ve.DWordLength =
6122 1 + GENX(VERTEX_ELEMENT_STATE_length) * dyn_count - 2;
6123 }
6124 memcpy(&dynamic_ves[1], &cso->vertex_elements[1],
6125 (cso->count - ice->state.vs_needs_edge_flag) *
6126 GENX(VERTEX_ELEMENT_STATE_length) * sizeof(uint32_t));
6127 uint32_t *ve_pack_dest =
6128 &dynamic_ves[1 + (cso->count - ice->state.vs_needs_edge_flag) *
6129 GENX(VERTEX_ELEMENT_STATE_length)];
6130
6131 if (ice->state.vs_needs_sgvs_element) {
6132 uint32_t base_ctrl = ice->state.vs_uses_draw_params ?
6133 VFCOMP_STORE_SRC : VFCOMP_STORE_0;
6134 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
6135 ve.Valid = true;
6136 ve.VertexBufferIndex =
6137 util_bitcount64(ice->state.bound_vertex_buffers);
6138 ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
6139 ve.Component0Control = base_ctrl;
6140 ve.Component1Control = base_ctrl;
6141 ve.Component2Control = VFCOMP_STORE_0;
6142 ve.Component3Control = VFCOMP_STORE_0;
6143 }
6144 ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
6145 }
6146 if (ice->state.vs_uses_derived_draw_params) {
6147 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
6148 ve.Valid = true;
6149 ve.VertexBufferIndex =
6150 util_bitcount64(ice->state.bound_vertex_buffers) +
6151 ice->state.vs_uses_draw_params;
6152 ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
6153 ve.Component0Control = VFCOMP_STORE_SRC;
6154 ve.Component1Control = VFCOMP_STORE_SRC;
6155 ve.Component2Control = VFCOMP_STORE_0;
6156 ve.Component3Control = VFCOMP_STORE_0;
6157 }
6158 ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
6159 }
6160 if (ice->state.vs_needs_edge_flag) {
6161 for (int i = 0; i < GENX(VERTEX_ELEMENT_STATE_length); i++)
6162 ve_pack_dest[i] = cso->edgeflag_ve[i];
6163 }
6164
6165 iris_batch_emit(batch, &dynamic_ves, sizeof(uint32_t) *
6166 (1 + dyn_count * GENX(VERTEX_ELEMENT_STATE_length)));
6167 }
6168
6169 if (!ice->state.vs_needs_edge_flag) {
6170 iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) *
6171 entries * GENX(3DSTATE_VF_INSTANCING_length));
6172 } else {
6173 assert(cso->count > 0);
6174 const unsigned edgeflag_index = cso->count - 1;
6175 uint32_t dynamic_vfi[33 * GENX(3DSTATE_VF_INSTANCING_length)];
6176 memcpy(&dynamic_vfi[0], cso->vf_instancing, edgeflag_index *
6177 GENX(3DSTATE_VF_INSTANCING_length) * sizeof(uint32_t));
6178
6179 uint32_t *vfi_pack_dest = &dynamic_vfi[0] +
6180 edgeflag_index * GENX(3DSTATE_VF_INSTANCING_length);
6181 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
6182 vi.VertexElementIndex = edgeflag_index +
6183 ice->state.vs_needs_sgvs_element +
6184 ice->state.vs_uses_derived_draw_params;
6185 }
6186 for (int i = 0; i < GENX(3DSTATE_VF_INSTANCING_length); i++)
6187 vfi_pack_dest[i] |= cso->edgeflag_vfi[i];
6188
6189 iris_batch_emit(batch, &dynamic_vfi[0], sizeof(uint32_t) *
6190 entries * GENX(3DSTATE_VF_INSTANCING_length));
6191 }
6192 }
6193
6194 if (dirty & IRIS_DIRTY_VF_SGVS) {
6195 const struct brw_vs_prog_data *vs_prog_data = (void *)
6196 ice->shaders.prog[MESA_SHADER_VERTEX]->prog_data;
6197 struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
6198
6199 iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgv) {
6200 if (vs_prog_data->uses_vertexid) {
6201 sgv.VertexIDEnable = true;
6202 sgv.VertexIDComponentNumber = 2;
6203 sgv.VertexIDElementOffset =
6204 cso->count - ice->state.vs_needs_edge_flag;
6205 }
6206
6207 if (vs_prog_data->uses_instanceid) {
6208 sgv.InstanceIDEnable = true;
6209 sgv.InstanceIDComponentNumber = 3;
6210 sgv.InstanceIDElementOffset =
6211 cso->count - ice->state.vs_needs_edge_flag;
6212 }
6213 }
6214 }
6215
6216 if (dirty & IRIS_DIRTY_VF) {
6217 iris_emit_cmd(batch, GENX(3DSTATE_VF), vf) {
6218 if (draw->primitive_restart) {
6219 vf.IndexedDrawCutIndexEnable = true;
6220 vf.CutIndex = draw->restart_index;
6221 }
6222 }
6223 }
6224
6225 if (dirty & IRIS_DIRTY_VF_STATISTICS) {
6226 iris_emit_cmd(batch, GENX(3DSTATE_VF_STATISTICS), vf) {
6227 vf.StatisticsEnable = true;
6228 }
6229 }
6230
6231 #if GEN_GEN == 8
6232 if (dirty & IRIS_DIRTY_PMA_FIX) {
6233 bool enable = want_pma_fix(ice);
6234 genX(update_pma_fix)(ice, batch, enable);
6235 }
6236 #endif
6237
6238 if (ice->state.current_hash_scale != 1)
6239 genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1);
6240
6241 #if GEN_GEN >= 12
6242 genX(invalidate_aux_map_state)(batch);
6243 #endif
6244 }
6245
6246 static void
6247 iris_upload_render_state(struct iris_context *ice,
6248 struct iris_batch *batch,
6249 const struct pipe_draw_info *draw)
6250 {
6251 bool use_predicate = ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT;
6252
6253 /* Always pin the binder. If we're emitting new binding table pointers,
6254 * we need it. If not, we're probably inheriting old tables via the
6255 * context, and need it anyway. Since true zero-bindings cases are
6256 * practically non-existent, just pin it and avoid last_res tracking.
6257 */
6258 iris_use_pinned_bo(batch, ice->state.binder.bo, false);
6259
6260 if (!batch->contains_draw) {
6261 iris_restore_render_saved_bos(ice, batch, draw);
6262 batch->contains_draw = true;
6263 }
6264
6265 iris_upload_dirty_render_state(ice, batch, draw);
6266
6267 if (draw->index_size > 0) {
6268 unsigned offset;
6269
6270 if (draw->has_user_indices) {
6271 u_upload_data(ice->ctx.stream_uploader, 0,
6272 draw->count * draw->index_size, 4, draw->index.user,
6273 &offset, &ice->state.last_res.index_buffer);
6274 } else {
6275 struct iris_resource *res = (void *) draw->index.resource;
6276 res->bind_history |= PIPE_BIND_INDEX_BUFFER;
6277
6278 pipe_resource_reference(&ice->state.last_res.index_buffer,
6279 draw->index.resource);
6280 offset = 0;
6281 }
6282
6283 struct iris_genx_state *genx = ice->state.genx;
6284 struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
6285
6286 uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)];
6287 iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) {
6288 ib.IndexFormat = draw->index_size >> 1;
6289 ib.MOCS = iris_mocs(bo, &batch->screen->isl_dev);
6290 ib.BufferSize = bo->size - offset;
6291 ib.BufferStartingAddress = ro_bo(NULL, bo->gtt_offset + offset);
6292 }
6293
6294 if (memcmp(genx->last_index_buffer, ib_packet, sizeof(ib_packet)) != 0) {
6295 memcpy(genx->last_index_buffer, ib_packet, sizeof(ib_packet));
6296 iris_batch_emit(batch, ib_packet, sizeof(ib_packet));
6297 iris_use_pinned_bo(batch, bo, false);
6298 }
6299
6300 #if GEN_GEN < 11
6301    /* The VF cache key only uses 32 bits, see vertex buffer comment above */
6302 uint16_t high_bits = bo->gtt_offset >> 32ull;
6303 if (high_bits != ice->state.last_index_bo_high_bits) {
6304 iris_emit_pipe_control_flush(batch,
6305 "workaround: VF cache 32-bit key [IB]",
6306 PIPE_CONTROL_VF_CACHE_INVALIDATE |
6307 PIPE_CONTROL_CS_STALL);
6308 ice->state.last_index_bo_high_bits = high_bits;
6309 }
6310 #endif
6311 }
6312
6313 #define _3DPRIM_END_OFFSET 0x2420
6314 #define _3DPRIM_START_VERTEX 0x2430
6315 #define _3DPRIM_VERTEX_COUNT 0x2434
6316 #define _3DPRIM_INSTANCE_COUNT 0x2438
6317 #define _3DPRIM_START_INSTANCE 0x243C
6318 #define _3DPRIM_BASE_VERTEX 0x2440
6319
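   /* For reference, the parameter layout the MI_LOAD_REGISTER_MEM
    * sequence below consumes, matching GL/Vulkan indirect draw records;
    * the field names here are descriptive, not API.  For non-indexed
    * draws there is no base_vertex, so start_instance sits at byte 12
    * and _3DPRIM_BASE_VERTEX is cleared with an LRI instead.
    */
#if 0
   struct indirect_draw_record {      /* byte offset -> register       */
      uint32_t vertex_count;          /*  0 -> _3DPRIM_VERTEX_COUNT    */
      uint32_t instance_count;        /*  4 -> _3DPRIM_INSTANCE_COUNT  */
      uint32_t start_vertex;          /*  8 -> _3DPRIM_START_VERTEX    */
      int32_t  base_vertex;           /* 12 -> _3DPRIM_BASE_VERTEX     */
      uint32_t start_instance;        /* 16 -> _3DPRIM_START_INSTANCE  */
   };
#endif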
6320 if (draw->indirect) {
6321 if (draw->indirect->indirect_draw_count) {
6322 use_predicate = true;
6323
6324 struct iris_bo *draw_count_bo =
6325 iris_resource_bo(draw->indirect->indirect_draw_count);
6326 unsigned draw_count_offset =
6327 draw->indirect->indirect_draw_count_offset;
6328
6329 iris_emit_pipe_control_flush(batch,
6330 "ensure indirect draw buffer is flushed",
6331 PIPE_CONTROL_FLUSH_ENABLE);
6332
6333 if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
6334 struct gen_mi_builder b;
6335 gen_mi_builder_init(&b, batch);
6336
6337 /* comparison = draw id < draw count */
6338 struct gen_mi_value comparison =
6339 gen_mi_ult(&b, gen_mi_imm(draw->drawid),
6340 gen_mi_mem32(ro_bo(draw_count_bo,
6341 draw_count_offset)));
6342
6343 /* predicate = comparison & conditional rendering predicate */
6344 gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_RESULT),
6345 gen_mi_iand(&b, comparison,
6346 gen_mi_reg32(CS_GPR(15))));
6347 } else {
6348 uint32_t mi_predicate;
6349
6350 /* Upload the id of the current primitive to MI_PREDICATE_SRC1. */
6351 iris_load_register_imm64(batch, MI_PREDICATE_SRC1, draw->drawid);
6352 /* Upload the current draw count from the draw parameters buffer
6353 * to MI_PREDICATE_SRC0.
6354 */
6355 iris_load_register_mem32(batch, MI_PREDICATE_SRC0,
6356 draw_count_bo, draw_count_offset);
6357 /* Zero the top 32 bits of MI_PREDICATE_SRC0. */
6358 iris_load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0);
6359
6360 if (draw->drawid == 0) {
6361 mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
6362 MI_PREDICATE_COMBINEOP_SET |
6363 MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
6364 } else {
6365 /* While draw_index < draw_count, the comparison (draw_index ==
6366 * draw_count) is FALSE, so the predicate's result will be
6367 * (FALSE) ^ TRUE = TRUE.
6368 * When draw_index == draw_count, the result is (TRUE) ^ TRUE = FALSE.
6369 * After that, all further results will be
6370 * (FALSE) ^ FALSE = FALSE.
6371 */
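/* Worked example (illustrative): with a GPU-side draw count of 3 in
 * a multi-draw of 5, draws 0-2 render, draw 3 flips the predicate
 * to FALSE, and draw 4 stays FALSE, so its 3DPRIMITIVE is discarded.
 */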
6372 mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOAD |
6373 MI_PREDICATE_COMBINEOP_XOR |
6374 MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
6375 }
6376 iris_batch_emit(batch, &mi_predicate, sizeof(uint32_t));
6377 }
6378 }
6379 struct iris_bo *bo = iris_resource_bo(draw->indirect->buffer);
6380 assert(bo);
6381
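/* The indirect parameters follow the ARB_draw_indirect layout:
 * { count, instance_count, first, base_instance } for array draws, and
 * { count, instance_count, first_index, base_vertex, base_instance }
 * for indexed draws - hence the 4-byte offsets loaded below.
 */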
6382 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6383 lrm.RegisterAddress = _3DPRIM_VERTEX_COUNT;
6384 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 0);
6385 }
6386 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6387 lrm.RegisterAddress = _3DPRIM_INSTANCE_COUNT;
6388 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 4);
6389 }
6390 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6391 lrm.RegisterAddress = _3DPRIM_START_VERTEX;
6392 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 8);
6393 }
6394 if (draw->index_size) {
6395 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6396 lrm.RegisterAddress = _3DPRIM_BASE_VERTEX;
6397 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
6398 }
6399 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6400 lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
6401 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 16);
6402 }
6403 } else {
6404 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6405 lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
6406 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
6407 }
6408 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
6409 lri.RegisterOffset = _3DPRIM_BASE_VERTEX;
6410 lri.DataDWord = 0;
6411 }
6412 }
6413 } else if (draw->count_from_stream_output) {
6414 struct iris_stream_output_target *so =
6415 (void *) draw->count_from_stream_output;
6416
6417 /* XXX: Replace with actual cache tracking */
6418 iris_emit_pipe_control_flush(batch,
6419 "draw count from stream output stall",
6420 PIPE_CONTROL_CS_STALL);
6421
6422 struct gen_mi_builder b;
6423 gen_mi_builder_init(&b, batch);
6424
6425 struct iris_address addr =
6426 ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
6427 struct gen_mi_value offset =
6428 gen_mi_iadd_imm(&b, gen_mi_mem32(addr), -so->base.buffer_offset);
6429
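/* The GPU computes (current SO write offset - buffer base offset) /
 * vertex stride, i.e. the number of vertices the previous transform
 * feedback pass wrote, without any CPU readback.
 */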
6430 gen_mi_store(&b, gen_mi_reg32(_3DPRIM_VERTEX_COUNT),
6431 gen_mi_udiv32_imm(&b, offset, so->stride));
6432
6433 _iris_emit_lri(batch, _3DPRIM_START_VERTEX, 0);
6434 _iris_emit_lri(batch, _3DPRIM_BASE_VERTEX, 0);
6435 _iris_emit_lri(batch, _3DPRIM_START_INSTANCE, 0);
6436 _iris_emit_lri(batch, _3DPRIM_INSTANCE_COUNT, draw->instance_count);
6437 }
6438
6439 iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
6440 prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;
6441 prim.PredicateEnable = use_predicate;
6442
6443 if (draw->indirect || draw->count_from_stream_output) {
6444 prim.IndirectParameterEnable = true;
6445 } else {
6446 prim.StartInstanceLocation = draw->start_instance;
6447 prim.InstanceCount = draw->instance_count;
6448 prim.VertexCountPerInstance = draw->count;
6449
6450 prim.StartVertexLocation = draw->start;
6451
6452 if (draw->index_size) {
6453 prim.BaseVertexLocation += draw->index_bias;
6454 } else {
6455 prim.StartVertexLocation += draw->index_bias;
6456 }
6457 }
6458 }
6459 }
6460
6461 static void
6462 iris_upload_compute_state(struct iris_context *ice,
6463 struct iris_batch *batch,
6464 const struct pipe_grid_info *grid)
6465 {
6466 const uint64_t dirty = ice->state.dirty;
6467 struct iris_screen *screen = batch->screen;
6468 const struct gen_device_info *devinfo = &screen->devinfo;
6469 struct iris_binder *binder = &ice->state.binder;
6470 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
6471 struct iris_compiled_shader *shader =
6472 ice->shaders.prog[MESA_SHADER_COMPUTE];
6473 struct brw_stage_prog_data *prog_data = shader->prog_data;
6474 struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
6475
6476 const uint32_t group_size = grid->block[0] * grid->block[1] * grid->block[2];
6477 const unsigned threads = DIV_ROUND_UP(group_size, cs_prog_data->simd_size);
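/* Illustrative: a local size of 8x8x1 gives group_size = 64, which at
 * SIMD16 needs DIV_ROUND_UP(64, 16) = 4 hardware threads; a 10x10x1
 * group (100 invocations) needs 7, the last one only partially full.
 */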
6478
6479 /* Always pin the binder. If we're emitting new binding table pointers,
6480 * we need it. If not, we're probably inheriting old tables via the
6481 * context, and need it anyway. Since true zero-bindings cases are
6482 * practically non-existent, just pin it and avoid last_res tracking.
6483 */
6484 iris_use_pinned_bo(batch, ice->state.binder.bo, false);
6485
6486 if ((dirty & IRIS_DIRTY_CONSTANTS_CS) && shs->sysvals_need_upload)
6487 upload_sysvals(ice, MESA_SHADER_COMPUTE);
6488
6489 if (dirty & IRIS_DIRTY_BINDINGS_CS)
6490 iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);
6491
6492 if (dirty & IRIS_DIRTY_SAMPLER_STATES_CS)
6493 iris_upload_sampler_states(ice, MESA_SHADER_COMPUTE);
6494
6495 iris_use_optional_res(batch, shs->sampler_table.res, false);
6496 iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false);
6497
6498 if (ice->state.need_border_colors)
6499 iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false);
6500
6501 #if GEN_GEN >= 12
6502 genX(invalidate_aux_map_state)(batch);
6503 #endif
6504
6505 if (dirty & IRIS_DIRTY_CS) {
6506 /* The MEDIA_VFE_STATE documentation for Gen8+ says:
6507 *
6508 * "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
6509 * the only bits that are changed are scoreboard related: Scoreboard
6510 * Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta. For
6511 * these scoreboard related states, a MEDIA_STATE_FLUSH is
6512 * sufficient."
6513 */
6514 iris_emit_pipe_control_flush(batch,
6515 "workaround: stall before MEDIA_VFE_STATE",
6516 PIPE_CONTROL_CS_STALL);
6517
6518 iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
6519 if (prog_data->total_scratch) {
6520 struct iris_bo *bo =
6521 iris_get_scratch_space(ice, prog_data->total_scratch,
6522 MESA_SHADER_COMPUTE);
6523 vfe.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
6524 vfe.ScratchSpaceBasePointer = rw_bo(bo, 0);
6525 }
6526
6527 vfe.MaximumNumberofThreads =
6528 devinfo->max_cs_threads * screen->subslice_total - 1;
6529 #if GEN_GEN < 11
6530 vfe.ResetGatewayTimer =
6531 Resettingrelativetimerandlatchingtheglobaltimestamp;
6532 #endif
6533 #if GEN_GEN == 8
6534 vfe.BypassGatewayControl = true;
6535 #endif
6536 vfe.NumberofURBEntries = 2;
6537 vfe.URBEntryAllocationSize = 2;
6538
6539 vfe.CURBEAllocationSize =
6540 ALIGN(cs_prog_data->push.per_thread.regs * threads +
6541 cs_prog_data->push.cross_thread.regs, 2);
6542 }
6543 }
6544
6545 /* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */
6546 if (dirty & IRIS_DIRTY_CS) {
6547 uint32_t curbe_data_offset = 0;
6548 assert(cs_prog_data->push.cross_thread.dwords == 0 &&
6549 cs_prog_data->push.per_thread.dwords == 1 &&
6550 cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
6551 const unsigned push_const_size =
6552 brw_cs_push_const_total_size(cs_prog_data, threads);
6553 uint32_t *curbe_data_map =
6554 stream_state(batch, ice->state.dynamic_uploader,
6555 &ice->state.last_res.cs_thread_ids,
6556 ALIGN(push_const_size, 64), 64,
6557 &curbe_data_offset);
6558 assert(curbe_data_map);
6559 memset(curbe_data_map, 0x5a, ALIGN(push_const_size, 64));
6560 iris_fill_cs_push_const_buffer(cs_prog_data, threads, curbe_data_map);
6561
6562 iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
6563 curbe.CURBETotalDataLength = ALIGN(push_const_size, 64);
6564 curbe.CURBEDataStartAddress = curbe_data_offset;
6565 }
6566 }
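/* Illustrative sizing, given the layout asserted above (no cross-thread
 * data, one per-thread register holding the subgroup ID): with 4
 * threads, the total push constant size is 4 * 32 = 128 bytes, so
 * CURBETotalDataLength = ALIGN(128, 64) = 128.
 */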
6567
6568 if (dirty & (IRIS_DIRTY_SAMPLER_STATES_CS |
6569 IRIS_DIRTY_BINDINGS_CS |
6570 IRIS_DIRTY_CONSTANTS_CS |
6571 IRIS_DIRTY_CS)) {
6572 uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
6573
6574 iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
6575 idd.SamplerStatePointer = shs->sampler_table.offset;
6576 idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
6577 idd.NumberofThreadsinGPGPUThreadGroup = threads;
6578 }
6579
6580 for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
6581 desc[i] |= ((uint32_t *) shader->derived_data)[i];
6582
6583 iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
6584 load.InterfaceDescriptorTotalLength =
6585 GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
6586 load.InterfaceDescriptorDataStartAddress =
6587 emit_state(batch, ice->state.dynamic_uploader,
6588 &ice->state.last_res.cs_desc, desc, sizeof(desc), 64);
6589 }
6590 }
6591
6592 uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);
6593 uint32_t right_mask;
6594
6595 if (remainder > 0)
6596 right_mask = ~0u >> (32 - remainder);
6597 else
6598 right_mask = ~0u >> (32 - cs_prog_data->simd_size);
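/* Illustrative: at SIMD16 with group_size = 100, remainder = 100 & 15
 * = 4, so right_mask = ~0u >> 28 = 0xf - only four channels execute in
 * the final thread. When group_size is a multiple of simd_size, the
 * full mask applies (0xffff for SIMD16).
 */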
6599
6600 #define GPGPU_DISPATCHDIMX 0x2500
6601 #define GPGPU_DISPATCHDIMY 0x2504
6602 #define GPGPU_DISPATCHDIMZ 0x2508
6603
6604 if (grid->indirect) {
6605 struct iris_state_ref *grid_size = &ice->state.grid_size;
6606 struct iris_bo *bo = iris_resource_bo(grid_size->res);
6607 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6608 lrm.RegisterAddress = GPGPU_DISPATCHDIMX;
6609 lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 0);
6610 }
6611 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6612 lrm.RegisterAddress = GPGPU_DISPATCHDIMY;
6613 lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 4);
6614 }
6615 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6616 lrm.RegisterAddress = GPGPU_DISPATCHDIMZ;
6617 lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 8);
6618 }
6619 }
6620
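/* GPGPU_WALKER's SIMDSize field is an enum (SIMD8 = 0, SIMD16 = 1,
 * SIMD32 = 2), so the integer division by 16 below maps 8 -> 0,
 * 16 -> 1, and 32 -> 2.
 */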
6621 iris_emit_cmd(batch, GENX(GPGPU_WALKER), ggw) {
6622 ggw.IndirectParameterEnable = grid->indirect != NULL;
6623 ggw.SIMDSize = cs_prog_data->simd_size / 16;
6624 ggw.ThreadDepthCounterMaximum = 0;
6625 ggw.ThreadHeightCounterMaximum = 0;
6626 ggw.ThreadWidthCounterMaximum = threads - 1;
6627 ggw.ThreadGroupIDXDimension = grid->grid[0];
6628 ggw.ThreadGroupIDYDimension = grid->grid[1];
6629 ggw.ThreadGroupIDZDimension = grid->grid[2];
6630 ggw.RightExecutionMask = right_mask;
6631 ggw.BottomExecutionMask = 0xffffffff;
6632 }
6633
6634 iris_emit_cmd(batch, GENX(MEDIA_STATE_FLUSH), msf);
6635
6636 if (!batch->contains_draw) {
6637 iris_restore_compute_saved_bos(ice, batch, grid);
6638 batch->contains_draw = true;
6639 }
6640 }
6641
6642 /**
6643 * State module teardown.
6644 */
6645 static void
6646 iris_destroy_state(struct iris_context *ice)
6647 {
6648 struct iris_genx_state *genx = ice->state.genx;
6649
6650 pipe_resource_reference(&ice->draw.draw_params.res, NULL);
6651 pipe_resource_reference(&ice->draw.derived_draw_params.res, NULL);
6652
6653 /* Loop over all VBOs, including ones for draw parameters */
6654 for (unsigned i = 0; i < ARRAY_SIZE(genx->vertex_buffers); i++) {
6655 pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL);
6656 }
6657
6658 free(ice->state.genx);
6659
6660 for (int i = 0; i < 4; i++) {
6661 pipe_so_target_reference(&ice->state.so_target[i], NULL);
6662 }
6663
6664 for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
6665 pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
6666 }
6667 pipe_surface_reference(&ice->state.framebuffer.zsbuf, NULL);
6668
6669 for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
6670 struct iris_shader_state *shs = &ice->state.shaders[stage];
6671 pipe_resource_reference(&shs->sampler_table.res, NULL);
6672 for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
6673 pipe_resource_reference(&shs->constbuf[i].buffer, NULL);
6674 pipe_resource_reference(&shs->constbuf_surf_state[i].res, NULL);
6675 }
6676 for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
6677 pipe_resource_reference(&shs->image[i].base.resource, NULL);
6678 pipe_resource_reference(&shs->image[i].surface_state.ref.res, NULL);
6679 free(shs->image[i].surface_state.cpu);
6680 }
6681 for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
6682 pipe_resource_reference(&shs->ssbo[i].buffer, NULL);
6683 pipe_resource_reference(&shs->ssbo_surf_state[i].res, NULL);
6684 }
6685 for (int i = 0; i < IRIS_MAX_TEXTURE_SAMPLERS; i++) {
6686 pipe_sampler_view_reference((struct pipe_sampler_view **)
6687 &shs->textures[i], NULL);
6688 }
6689 }
6690
6691 pipe_resource_reference(&ice->state.grid_size.res, NULL);
6692 pipe_resource_reference(&ice->state.grid_surf_state.res, NULL);
6693
6694 pipe_resource_reference(&ice->state.null_fb.res, NULL);
6695 pipe_resource_reference(&ice->state.unbound_tex.res, NULL);
6696
6697 pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
6698 pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
6699 pipe_resource_reference(&ice->state.last_res.color_calc, NULL);
6700 pipe_resource_reference(&ice->state.last_res.scissor, NULL);
6701 pipe_resource_reference(&ice->state.last_res.blend, NULL);
6702 pipe_resource_reference(&ice->state.last_res.index_buffer, NULL);
6703 pipe_resource_reference(&ice->state.last_res.cs_thread_ids, NULL);
6704 pipe_resource_reference(&ice->state.last_res.cs_desc, NULL);
6705 }
6706
6707 /* ------------------------------------------------------------------- */
6708
6709 static void
6710 iris_rebind_buffer(struct iris_context *ice,
6711 struct iris_resource *res)
6712 {
6713 struct pipe_context *ctx = &ice->ctx;
6714 struct iris_genx_state *genx = ice->state.genx;
6715
6716 assert(res->base.target == PIPE_BUFFER);
6717
6718 /* Buffers can't be framebuffer attachments, nor display related,
6719 * and we don't have upstream Clover support.
6720 */
6721 assert(!(res->bind_history & (PIPE_BIND_DEPTH_STENCIL |
6722 PIPE_BIND_RENDER_TARGET |
6723 PIPE_BIND_BLENDABLE |
6724 PIPE_BIND_DISPLAY_TARGET |
6725 PIPE_BIND_CURSOR |
6726 PIPE_BIND_COMPUTE_RESOURCE |
6727 PIPE_BIND_GLOBAL)));
6728
6729 if (res->bind_history & PIPE_BIND_VERTEX_BUFFER) {
6730 uint64_t bound_vbs = ice->state.bound_vertex_buffers;
6731 while (bound_vbs) {
6732 const int i = u_bit_scan64(&bound_vbs);
6733 struct iris_vertex_buffer_state *state = &genx->vertex_buffers[i];
6734
6735 /* Update the CPU struct */
6736 STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32);
6737 STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64);
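/* The asserts above guarantee that the 64-bit BufferStartingAddress
 * field occupies DWords 1-2 of the packed VERTEX_BUFFER_STATE, so we
 * can patch the new BO address directly into the saved DWords.
 */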
6738 uint64_t *addr = (uint64_t *) &state->state[1];
6739 struct iris_bo *bo = iris_resource_bo(state->resource);
6740
6741 if (*addr != bo->gtt_offset + state->offset) {
6742 *addr = bo->gtt_offset + state->offset;
6743 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
6744 }
6745 }
6746 }
6747
6748 /* We don't need to handle PIPE_BIND_INDEX_BUFFER here: we re-emit
6749 * the 3DSTATE_INDEX_BUFFER packet whenever the address changes.
6750 *
6751 * There is also no need to handle these:
6752 * - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw)
6753 * - PIPE_BIND_QUERY_BUFFER (no persistent state references)
6754 */
6755
6756 if (res->bind_history & PIPE_BIND_STREAM_OUTPUT) {
6757 /* XXX: be careful about resetting vs appending... */
6758 assert(false);
6759 }
6760
6761 for (int s = MESA_SHADER_VERTEX; s < MESA_SHADER_STAGES; s++) {
6762 struct iris_shader_state *shs = &ice->state.shaders[s];
6763 enum pipe_shader_type p_stage = stage_to_pipe(s);
6764
6765 if (!(res->bind_stages & (1 << s)))
6766 continue;
6767
6768 if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
6769 /* Skip constant buffer 0, it's for regular uniforms, not UBOs */
6770 uint32_t bound_cbufs = shs->bound_cbufs & ~1u;
6771 while (bound_cbufs) {
6772 const int i = u_bit_scan(&bound_cbufs);
6773 struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
6774 struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
6775
6776 if (res->bo == iris_resource_bo(cbuf->buffer)) {
6777 pipe_resource_reference(&surf_state->res, NULL);
6778 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << s;
6779 }
6780 }
6781 }
6782
6783 if (res->bind_history & PIPE_BIND_SHADER_BUFFER) {
6784 uint32_t bound_ssbos = shs->bound_ssbos;
6785 while (bound_ssbos) {
6786 const int i = u_bit_scan(&bound_ssbos);
6787 struct pipe_shader_buffer *ssbo = &shs->ssbo[i];
6788
6789 if (res->bo == iris_resource_bo(ssbo->buffer)) {
6790 struct pipe_shader_buffer buf = {
6791 .buffer = &res->base,
6792 .buffer_offset = ssbo->buffer_offset,
6793 .buffer_size = ssbo->buffer_size,
6794 };
6795 iris_set_shader_buffers(ctx, p_stage, i, 1, &buf,
6796 (shs->writable_ssbos >> i) & 1);
6797 }
6798 }
6799 }
6800
6801 if (res->bind_history & PIPE_BIND_SAMPLER_VIEW) {
6802 uint32_t bound_sampler_views = shs->bound_sampler_views;
6803 while (bound_sampler_views) {
6804 const int i = u_bit_scan(&bound_sampler_views);
6805 struct iris_sampler_view *isv = shs->textures[i];
6806 struct iris_bo *bo = isv->res->bo;
6807
6808 if (update_surface_state_addrs(ice->state.surface_uploader,
6809 &isv->surface_state, bo)) {
6810 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << s;
6811 }
6812 }
6813 }
6814
6815 if (res->bind_history & PIPE_BIND_SHADER_IMAGE) {
6816 uint32_t bound_image_views = shs->bound_image_views;
6817 while (bound_image_views) {
6818 const int i = u_bit_scan(&bound_image_views);
6819 struct iris_image_view *iv = &shs->image[i];
6820 struct iris_bo *bo = iris_resource_bo(iv->base.resource);
6821
6822 if (update_surface_state_addrs(ice->state.surface_uploader,
6823 &iv->surface_state, bo)) {
6824 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << s;
6825 }
6826 }
6827 }
6828 }
6829 }
6830
6831 /* ------------------------------------------------------------------- */
6832
6833 static unsigned
6834 flags_to_post_sync_op(uint32_t flags)
6835 {
6836 if (flags & PIPE_CONTROL_WRITE_IMMEDIATE)
6837 return WriteImmediateData;
6838
6839 if (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT)
6840 return WritePSDepthCount;
6841
6842 if (flags & PIPE_CONTROL_WRITE_TIMESTAMP)
6843 return WriteTimestamp;
6844
6845 return 0;
6846 }
6847
6848 /**
6849 * Do the given flags have a Post Sync or LRI Post Sync operation?
6850 */
6851 static enum pipe_control_flags
6852 get_post_sync_flags(enum pipe_control_flags flags)
6853 {
6854 flags &= PIPE_CONTROL_WRITE_IMMEDIATE |
6855 PIPE_CONTROL_WRITE_DEPTH_COUNT |
6856 PIPE_CONTROL_WRITE_TIMESTAMP |
6857 PIPE_CONTROL_LRI_POST_SYNC_OP;
6858
6859 /* Only one "Post Sync Op" is allowed, and it's mutually exclusive with
6860 * "LRI Post Sync Operation". So more than one bit set would be illegal.
6861 */
6862 assert(util_bitcount(flags) <= 1);
6863
6864 return flags;
6865 }
6866
6867 #define IS_COMPUTE_PIPELINE(batch) (batch->name == IRIS_BATCH_COMPUTE)
6868
6869 /**
6870 * Emit a series of PIPE_CONTROL commands, taking into account any
6871 * workarounds necessary to actually accomplish the caller's request.
6872 *
6873 * Unless otherwise noted, spec quotations in this function come from:
6874 *
6875 * Synchronization of the 3D Pipeline > PIPE_CONTROL Command > Programming
6876 * Restrictions for PIPE_CONTROL.
6877 *
6878 * You should not use this function directly. Use the helpers in
6879 * iris_pipe_control.c instead, which may split the pipe control further.
6880 */
6881 static void
6882 iris_emit_raw_pipe_control(struct iris_batch *batch,
6883 const char *reason,
6884 uint32_t flags,
6885 struct iris_bo *bo,
6886 uint32_t offset,
6887 uint64_t imm)
6888 {
6889 UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
6890 enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
6891 enum pipe_control_flags non_lri_post_sync_flags =
6892 post_sync_flags & ~PIPE_CONTROL_LRI_POST_SYNC_OP;
6893
6894 /* Recursive PIPE_CONTROL workarounds --------------------------------
6895 * (http://knowyourmeme.com/memes/xzibit-yo-dawg)
6896 *
6897 * We do these first because we want to look at the original operation,
6898 * rather than any workarounds we set.
6899 */
6900 if (GEN_GEN == 9 && (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
6901 /* The PIPE_CONTROL "VF Cache Invalidation Enable" bit description
6902 * lists several workarounds:
6903 *
6904 * "Project: SKL, KBL, BXT
6905 *
6906 * If the VF Cache Invalidation Enable is set to a 1 in a
6907 * PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields
6908 * sets to 0, with the VF Cache Invalidation Enable set to 0
6909 * needs to be sent prior to the PIPE_CONTROL with VF Cache
6910 * Invalidation Enable set to a 1."
6911 */
6912 iris_emit_raw_pipe_control(batch,
6913 "workaround: recursive VF cache invalidate",
6914 0, NULL, 0, 0);
6915 }
6916
6917 /* GEN:BUG:1409226450: Wait for EU to be idle before a pipe control which
6918 * invalidates the instruction cache.
6919 */
6920 if (GEN_GEN == 12 && (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE)) {
6921 iris_emit_raw_pipe_control(batch,
6922 "workaround: CS stall before instruction "
6923 "cache invalidate",
6924 PIPE_CONTROL_CS_STALL |
6925 PIPE_CONTROL_STALL_AT_SCOREBOARD, bo, offset,
6926 imm);
6927 }
6928
6929 if ((GEN_GEN == 9 || (GEN_GEN == 12 && devinfo->revision == 0 /* A0 */)) &&
6930 IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
6931 /* Project: SKL / Argument: LRI Post Sync Operation [23]
6932 *
6933 * "PIPECONTROL command with “Command Streamer Stall Enable” must be
6934 * programmed prior to programming a PIPECONTROL command with "LRI
6935 * Post Sync Operation" in GPGPU mode of operation (i.e when
6936 * PIPELINE_SELECT command is set to GPGPU mode of operation)."
6937 *
6938 * The same text exists a few rows below for Post Sync Op.
6939 *
6940 * On Gen12 this is GEN:BUG:1607156449.
6941 */
6942 iris_emit_raw_pipe_control(batch,
6943 "workaround: CS stall before gpgpu post-sync",
6944 PIPE_CONTROL_CS_STALL, bo, offset, imm);
6945 }
6946
6947 /* "Flush Types" workarounds ---------------------------------------------
6948 * We do these now because they may add post-sync operations or CS stalls.
6949 */
6950
6951 if (GEN_GEN < 11 && flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) {
6952 /* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
6953 *
6954 * "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
6955 * 'Write PS Depth Count' or 'Write Timestamp'."
6956 */
6957 if (!bo) {
6958 flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
6959 post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
6960 non_lri_post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
6961 bo = batch->screen->workaround_bo;
6962 }
6963 }
6964
6965 if (flags & PIPE_CONTROL_DEPTH_STALL) {
6966 /* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
6967 *
6968 * "This bit must be DISABLED for operations other than writing
6969 * PS_DEPTH_COUNT."
6970 *
6971 * This seems like nonsense. An Ivybridge workaround requires us to
6972 * emit a PIPE_CONTROL with a depth stall and write immediate post-sync
6973 * operation. Gen8+ requires us to emit depth stalls and depth cache
6974 * flushes together. So, it's hard to imagine this means anything other
6975 * than "we originally intended this to be used for PS_DEPTH_COUNT".
6976 *
6977 * We ignore the supposed restriction and do nothing.
6978 */
6979 }
6980
6981 if (flags & (PIPE_CONTROL_RENDER_TARGET_FLUSH |
6982 PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
6983 /* From the PIPE_CONTROL instruction table, bit 12 and bit 1:
6984 *
6985 * "This bit must be DISABLED for End-of-pipe (Read) fences,
6986 * PS_DEPTH_COUNT or TIMESTAMP queries."
6987 *
6988 * TODO: Implement end-of-pipe checking.
6989 */
6990 assert(!(post_sync_flags & (PIPE_CONTROL_WRITE_DEPTH_COUNT |
6991 PIPE_CONTROL_WRITE_TIMESTAMP)));
6992 }
6993
6994 if (GEN_GEN < 11 && (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
6995 /* From the PIPE_CONTROL instruction table, bit 1:
6996 *
6997 * "This bit is ignored if Depth Stall Enable is set.
6998 * Further, the render cache is not flushed even if Write Cache
6999 * Flush Enable bit is set."
7000 *
7001 * We assert that the caller doesn't do this combination, to try and
7002 * prevent mistakes. It shouldn't hurt the GPU, though.
7003 *
7004 * We skip this check on Gen11+ as the "Stall at Pixel Scoreboard"
7005 * and "Render Target Flush" combo is explicitly required for BTI
7006 * update workarounds.
7007 */
7008 assert(!(flags & (PIPE_CONTROL_DEPTH_STALL |
7009 PIPE_CONTROL_RENDER_TARGET_FLUSH)));
7010 }
7011
7012 /* PIPE_CONTROL page workarounds ------------------------------------- */
7013
7014 if (GEN_GEN <= 8 && (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE)) {
7015 /* From the PIPE_CONTROL page itself:
7016 *
7017 * "IVB, HSW, BDW
7018 * Restriction: Pipe_control with CS-stall bit set must be issued
7019 * before a pipe-control command that has the State Cache
7020 * Invalidate bit set."
7021 */
7022 flags |= PIPE_CONTROL_CS_STALL;
7023 }
7024
7025 if (flags & PIPE_CONTROL_FLUSH_LLC) {
7026 /* From the PIPE_CONTROL instruction table, bit 26 (Flush LLC):
7027 *
7028 * "Project: ALL
7029 * SW must always program Post-Sync Operation to "Write Immediate
7030 * Data" when Flush LLC is set."
7031 *
7032 * For now, we just require the caller to do it.
7033 */
7034 assert(flags & PIPE_CONTROL_WRITE_IMMEDIATE);
7035 }
7036
7037 /* "Post-Sync Operation" workarounds -------------------------------- */
7038
7039 /* Project: All / Argument: Global Snapshot Count Reset [19]
7040 *
7041 * "This bit must not be exercised on any product.
7042 * Requires stall bit ([20] of DW1) set."
7043 *
7044 * We don't use this, so we just assert that it isn't used. The
7045 * PIPE_CONTROL instruction page indicates that they intended this
7046 * as a debug feature and don't think it is useful in production,
7047 * but it may actually be usable, should we ever want to.
7048 */
7049 assert((flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) == 0);
7050
7051 if (flags & (PIPE_CONTROL_MEDIA_STATE_CLEAR |
7052 PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE)) {
7053 /* Project: All / Arguments:
7054 *
7055 * - Generic Media State Clear [16]
7056 * - Indirect State Pointers Disable [16]
7057 *
7058 * "Requires stall bit ([20] of DW1) set."
7059 *
7060 * Also, the PIPE_CONTROL instruction table, bit 16 (Generic Media
7061 * State Clear) says:
7062 *
7063 * "PIPECONTROL command with “Command Streamer Stall Enable” must be
7064 * programmed prior to programming a PIPECONTROL command with "Media
7065 * State Clear" set in GPGPU mode of operation"
7066 *
7067 * This is a subset of the earlier rule, so there's nothing to do.
7068 */
7069 flags |= PIPE_CONTROL_CS_STALL;
7070 }
7071
7072 if (flags & PIPE_CONTROL_STORE_DATA_INDEX) {
7073 /* Project: All / Argument: Store Data Index
7074 *
7075 * "Post-Sync Operation ([15:14] of DW1) must be set to something other
7076 * than '0'."
7077 *
7078 * For now, we just assert that the caller does this. We might want to
7079 * automatically add a write to the workaround BO...
7080 */
7081 assert(non_lri_post_sync_flags != 0);
7082 }
7083
7084 if (flags & PIPE_CONTROL_SYNC_GFDT) {
7085 /* Project: All / Argument: Sync GFDT
7086 *
7087 * "Post-Sync Operation ([15:14] of DW1) must be set to something other
7088 * than '0' or 0x2520[13] must be set."
7089 *
7090 * For now, we just assert that the caller does this.
7091 */
7092 assert(non_lri_post_sync_flags != 0);
7093 }
7094
7095 if (flags & PIPE_CONTROL_TLB_INVALIDATE) {
7096 /* Project: IVB+ / Argument: TLB inv
7097 *
7098 * "Requires stall bit ([20] of DW1) set."
7099 *
7100 * Also, from the PIPE_CONTROL instruction table:
7101 *
7102 * "Project: SKL+
7103 * Post Sync Operation or CS stall must be set to ensure a TLB
7104 * invalidation occurs. Otherwise no cycle will occur to the TLB
7105 * cache to invalidate."
7106 *
7107 * This is not a subset of the earlier rule, so there's nothing to do.
7108 */
7109 flags |= PIPE_CONTROL_CS_STALL;
7110 }
7111
7112 if (GEN_GEN >= 12 && ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ||
7113 (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))) {
7114 /* From the PIPE_CONTROL instruction table, bit 28 (Tile Cache Flush
7115 * Enable):
7116 *
7117 * Unified Cache (Tile Cache Disabled):
7118 *
7119 * When the Color and Depth (Z) streams are enabled to be cached in
7120 * the DC space of L2, Software must use "Render Target Cache Flush
7121 * Enable" and "Depth Cache Flush Enable" along with "Tile Cache
7122 * Flush" for getting the color and depth (Z) write data to be
7123 * globally observable. In this mode of operation it is not required
7124 * to set "CS Stall" upon setting "Tile Cache Flush" bit.
7125 */
7126 flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
7127 }
7128
7129 if (GEN_GEN == 9 && devinfo->gt == 4) {
7130 /* TODO: The big Skylake GT4 post sync op workaround */
7131 }
7132
7133 /* "GPGPU specific workarounds" (both post-sync and flush) ------------ */
7134
7135 if (IS_COMPUTE_PIPELINE(batch)) {
7136 if (GEN_GEN >= 9 && (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE)) {
7137 /* Project: SKL+ / Argument: Tex Invalidate
7138 * "Requires stall bit ([20] of DW) set for all GPGPU Workloads."
7139 */
7140 flags |= PIPE_CONTROL_CS_STALL;
7141 }
7142
7143 if (GEN_GEN == 8 && (post_sync_flags ||
7144 (flags & (PIPE_CONTROL_NOTIFY_ENABLE |
7145 PIPE_CONTROL_DEPTH_STALL |
7146 PIPE_CONTROL_RENDER_TARGET_FLUSH |
7147 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
7148 PIPE_CONTROL_DATA_CACHE_FLUSH)))) {
7149 /* Project: BDW / Arguments:
7150 *
7151 * - LRI Post Sync Operation [23]
7152 * - Post Sync Op [15:14]
7153 * - Notify En [8]
7154 * - Depth Stall [13]
7155 * - Render Target Cache Flush [12]
7156 * - Depth Cache Flush [0]
7157 * - DC Flush Enable [5]
7158 *
7159 * "Requires stall bit ([20] of DW) set for all GPGPU and Media
7160 * Workloads."
7161 */
7162 flags |= PIPE_CONTROL_CS_STALL;
7163
7164 /* Also, from the PIPE_CONTROL instruction table, bit 20:
7165 *
7166 * "Project: BDW
7167 * This bit must be always set when PIPE_CONTROL command is
7168 * programmed by GPGPU and MEDIA workloads, except for the cases
7169 * when only Read Only Cache Invalidation bits are set (State
7170 * Cache Invalidation Enable, Instruction cache Invalidation
7171 * Enable, Texture Cache Invalidation Enable, Constant Cache
7172 * Invalidation Enable). This is to WA FFDOP CG issue, this WA
7173 * need not implemented when FF_DOP_CG is disable via "Fixed
7174 * Function DOP Clock Gate Disable" bit in RC_PSMI_CTRL register."
7175 *
7176 * It sounds like we could avoid CS stalls in some cases, but we
7177 * don't currently bother. This list isn't exactly the list above,
7178 * either...
7179 */
7180 }
7181 }
7182
7183 /* "Stall" workarounds ----------------------------------------------
7184 * These have to come after the earlier ones because we may have added
7185 * some additional CS stalls above.
7186 */
7187
7188 if (GEN_GEN < 9 && (flags & PIPE_CONTROL_CS_STALL)) {
7189 /* Project: PRE-SKL, VLV, CHV
7190 *
7191 * "[All Stepping][All SKUs]:
7192 *
7193 * One of the following must also be set:
7194 *
7195 * - Render Target Cache Flush Enable ([12] of DW1)
7196 * - Depth Cache Flush Enable ([0] of DW1)
7197 * - Stall at Pixel Scoreboard ([1] of DW1)
7198 * - Depth Stall ([13] of DW1)
7199 * - Post-Sync Operation ([13] of DW1)
7200 * - DC Flush Enable ([5] of DW1)"
7201 *
7202 * If we don't already have one of those bits set, we choose to add
7203 * "Stall at Pixel Scoreboard". Some of the other bits require a
7204 * CS stall as a workaround (see above), which would send us into
7205 * an infinite recursion of PIPE_CONTROLs. "Stall at Pixel Scoreboard"
7206 * appears to be safe, so we choose that.
7207 */
7208 const uint32_t wa_bits = PIPE_CONTROL_RENDER_TARGET_FLUSH |
7209 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
7210 PIPE_CONTROL_WRITE_IMMEDIATE |
7211 PIPE_CONTROL_WRITE_DEPTH_COUNT |
7212 PIPE_CONTROL_WRITE_TIMESTAMP |
7213 PIPE_CONTROL_STALL_AT_SCOREBOARD |
7214 PIPE_CONTROL_DEPTH_STALL |
7215 PIPE_CONTROL_DATA_CACHE_FLUSH;
7216 if (!(flags & wa_bits))
7217 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
7218 }
7219
7220 if (GEN_GEN >= 12 && (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH)) {
7221 /* GEN:BUG:1409600907:
7222 *
7223 * "PIPE_CONTROL with Depth Stall Enable bit must be set
7224 * with any PIPE_CONTROL with Depth Flush Enable bit set."
7225 */
7226 flags |= PIPE_CONTROL_DEPTH_STALL;
7227 }
7228
7229 /* Emit --------------------------------------------------------------- */
7230
7231 if (INTEL_DEBUG & DEBUG_PIPE_CONTROL) {
7232 fprintf(stderr,
7233 " PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
7234 (flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
7235 (flags & PIPE_CONTROL_CS_STALL) ? "CS " : "",
7236 (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD) ? "Scoreboard " : "",
7237 (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) ? "VF " : "",
7238 (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ? "RT " : "",
7239 (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE) ? "Const " : "",
7240 (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) ? "TC " : "",
7241 (flags & PIPE_CONTROL_DATA_CACHE_FLUSH) ? "DC " : "",
7242 (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH) ? "ZFlush " : "",
7243 (flags & PIPE_CONTROL_DEPTH_STALL) ? "ZStall " : "",
7244 (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE) ? "State " : "",
7245 (flags & PIPE_CONTROL_TLB_INVALIDATE) ? "TLB " : "",
7246 (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE) ? "Inst " : "",
7247 (flags & PIPE_CONTROL_MEDIA_STATE_CLEAR) ? "MediaClear " : "",
7248 (flags & PIPE_CONTROL_NOTIFY_ENABLE) ? "Notify " : "",
7249 (flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) ?
7250 "SnapRes" : "",
7251 (flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE) ?
7252 "ISPDis" : "",
7253 (flags & PIPE_CONTROL_WRITE_IMMEDIATE) ? "WriteImm " : "",
7254 (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT) ? "WriteZCount " : "",
7255 (flags & PIPE_CONTROL_WRITE_TIMESTAMP) ? "WriteTimestamp " : "",
7256 (flags & PIPE_CONTROL_FLUSH_HDC) ? "HDC " : "",
7257 imm, reason);
7258 }
7259
7260 iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
7261 #if GEN_GEN >= 12
7262 pc.TileCacheFlushEnable = flags & PIPE_CONTROL_TILE_CACHE_FLUSH;
7263 #endif
7264 #if GEN_GEN >= 11
7265 pc.HDCPipelineFlushEnable = flags & PIPE_CONTROL_FLUSH_HDC;
7266 #endif
7267 pc.LRIPostSyncOperation = NoLRIOperation;
7268 pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
7269 pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
7270 pc.StoreDataIndex = 0;
7271 pc.CommandStreamerStallEnable = flags & PIPE_CONTROL_CS_STALL;
7272 pc.GlobalSnapshotCountReset =
7273 flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET;
7274 pc.TLBInvalidate = flags & PIPE_CONTROL_TLB_INVALIDATE;
7275 pc.GenericMediaStateClear = flags & PIPE_CONTROL_MEDIA_STATE_CLEAR;
7276 pc.StallAtPixelScoreboard = flags & PIPE_CONTROL_STALL_AT_SCOREBOARD;
7277 pc.RenderTargetCacheFlushEnable =
7278 flags & PIPE_CONTROL_RENDER_TARGET_FLUSH;
7279 pc.DepthCacheFlushEnable = flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH;
7280 pc.StateCacheInvalidationEnable =
7281 flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE;
7282 pc.VFCacheInvalidationEnable = flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
7283 pc.ConstantCacheInvalidationEnable =
7284 flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE;
7285 pc.PostSyncOperation = flags_to_post_sync_op(flags);
7286 pc.DepthStallEnable = flags & PIPE_CONTROL_DEPTH_STALL;
7287 pc.InstructionCacheInvalidateEnable =
7288 flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE;
7289 pc.NotifyEnable = flags & PIPE_CONTROL_NOTIFY_ENABLE;
7290 pc.IndirectStatePointersDisable =
7291 flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE;
7292 pc.TextureCacheInvalidationEnable =
7293 flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
7294 pc.Address = rw_bo(bo, offset);
7295 pc.ImmediateData = imm;
7296 }
7297 }
7298
7299 #if GEN_GEN == 9
7300 /**
7301 * Preemption on Gen9 has to be enabled or disabled in various cases.
7302 *
7303 * See these workarounds for preemption:
7304 * - WaDisableMidObjectPreemptionForGSLineStripAdj
7305 * - WaDisableMidObjectPreemptionForTrifanOrPolygon
7306 * - WaDisableMidObjectPreemptionForLineLoop
7307 * - WA#0798
7308 *
7309 * We don't put this in the vtable because it's only used on Gen9.
7310 */
7311 void
7312 gen9_toggle_preemption(struct iris_context *ice,
7313 struct iris_batch *batch,
7314 const struct pipe_draw_info *draw)
7315 {
7316 struct iris_genx_state *genx = ice->state.genx;
7317 bool object_preemption = true;
7318
7319 /* WaDisableMidObjectPreemptionForGSLineStripAdj
7320 *
7321 * "WA: Disable mid-draw preemption when draw-call is a linestrip_adj
7322 * and GS is enabled."
7323 */
7324 if (draw->mode == PIPE_PRIM_LINE_STRIP_ADJACENCY &&
7325 ice->shaders.prog[MESA_SHADER_GEOMETRY])
7326 object_preemption = false;
7327
7328 /* WaDisableMidObjectPreemptionForTrifanOrPolygon
7329 *
7330 * "TriFan miscompare in Execlist Preemption test. Cut index that is
7331 * on a previous context. End the previous, then resume another context
7332 * with a tri-fan or polygon, and the vertex count is corrupted. If we
7333 * preempt again we will cause corruption.
7334 *
7335 * WA: Disable mid-draw preemption when draw-call has a tri-fan."
7336 */
7337 if (draw->mode == PIPE_PRIM_TRIANGLE_FAN)
7338 object_preemption = false;
7339
7340 /* WaDisableMidObjectPreemptionForLineLoop
7341 *
7342 * "VF Stats Counters Missing a vertex when preemption enabled.
7343 *
7344 * WA: Disable mid-draw preemption when the draw uses a lineloop
7345 * topology."
7346 */
7347 if (draw->mode == PIPE_PRIM_LINE_LOOP)
7348 object_preemption = false;
7349
7350 /* WA#0798
7351 *
7352 * "VF is corrupting GAFS data when preempted on an instance boundary
7353 * and replayed with instancing enabled.
7354 *
7355 * WA: Disable preemption when using instancing."
7356 */
7357 if (draw->instance_count > 1)
7358 object_preemption = false;
7359
7360 if (genx->object_preemption != object_preemption) {
7361 iris_enable_obj_preemption(batch, object_preemption);
7362 genx->object_preemption = object_preemption;
7363 }
7364 }
7365 #endif
7366
7367 static void
7368 iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch)
7369 {
7370 struct iris_genx_state *genx = ice->state.genx;
7371
7372 memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer));
7373 }
7374
7375 static void
7376 iris_emit_mi_report_perf_count(struct iris_batch *batch,
7377 struct iris_bo *bo,
7378 uint32_t offset_in_bytes,
7379 uint32_t report_id)
7380 {
7381 iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
7382 mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes);
7383 mi_rpc.ReportID = report_id;
7384 }
7385 }
7386
7387 /**
7388 * Update the pixel hashing modes that determine the balancing of PS threads
7389 * across subslices and slices.
7390 *
7391 * \param width Width bound of the rendering area (already scaled down if \p
7392 * scale is greater than 1).
7393 * \param height Height bound of the rendering area (already scaled down if \p
7394 * scale is greater than 1).
7395 * \param scale The number of framebuffer samples that could potentially be
7396 * affected by an individual channel of the PS thread. This is
7397 * typically one for single-sampled rendering, but for operations
7398 * like CCS resolves and fast clears a single PS invocation may
7399 * update a huge number of pixels, in which case a finer
7400 * balancing is desirable in order to maximally utilize the
7401 * bandwidth available. UINT_MAX can be used as shorthand for
7402 * "finest hashing mode available".
7403 */
7404 void
7405 genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
7406 unsigned width, unsigned height, unsigned scale)
7407 {
7408 #if GEN_GEN == 9
7409 const struct gen_device_info *devinfo = &batch->screen->devinfo;
7410 const unsigned slice_hashing[] = {
7411 /* Because all Gen9 platforms with more than one slice require
7412 * three-way subslice hashing, a single "normal" 16x16 slice hashing
7413 * block is guaranteed to suffer from substantial imbalance, with one
7414 * subslice receiving twice as much work as the other two in the
7415 * slice.
7416 *
7417 * The performance impact of that would be particularly severe when
7418 * three-way hashing is also in use for slice balancing (which is the
7419 * case for all Gen9 GT4 platforms), because one of the slices
7420 * receives one of every three 16x16 blocks in either direction, which
7421 * is roughly the periodicity of the underlying subslice imbalance
7422 * pattern ("roughly" because in reality the hardware's
7423 * implementation of three-way hashing doesn't do exact modulo 3
7424 * arithmetic, which somewhat decreases the magnitude of this effect
7425 * in practice). This leads to a systematic subslice imbalance
7426 * within that slice regardless of the size of the primitive. The
7427 * 32x32 hashing mode guarantees that the subslice imbalance within a
7428 * single slice hashing block is minimal, largely eliminating this
7429 * effect.
7430 */
7431 _32x32,
7432 /* Finest slice hashing mode available. */
7433 NORMAL
7434 };
7435 const unsigned subslice_hashing[] = {
7436 /* 16x16 would provide a slight cache locality benefit especially
7437 * visible in the sampler L1 cache efficiency of low-bandwidth
7438 * non-LLC platforms, but it comes at the cost of greater subslice
7439 * imbalance for primitives of dimensions approximately intermediate
7440 * between 16x4 and 16x16.
7441 */
7442 _16x4,
7443 /* Finest subslice hashing mode available. */
7444 _8x4
7445 };
7446 /* Dimensions of the smallest hashing block of a given hashing mode. If
7447 * the rendering area is smaller than this there can't possibly be any
7448 * benefit from switching to this mode, so we optimize out the
7449 * transition.
7450 */
7451 const unsigned min_size[][2] = {
7452 { 16, 4 },
7453 { 8, 4 }
7454 };
7455 const unsigned idx = scale > 1;
7456
7457 if (width > min_size[idx][0] || height > min_size[idx][1]) {
7458 uint32_t gt_mode;
7459
7460 iris_pack_state(GENX(GT_MODE), &gt_mode, reg) {
7461 reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
7462 reg.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0);
7463 reg.SubsliceHashing = subslice_hashing[idx];
7464 reg.SubsliceHashingMask = -1;
7465 };
7466
7467 iris_emit_raw_pipe_control(batch,
7468 "workaround: CS stall before GT_MODE LRI",
7469 PIPE_CONTROL_STALL_AT_SCOREBOARD |
7470 PIPE_CONTROL_CS_STALL,
7471 NULL, 0, 0);
7472
7473 iris_emit_lri(batch, GT_MODE, gt_mode);
7474
7475 ice->state.current_hash_scale = scale;
7476 }
7477 #endif
7478 }
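/* Hypothetical usage: a full-surface fast clear or CCS resolve might
 * request the finest balancing via
 * genX(emit_hashing_mode)(ice, batch, width, height, UINT_MAX),
 * while ordinary draws restore the default with scale = 1.
 */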
7479
7480 static void
7481 iris_set_frontend_noop(struct pipe_context *ctx, bool enable)
7482 {
7483 struct iris_context *ice = (struct iris_context *) ctx;
7484
7485 ice->state.dirty |= iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER],
7486 enable,
7487 IRIS_ALL_DIRTY_FOR_RENDER);
7488 ice->state.dirty |= iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE],
7489 enable,
7490 IRIS_ALL_DIRTY_FOR_COMPUTE);
7491 }
7492
7493 void
7494 genX(init_state)(struct iris_context *ice)
7495 {
7496 struct pipe_context *ctx = &ice->ctx;
7497 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
7498
7499 ctx->create_blend_state = iris_create_blend_state;
7500 ctx->create_depth_stencil_alpha_state = iris_create_zsa_state;
7501 ctx->create_rasterizer_state = iris_create_rasterizer_state;
7502 ctx->create_sampler_state = iris_create_sampler_state;
7503 ctx->create_sampler_view = iris_create_sampler_view;
7504 ctx->create_surface = iris_create_surface;
7505 ctx->create_vertex_elements_state = iris_create_vertex_elements;
7506 ctx->bind_blend_state = iris_bind_blend_state;
7507 ctx->bind_depth_stencil_alpha_state = iris_bind_zsa_state;
7508 ctx->bind_sampler_states = iris_bind_sampler_states;
7509 ctx->bind_rasterizer_state = iris_bind_rasterizer_state;
7510 ctx->bind_vertex_elements_state = iris_bind_vertex_elements_state;
7511 ctx->delete_blend_state = iris_delete_state;
7512 ctx->delete_depth_stencil_alpha_state = iris_delete_state;
7513 ctx->delete_rasterizer_state = iris_delete_state;
7514 ctx->delete_sampler_state = iris_delete_state;
7515 ctx->delete_vertex_elements_state = iris_delete_state;
7516 ctx->set_blend_color = iris_set_blend_color;
7517 ctx->set_clip_state = iris_set_clip_state;
7518 ctx->set_constant_buffer = iris_set_constant_buffer;
7519 ctx->set_shader_buffers = iris_set_shader_buffers;
7520 ctx->set_shader_images = iris_set_shader_images;
7521 ctx->set_sampler_views = iris_set_sampler_views;
7522 ctx->set_tess_state = iris_set_tess_state;
7523 ctx->set_framebuffer_state = iris_set_framebuffer_state;
7524 ctx->set_polygon_stipple = iris_set_polygon_stipple;
7525 ctx->set_sample_mask = iris_set_sample_mask;
7526 ctx->set_scissor_states = iris_set_scissor_states;
7527 ctx->set_stencil_ref = iris_set_stencil_ref;
7528 ctx->set_vertex_buffers = iris_set_vertex_buffers;
7529 ctx->set_viewport_states = iris_set_viewport_states;
7530 ctx->sampler_view_destroy = iris_sampler_view_destroy;
7531 ctx->surface_destroy = iris_surface_destroy;
7532 ctx->draw_vbo = iris_draw_vbo;
7533 ctx->launch_grid = iris_launch_grid;
7534 ctx->create_stream_output_target = iris_create_stream_output_target;
7535 ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
7536 ctx->set_stream_output_targets = iris_set_stream_output_targets;
7537 ctx->set_frontend_noop = iris_set_frontend_noop;
7538
7539 screen->vtbl.destroy_state = iris_destroy_state;
7540 screen->vtbl.init_render_context = iris_init_render_context;
7541 screen->vtbl.init_compute_context = iris_init_compute_context;
7542 screen->vtbl.upload_render_state = iris_upload_render_state;
7543 screen->vtbl.update_surface_base_address = iris_update_surface_base_address;
7544 screen->vtbl.upload_compute_state = iris_upload_compute_state;
7545 screen->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
7546 screen->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
7547 screen->vtbl.rebind_buffer = iris_rebind_buffer;
7548 screen->vtbl.load_register_reg32 = iris_load_register_reg32;
7549 screen->vtbl.load_register_reg64 = iris_load_register_reg64;
7550 screen->vtbl.load_register_imm32 = iris_load_register_imm32;
7551 screen->vtbl.load_register_imm64 = iris_load_register_imm64;
7552 screen->vtbl.load_register_mem32 = iris_load_register_mem32;
7553 screen->vtbl.load_register_mem64 = iris_load_register_mem64;
7554 screen->vtbl.store_register_mem32 = iris_store_register_mem32;
7555 screen->vtbl.store_register_mem64 = iris_store_register_mem64;
7556 screen->vtbl.store_data_imm32 = iris_store_data_imm32;
7557 screen->vtbl.store_data_imm64 = iris_store_data_imm64;
7558 screen->vtbl.copy_mem_mem = iris_copy_mem_mem;
7559 screen->vtbl.derived_program_state_size = iris_derived_program_state_size;
7560 screen->vtbl.store_derived_program_state = iris_store_derived_program_state;
7561 screen->vtbl.create_so_decl_list = iris_create_so_decl_list;
7562 screen->vtbl.populate_vs_key = iris_populate_vs_key;
7563 screen->vtbl.populate_tcs_key = iris_populate_tcs_key;
7564 screen->vtbl.populate_tes_key = iris_populate_tes_key;
7565 screen->vtbl.populate_gs_key = iris_populate_gs_key;
7566 screen->vtbl.populate_fs_key = iris_populate_fs_key;
7567 screen->vtbl.populate_cs_key = iris_populate_cs_key;
7568 screen->vtbl.lost_genx_state = iris_lost_genx_state;
7569
7570 ice->state.dirty = ~0ull;
7571
7572 ice->state.statistics_counters_enabled = true;
7573
7574 ice->state.sample_mask = 0xffff;
7575 ice->state.num_viewports = 1;
7576 ice->state.prim_mode = PIPE_PRIM_MAX;
7577 ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
7578 ice->draw.derived_params.drawid = -1;
7579
7580 /* Make a 1x1x1 null surface for unbound textures */
7581 void *null_surf_map =
7582 upload_state(ice->state.surface_uploader, &ice->state.unbound_tex,
7583 4 * GENX(RENDER_SURFACE_STATE_length), 64);
7584 isl_null_fill_state(&screen->isl_dev, null_surf_map, isl_extent3d(1, 1, 1));
7585 ice->state.unbound_tex.offset +=
7586 iris_bo_offset_from_base_address(iris_resource_bo(ice->state.unbound_tex.res));
7587
7588 /* Default all scissor rectangles to be empty regions. */
7589 for (int i = 0; i < IRIS_MAX_VIEWPORTS; i++) {
7590 ice->state.scissors[i] = (struct pipe_scissor_state) {
7591 .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
7592 };
7593 }
7594 }