iris: properly pin stencil buffers
[mesa.git] / src / gallium / drivers / iris / iris_state.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_state.c
25 *
26 * ============================= GENXML CODE =============================
27 * [This file is compiled once per generation.]
28 * =======================================================================
29 *
30 * This is the main state upload code.
31 *
38 * OpenGL involves frequently mutating context state, which is mirrored in
39 * core Mesa by highly mutable data structures. However, most applications
40 * typically draw the same things over and over - from frame to frame, most
41 * of the same objects are still visible and need to be redrawn. So, rather
42 * than inventing new state all the time, applications usually mutate to swap
43 * between known states that we've seen before.
44 *
45 * Gallium isolates us from this mutation by tracking API state, and
46 * distilling it into a set of Constant State Objects, or CSOs. Large,
47 * complex, or typically reusable state can be created once, then reused
48 * multiple times. Drivers can create and store their own associated data.
49 * This create/bind model corresponds to the pipe->create_*_state() and
50 * pipe->bind_*_state() driver hooks.
51 *
52 * Some state is cheap to create, or expected to be highly dynamic. Rather
53 * than creating and caching piles of CSOs for these, Gallium simply streams
54 * them out, via the pipe->set_*_state() driver hooks.
55 *
56 * To reduce draw time overhead, we try to compute as much state at create
57 * time as possible. Wherever possible, we translate the Gallium pipe state
58 * to 3DSTATE commands, and store those commands in the CSO. At draw time,
59 * we can simply memcpy them into a batch buffer.
60 *
61 * No hardware matches the abstraction perfectly, so some commands require
62 * information from multiple CSOs. In this case, we can store two copies
63 * of the packet (one in each CSO), and simply | together their DWords at
64 * draw time. Sometimes the second set is trivial (one or two fields), so
65 * we simply pack it at draw time.
66 *
67 * There are two main components in the file below. First, the CSO hooks
68 * create/bind/track state. The second are the draw-time upload functions,
69 * iris_upload_render_state() and iris_upload_compute_state(), which read
70 * the context state and emit the commands into the actual batch.
71 */
72
73 #include <stdio.h>
74 #include <errno.h>
75
76 #if HAVE_VALGRIND
77 #include <valgrind.h>
78 #include <memcheck.h>
79 #define VG(x) x
80 #ifndef NDEBUG
81 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
82 #endif
83 #else
84 #define VG(x)
85 #endif
86
87 #include "pipe/p_defines.h"
88 #include "pipe/p_state.h"
89 #include "pipe/p_context.h"
90 #include "pipe/p_screen.h"
91 #include "util/u_inlines.h"
92 #include "util/u_format.h"
93 #include "util/u_framebuffer.h"
94 #include "util/u_transfer.h"
95 #include "util/u_upload_mgr.h"
96 #include "util/u_viewport.h"
97 #include "i915_drm.h"
98 #include "nir.h"
99 #include "intel/compiler/brw_compiler.h"
100 #include "intel/common/gen_l3_config.h"
101 #include "intel/common/gen_sample_positions.h"
102 #include "iris_batch.h"
103 #include "iris_context.h"
104 #include "iris_pipe.h"
105 #include "iris_resource.h"
106
107 #define __gen_address_type struct iris_address
108 #define __gen_user_data struct iris_batch
109
110 #define ARRAY_BYTES(x) (sizeof(uint32_t) * ARRAY_SIZE(x))
111
112 static uint64_t
113 __gen_combine_address(struct iris_batch *batch, void *location,
114 struct iris_address addr, uint32_t delta)
115 {
116 uint64_t result = addr.offset + delta;
117
118 if (addr.bo) {
119 iris_use_pinned_bo(batch, addr.bo, addr.write);
120 /* Assume this is a general address, not relative to a base. */
121 result += addr.bo->gtt_offset;
122 }
123
124 return result;
125 }
126
127 #define __genxml_cmd_length(cmd) cmd ## _length
128 #define __genxml_cmd_length_bias(cmd) cmd ## _length_bias
129 #define __genxml_cmd_header(cmd) cmd ## _header
130 #define __genxml_cmd_pack(cmd) cmd ## _pack
131
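/* The packing macros below abuse a for-loop to get block syntax: the
 * initializer declares the template struct (seeded with the command
 * header, or zero-initialized for iris_pack_state) plus a _dst cursor,
 * the caller-supplied block body fills in the struct's fields, and the
 * increment expression packs the struct into dst and NULLs _dst so the
 * loop body runs exactly once.
 */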
132 #define _iris_pack_command(batch, cmd, dst, name) \
133 for (struct cmd name = { __genxml_cmd_header(cmd) }, \
134 *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
135 ({ __genxml_cmd_pack(cmd)(batch, (void *)_dst, &name); \
136 _dst = NULL; \
137 }))
138
139 #define iris_pack_command(cmd, dst, name) \
140 _iris_pack_command(NULL, cmd, dst, name)
141
142 #define iris_pack_state(cmd, dst, name) \
143 for (struct cmd name = {}, \
144 *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
145 __genxml_cmd_pack(cmd)(NULL, (void *)_dst, &name), \
146 _dst = NULL)
147
148 #define iris_emit_cmd(batch, cmd, name) \
149 _iris_pack_command(batch, cmd, iris_get_command_space(batch, 4 * __genxml_cmd_length(cmd)), name)
150
151 #define iris_emit_merge(batch, dwords0, dwords1, num_dwords) \
152 do { \
153 uint32_t *dw = iris_get_command_space(batch, 4 * num_dwords); \
154 for (uint32_t i = 0; i < num_dwords; i++) \
155 dw[i] = (dwords0)[i] | (dwords1)[i]; \
156 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, num_dwords * 4)); \
157 } while (0)
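/* Illustrative sketch (hypothetical field values, not a real call site):
 * the create-time/draw-time split described in the header comment.
 * A CSO packs a partial packet once at create time:
 *
 *    iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
 *       pb.ColorBufferBlendEnable = ...;
 *    }
 *
 * and at draw time, its DWords are OR'd with a second partial copy
 * (here a hypothetical dynamic_pb) straight into the batch:
 *
 *    iris_emit_merge(batch, cso->ps_blend, dynamic_pb,
 *                    GENX(3DSTATE_PS_BLEND_length));
 */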
158
159 #include "genxml/genX_pack.h"
160 #include "genxml/gen_macros.h"
161 #include "genxml/genX_bits.h"
162
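/* MOCS value used throughout this file: table index 2 (assumed to be
 * the write-back cacheable entry), shifted into the index position of
 * the various MOCS fields.
 */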
163 #define MOCS_WB (2 << 1)
164
165 /**
166 * Statically assert that PIPE_* enums match the hardware packets.
167 * (As long as they match, we don't need to translate them.)
168 */
169 UNUSED static void pipe_asserts()
170 {
171 #define PIPE_ASSERT(x) STATIC_ASSERT((int)x)
172
173 /* pipe_logicop happens to match the hardware. */
174 PIPE_ASSERT(PIPE_LOGICOP_CLEAR == LOGICOP_CLEAR);
175 PIPE_ASSERT(PIPE_LOGICOP_NOR == LOGICOP_NOR);
176 PIPE_ASSERT(PIPE_LOGICOP_AND_INVERTED == LOGICOP_AND_INVERTED);
177 PIPE_ASSERT(PIPE_LOGICOP_COPY_INVERTED == LOGICOP_COPY_INVERTED);
178 PIPE_ASSERT(PIPE_LOGICOP_AND_REVERSE == LOGICOP_AND_REVERSE);
179 PIPE_ASSERT(PIPE_LOGICOP_INVERT == LOGICOP_INVERT);
180 PIPE_ASSERT(PIPE_LOGICOP_XOR == LOGICOP_XOR);
181 PIPE_ASSERT(PIPE_LOGICOP_NAND == LOGICOP_NAND);
182 PIPE_ASSERT(PIPE_LOGICOP_AND == LOGICOP_AND);
183 PIPE_ASSERT(PIPE_LOGICOP_EQUIV == LOGICOP_EQUIV);
184 PIPE_ASSERT(PIPE_LOGICOP_NOOP == LOGICOP_NOOP);
185 PIPE_ASSERT(PIPE_LOGICOP_OR_INVERTED == LOGICOP_OR_INVERTED);
186 PIPE_ASSERT(PIPE_LOGICOP_COPY == LOGICOP_COPY);
187 PIPE_ASSERT(PIPE_LOGICOP_OR_REVERSE == LOGICOP_OR_REVERSE);
188 PIPE_ASSERT(PIPE_LOGICOP_OR == LOGICOP_OR);
189 PIPE_ASSERT(PIPE_LOGICOP_SET == LOGICOP_SET);
190
191 /* pipe_blendfactor happens to match the hardware. */
192 PIPE_ASSERT(PIPE_BLENDFACTOR_ONE == BLENDFACTOR_ONE);
193 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_COLOR == BLENDFACTOR_SRC_COLOR);
194 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA == BLENDFACTOR_SRC_ALPHA);
195 PIPE_ASSERT(PIPE_BLENDFACTOR_DST_ALPHA == BLENDFACTOR_DST_ALPHA);
196 PIPE_ASSERT(PIPE_BLENDFACTOR_DST_COLOR == BLENDFACTOR_DST_COLOR);
197 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE == BLENDFACTOR_SRC_ALPHA_SATURATE);
198 PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_COLOR == BLENDFACTOR_CONST_COLOR);
199 PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_ALPHA == BLENDFACTOR_CONST_ALPHA);
200 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_COLOR == BLENDFACTOR_SRC1_COLOR);
201 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_ALPHA == BLENDFACTOR_SRC1_ALPHA);
202 PIPE_ASSERT(PIPE_BLENDFACTOR_ZERO == BLENDFACTOR_ZERO);
203 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_COLOR == BLENDFACTOR_INV_SRC_COLOR);
204 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_ALPHA == BLENDFACTOR_INV_SRC_ALPHA);
205 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_ALPHA == BLENDFACTOR_INV_DST_ALPHA);
206 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_COLOR == BLENDFACTOR_INV_DST_COLOR);
207 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_COLOR == BLENDFACTOR_INV_CONST_COLOR);
208 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_ALPHA == BLENDFACTOR_INV_CONST_ALPHA);
209 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_COLOR == BLENDFACTOR_INV_SRC1_COLOR);
210 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_ALPHA == BLENDFACTOR_INV_SRC1_ALPHA);
211
212 /* pipe_blend_func happens to match the hardware. */
213 PIPE_ASSERT(PIPE_BLEND_ADD == BLENDFUNCTION_ADD);
214 PIPE_ASSERT(PIPE_BLEND_SUBTRACT == BLENDFUNCTION_SUBTRACT);
215 PIPE_ASSERT(PIPE_BLEND_REVERSE_SUBTRACT == BLENDFUNCTION_REVERSE_SUBTRACT);
216 PIPE_ASSERT(PIPE_BLEND_MIN == BLENDFUNCTION_MIN);
217 PIPE_ASSERT(PIPE_BLEND_MAX == BLENDFUNCTION_MAX);
218
219 /* pipe_stencil_op happens to match the hardware. */
220 PIPE_ASSERT(PIPE_STENCIL_OP_KEEP == STENCILOP_KEEP);
221 PIPE_ASSERT(PIPE_STENCIL_OP_ZERO == STENCILOP_ZERO);
222 PIPE_ASSERT(PIPE_STENCIL_OP_REPLACE == STENCILOP_REPLACE);
223 PIPE_ASSERT(PIPE_STENCIL_OP_INCR == STENCILOP_INCRSAT);
224 PIPE_ASSERT(PIPE_STENCIL_OP_DECR == STENCILOP_DECRSAT);
225 PIPE_ASSERT(PIPE_STENCIL_OP_INCR_WRAP == STENCILOP_INCR);
226 PIPE_ASSERT(PIPE_STENCIL_OP_DECR_WRAP == STENCILOP_DECR);
227 PIPE_ASSERT(PIPE_STENCIL_OP_INVERT == STENCILOP_INVERT);
228
229 /* pipe_sprite_coord_mode happens to match 3DSTATE_SBE */
230 PIPE_ASSERT(PIPE_SPRITE_COORD_UPPER_LEFT == UPPERLEFT);
231 PIPE_ASSERT(PIPE_SPRITE_COORD_LOWER_LEFT == LOWERLEFT);
232 #undef PIPE_ASSERT
233 }
234
235 static unsigned
236 translate_prim_type(enum pipe_prim_type prim, uint8_t verts_per_patch)
237 {
238 static const unsigned map[] = {
239 [PIPE_PRIM_POINTS] = _3DPRIM_POINTLIST,
240 [PIPE_PRIM_LINES] = _3DPRIM_LINELIST,
241 [PIPE_PRIM_LINE_LOOP] = _3DPRIM_LINELOOP,
242 [PIPE_PRIM_LINE_STRIP] = _3DPRIM_LINESTRIP,
243 [PIPE_PRIM_TRIANGLES] = _3DPRIM_TRILIST,
244 [PIPE_PRIM_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
245 [PIPE_PRIM_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
246 [PIPE_PRIM_QUADS] = _3DPRIM_QUADLIST,
247 [PIPE_PRIM_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
248 [PIPE_PRIM_POLYGON] = _3DPRIM_POLYGON,
249 [PIPE_PRIM_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
250 [PIPE_PRIM_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
251 [PIPE_PRIM_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
252 [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
253 [PIPE_PRIM_PATCHES] = _3DPRIM_PATCHLIST_1 - 1,
254 };
255
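/* _3DPRIM_PATCHLIST_n values are consecutive, so the PATCHES entry
 * stores _3DPRIM_PATCHLIST_1 - 1; adding verts_per_patch below yields
 * the correct _3DPRIM_PATCHLIST_n topology.
 */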
256 return map[prim] + (prim == PIPE_PRIM_PATCHES ? verts_per_patch : 0);
257 }
258
259 static unsigned
260 translate_compare_func(enum pipe_compare_func pipe_func)
261 {
262 static const unsigned map[] = {
263 [PIPE_FUNC_NEVER] = COMPAREFUNCTION_NEVER,
264 [PIPE_FUNC_LESS] = COMPAREFUNCTION_LESS,
265 [PIPE_FUNC_EQUAL] = COMPAREFUNCTION_EQUAL,
266 [PIPE_FUNC_LEQUAL] = COMPAREFUNCTION_LEQUAL,
267 [PIPE_FUNC_GREATER] = COMPAREFUNCTION_GREATER,
268 [PIPE_FUNC_NOTEQUAL] = COMPAREFUNCTION_NOTEQUAL,
269 [PIPE_FUNC_GEQUAL] = COMPAREFUNCTION_GEQUAL,
270 [PIPE_FUNC_ALWAYS] = COMPAREFUNCTION_ALWAYS,
271 };
272 return map[pipe_func];
273 }
274
275 static unsigned
276 translate_shadow_func(enum pipe_compare_func pipe_func)
277 {
278 /* Gallium specifies the result of shadow comparisons as:
279 *
280 * 1 if ref <op> texel,
281 * 0 otherwise.
282 *
283 * The hardware does:
284 *
285 * 0 if texel <op> ref,
286 * 1 otherwise.
287 *
288 * So we need to flip the operator and also negate.
289 */
290 static const unsigned map[] = {
291 [PIPE_FUNC_NEVER] = PREFILTEROPALWAYS,
292 [PIPE_FUNC_LESS] = PREFILTEROPLEQUAL,
293 [PIPE_FUNC_EQUAL] = PREFILTEROPNOTEQUAL,
294 [PIPE_FUNC_LEQUAL] = PREFILTEROPLESS,
295 [PIPE_FUNC_GREATER] = PREFILTEROPGEQUAL,
296 [PIPE_FUNC_NOTEQUAL] = PREFILTEROPEQUAL,
297 [PIPE_FUNC_GEQUAL] = PREFILTEROPGREATER,
298 [PIPE_FUNC_ALWAYS] = PREFILTEROPNEVER,
299 };
300 return map[pipe_func];
301 }
302
303 static unsigned
304 translate_cull_mode(unsigned pipe_face)
305 {
306 static const unsigned map[4] = {
307 [PIPE_FACE_NONE] = CULLMODE_NONE,
308 [PIPE_FACE_FRONT] = CULLMODE_FRONT,
309 [PIPE_FACE_BACK] = CULLMODE_BACK,
310 [PIPE_FACE_FRONT_AND_BACK] = CULLMODE_BOTH,
311 };
312 return map[pipe_face];
313 }
314
315 static unsigned
316 translate_fill_mode(unsigned pipe_polymode)
317 {
318 static const unsigned map[4] = {
319 [PIPE_POLYGON_MODE_FILL] = FILL_MODE_SOLID,
320 [PIPE_POLYGON_MODE_LINE] = FILL_MODE_WIREFRAME,
321 [PIPE_POLYGON_MODE_POINT] = FILL_MODE_POINT,
322 [PIPE_POLYGON_MODE_FILL_RECTANGLE] = FILL_MODE_SOLID,
323 };
324 return map[pipe_polymode];
325 }
326
327 static unsigned
328 translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
329 {
330 static const unsigned map[] = {
331 [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
332 [PIPE_TEX_MIPFILTER_LINEAR] = MIPFILTER_LINEAR,
333 [PIPE_TEX_MIPFILTER_NONE] = MIPFILTER_NONE,
334 };
335 return map[pipe_mip];
336 }
337
338 static uint32_t
339 translate_wrap(unsigned pipe_wrap)
340 {
341 static const unsigned map[] = {
342 [PIPE_TEX_WRAP_REPEAT] = TCM_WRAP,
343 [PIPE_TEX_WRAP_CLAMP] = TCM_HALF_BORDER,
344 [PIPE_TEX_WRAP_CLAMP_TO_EDGE] = TCM_CLAMP,
345 [PIPE_TEX_WRAP_CLAMP_TO_BORDER] = TCM_CLAMP_BORDER,
346 [PIPE_TEX_WRAP_MIRROR_REPEAT] = TCM_MIRROR,
347 [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,
348
349 /* These are unsupported. */
350 [PIPE_TEX_WRAP_MIRROR_CLAMP] = -1,
351 [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
352 };
353 return map[pipe_wrap];
354 }
355
356 static struct iris_address
357 ro_bo(struct iris_bo *bo, uint64_t offset)
358 {
359 /* CSOs must pass NULL for bo! Otherwise it will add the BO to the
360 * validation list at CSO creation time, instead of draw time.
361 */
362 return (struct iris_address) { .bo = bo, .offset = offset };
363 }
364
365 static struct iris_address
366 rw_bo(struct iris_bo *bo, uint64_t offset)
367 {
368 /* CSOs must pass NULL for bo! Otherwise it will add the BO to the
369 * validation list at CSO creation time, instead of draw time.
370 */
371 return (struct iris_address) { .bo = bo, .offset = offset, .write = true };
372 }
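/* Note that passing a NULL bo produces a relocation-free address:
 * __gen_combine_address() then uses only the offset, which is how
 * zone-relative addresses (e.g. ro_bo(NULL, IRIS_MEMZONE_SHADER_START))
 * and CSO-time packets avoid pinning anything.
 */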
373
374 /**
375 * Allocate space for some indirect state.
376 *
377 * Return a pointer to the map (to fill it out) and a state ref (for
378 * referring to the state in GPU commands).
379 */
380 static void *
381 upload_state(struct u_upload_mgr *uploader,
382 struct iris_state_ref *ref,
383 unsigned size,
384 unsigned alignment)
385 {
386 void *p = NULL;
387 u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
388 return p;
389 }
390
391 /**
392 * Stream out temporary/short-lived state.
393 *
394 * This allocates space, pins the BO, and includes the BO address in the
395 * returned offset (which works because all state lives in 32-bit memory
396 * zones).
397 */
398 static uint32_t *
399 stream_state(struct iris_batch *batch,
400 struct u_upload_mgr *uploader,
401 struct pipe_resource **out_res,
402 unsigned size,
403 unsigned alignment,
404 uint32_t *out_offset)
405 {
406 void *ptr = NULL;
407
408 u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);
409
410 struct iris_bo *bo = iris_resource_bo(*out_res);
411 iris_use_pinned_bo(batch, bo, false);
412
413 *out_offset += iris_bo_offset_from_base_address(bo);
414
415 return ptr;
416 }
417
418 /**
419 * stream_state() + memcpy.
420 */
421 static uint32_t
422 emit_state(struct iris_batch *batch,
423 struct u_upload_mgr *uploader,
424 struct pipe_resource **out_res,
425 const void *data,
426 unsigned size,
427 unsigned alignment)
428 {
429 unsigned offset = 0;
430 uint32_t *map =
431 stream_state(batch, uploader, out_res, size, alignment, &offset);
432
433 if (map)
434 memcpy(map, data, size);
435
436 return offset;
437 }
438
439 /**
440 * Did field 'x' change between 'old_cso' and 'new_cso'?
441 *
442 * (If so, we may want to set some dirty flags.)
443 */
444 #define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
445 #define cso_changed_memcmp(x) \
446 (!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
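/* Typical usage in a bind hook (see iris_bind_zsa_state() below):
 *
 *    if (cso_changed(alpha.enabled))
 *       ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;
 */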
447
448 static void
449 flush_for_state_base_change(struct iris_batch *batch)
450 {
451 /* Flush before emitting STATE_BASE_ADDRESS.
452 *
453 * This isn't documented anywhere in the PRM. However, it seems to be
454 * necessary prior to changing the surface state base address. We've
455 * seen issues in Vulkan where we get GPU hangs when using multi-level
456 * command buffers which clear depth, reset state base address, and then
457 * go render stuff.
458 *
459 * Normally, in GL, we would trust the kernel to do sufficient stalls
460 * and flushes prior to executing our batch. However, it doesn't seem
461 * as if the kernel's flushing is always sufficient and we don't want to
462 * rely on it.
463 *
464 * We make this an end-of-pipe sync instead of a normal flush because we
465 * do not know the current status of the GPU. On Haswell at least,
466 * having a fast-clear operation in flight at the same time as a normal
467 * rendering operation can cause hangs. Since the kernel's flushing is
468 * insufficient, we need to ensure that any rendering operations from
469 * other processes are definitely complete before we try to do our own
470 * rendering. It's a bit of a big hammer but it appears to work.
471 */
472 iris_emit_end_of_pipe_sync(batch,
473 PIPE_CONTROL_RENDER_TARGET_FLUSH |
474 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
475 PIPE_CONTROL_DATA_CACHE_FLUSH);
476 }
477
478 static void
479 _iris_emit_lri(struct iris_batch *batch, uint32_t reg, uint32_t val)
480 {
481 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
482 lri.RegisterOffset = reg;
483 lri.DataDWord = val;
484 }
485 }
486 #define iris_emit_lri(b, r, v) _iris_emit_lri(b, GENX(r##_num), v)
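/* The wrapper takes a genxml register name; GENX(reg##_num) expands to
 * that register's MMIO offset.
 */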
487
488 static void
489 emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
490 {
491 #if GEN_GEN >= 8 && GEN_GEN < 10
492 /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
493 *
494 * Software must clear the COLOR_CALC_STATE Valid field in
495 * 3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
496 * with Pipeline Select set to GPGPU.
497 *
498 * The internal hardware docs recommend the same workaround for Gen9
499 * hardware too.
500 */
501 if (pipeline == GPGPU)
502 iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
503 #endif
504
506 /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
507 * PIPELINE_SELECT [DevBWR+]":
508 *
509 * "Project: DEVSNB+
510 *
511 * Software must ensure all the write caches are flushed through a
512 * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
513 * command to invalidate read only caches prior to programming
514 * MI_PIPELINE_SELECT command to change the Pipeline Select Mode."
515 */
516 iris_emit_pipe_control_flush(batch,
517 PIPE_CONTROL_RENDER_TARGET_FLUSH |
518 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
519 PIPE_CONTROL_DATA_CACHE_FLUSH |
520 PIPE_CONTROL_CS_STALL);
521
522 iris_emit_pipe_control_flush(batch,
523 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
524 PIPE_CONTROL_CONST_CACHE_INVALIDATE |
525 PIPE_CONTROL_STATE_CACHE_INVALIDATE |
526 PIPE_CONTROL_INSTRUCTION_INVALIDATE);
527
528 iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
529 #if GEN_GEN >= 9
530 sel.MaskBits = 3;
531 #endif
532 sel.PipelineSelection = pipeline;
533 }
534 }
535
536 UNUSED static void
537 init_glk_barrier_mode(struct iris_batch *batch, uint32_t value)
538 {
539 #if GEN_GEN == 9
540 /* Project: DevGLK
541 *
542 * "This chicken bit works around a hardware issue with barrier
543 * logic encountered when switching between GPGPU and 3D pipelines.
544 * To workaround the issue, this mode bit should be set after a
545 * pipeline is selected."
546 */
547 uint32_t reg_val;
548 iris_pack_state(GENX(SLICE_COMMON_ECO_CHICKEN1), &reg_val, reg) {
549 reg.GLKBarrierMode = value;
550 reg.GLKBarrierModeMask = 1;
551 }
552 iris_emit_lri(batch, SLICE_COMMON_ECO_CHICKEN1, reg_val);
553 #endif
554 }
555
556 static void
557 init_state_base_address(struct iris_batch *batch)
558 {
559 flush_for_state_base_change(batch);
560
561 /* We program most base addresses once at context initialization time.
562 * Each base address points at a 4GB memory zone, and never needs to
563 * change. See iris_bufmgr.h for a description of the memory zones.
564 *
565 * The one exception is Surface State Base Address, which needs to be
566 * updated occasionally. See iris_binder.c for the details there.
567 */
568 iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
569 #if 0
570 // XXX: MOCS is stupid for this.
571 sba.GeneralStateMemoryObjectControlState = MOCS_WB;
572 sba.StatelessDataPortAccessMemoryObjectControlState = MOCS_WB;
573 sba.DynamicStateMemoryObjectControlState = MOCS_WB;
574 sba.IndirectObjectMemoryObjectControlState = MOCS_WB;
575 sba.InstructionMemoryObjectControlState = MOCS_WB;
576 sba.BindlessSurfaceStateMemoryObjectControlState = MOCS_WB;
577 #endif
578
579 sba.GeneralStateBaseAddressModifyEnable = true;
580 sba.DynamicStateBaseAddressModifyEnable = true;
581 sba.IndirectObjectBaseAddressModifyEnable = true;
582 sba.InstructionBaseAddressModifyEnable = true;
583 sba.GeneralStateBufferSizeModifyEnable = true;
584 sba.DynamicStateBufferSizeModifyEnable = true;
585 sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
586 sba.IndirectObjectBufferSizeModifyEnable = true;
587 sba.InstructionBuffersizeModifyEnable = true;
588
589 sba.InstructionBaseAddress = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
590 sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);
591
592 sba.GeneralStateBufferSize = 0xfffff;
593 sba.IndirectObjectBufferSize = 0xfffff;
594 sba.InstructionBufferSize = 0xfffff;
595 sba.DynamicStateBufferSize = 0xfffff;
596 }
597 }
598
599 /**
600 * Upload the initial GPU state for a render context.
601 *
602 * This sets some invariant state that needs to be programmed a particular
603 * way, but that we never actually change.
604 */
605 static void
606 iris_init_render_context(struct iris_screen *screen,
607 struct iris_batch *batch,
608 struct iris_vtable *vtbl,
609 struct pipe_debug_callback *dbg)
610 {
611 UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
612 uint32_t reg_val;
613
614 emit_pipeline_select(batch, _3D);
615
616 init_state_base_address(batch);
617
618 // XXX: INSTPM on Gen8
619 iris_pack_state(GENX(CS_DEBUG_MODE2), &reg_val, reg) {
620 reg.CONSTANT_BUFFERAddressOffsetDisable = true;
621 reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
622 }
623 iris_emit_lri(batch, CS_DEBUG_MODE2, reg_val);
624
625 #if GEN_GEN == 9
626 iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
627 reg.FloatBlendOptimizationEnable = true;
628 reg.FloatBlendOptimizationEnableMask = true;
629 reg.PartialResolveDisableInVC = true;
630 reg.PartialResolveDisableInVCMask = true;
631 }
632 iris_emit_lri(batch, CACHE_MODE_1, reg_val);
633
634 if (devinfo->is_geminilake)
635 init_glk_barrier_mode(batch, GLK_BARRIER_MODE_3D_HULL);
636 #endif
637
638 #if GEN_GEN == 11
639 iris_pack_state(GENX(SAMPLER_MODE), &reg_val, reg) {
640 reg.HeaderlessMessageforPreemptableContexts = 1;
641 reg.HeaderlessMessageforPreemptableContextsMask = 1;
642 }
643 iris_emit_lri(batch, SAMPLER_MODE, reg_val);
644
645 // XXX: 3D_MODE?
646 #endif
647
648 /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
649 * changing it dynamically. We set it to the maximum size here, and
650 * instead include the render target dimensions in the viewport, so
651 * viewport extents clipping takes care of pruning stray geometry.
652 */
653 iris_emit_cmd(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
654 rect.ClippedDrawingRectangleXMax = UINT16_MAX;
655 rect.ClippedDrawingRectangleYMax = UINT16_MAX;
656 }
657
658 /* Set the initial MSAA sample positions. */
659 iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_PATTERN), pat) {
660 GEN_SAMPLE_POS_1X(pat._1xSample);
661 GEN_SAMPLE_POS_2X(pat._2xSample);
662 GEN_SAMPLE_POS_4X(pat._4xSample);
663 GEN_SAMPLE_POS_8X(pat._8xSample);
664 GEN_SAMPLE_POS_16X(pat._16xSample);
665 }
666
667 /* Use the legacy AA line coverage computation. */
668 iris_emit_cmd(batch, GENX(3DSTATE_AA_LINE_PARAMETERS), foo);
669
670 /* Disable chromakeying (it's for media) */
671 iris_emit_cmd(batch, GENX(3DSTATE_WM_CHROMAKEY), foo);
672
673 /* We want regular rendering, not special HiZ operations. */
674 iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);
675
676 /* No polygon stippling offsets are necessary. */
677 // XXX: may need to set an offset for origin-UL framebuffers
678 iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);
679
680 /* Set a static partitioning of the push constant area. */
681 // XXX: this may be a bad idea...could starve the push ringbuffers...
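/* The 3DSTATE_PUSH_CONSTANT_ALLOC_{VS,HS,DS,GS,PS} packets are
 * identical except for their consecutive command sub-opcodes
 * (18 through 22), so we reuse the VS packet and simply patch
 * _3DCommandSubOpcode for each stage.
 */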
682 for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
683 iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
684 alloc._3DCommandSubOpcode = 18 + i;
685 alloc.ConstantBufferOffset = 6 * i;
686 alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
687 }
688 }
689 }
690
691 static void
692 iris_init_compute_context(struct iris_screen *screen,
693 struct iris_batch *batch,
694 struct iris_vtable *vtbl,
695 struct pipe_debug_callback *dbg)
696 {
697 UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
698
699 emit_pipeline_select(batch, GPGPU);
700
701 init_state_base_address(batch);
702
703 #if GEN_GEN == 9
704 if (devinfo->is_geminilake)
705 init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
706 #endif
707 }
708
709 struct iris_vertex_buffer_state {
710 /** The 3DSTATE_VERTEX_BUFFERS hardware packet. */
711 uint32_t vertex_buffers[1 + 33 * GENX(VERTEX_BUFFER_STATE_length)];
712
713 /** The resource to source vertex data from. */
714 struct pipe_resource *resources[33];
715
716 /** The number of bound vertex buffers. */
717 unsigned num_buffers;
718 };
719
720 struct iris_depth_buffer_state {
721 /* Depth/HiZ/Stencil related hardware packets. */
722 uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
723 GENX(3DSTATE_STENCIL_BUFFER_length) +
724 GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
725 GENX(3DSTATE_CLEAR_PARAMS_length)];
726 };
727
728 /**
729 * Generation-specific context state (ice->state.genx->...).
730 *
731 * Most state can go in iris_context directly, but these encode hardware
732 * packets which vary by generation.
733 */
734 struct iris_genx_state {
735 /** SF_CLIP_VIEWPORT */
736 uint32_t sf_cl_vp[GENX(SF_CLIP_VIEWPORT_length) * IRIS_MAX_VIEWPORTS];
737
738 struct iris_vertex_buffer_state vertex_buffers;
739 struct iris_depth_buffer_state depth_buffer;
740
741 uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];
742 uint32_t streamout[4 * GENX(3DSTATE_STREAMOUT_length)];
743 };
744
745 /**
746 * The pipe->set_blend_color() driver hook.
747 *
748 * This corresponds to our COLOR_CALC_STATE.
749 */
750 static void
751 iris_set_blend_color(struct pipe_context *ctx,
752 const struct pipe_blend_color *state)
753 {
754 struct iris_context *ice = (struct iris_context *) ctx;
755
756 /* Our COLOR_CALC_STATE is exactly pipe_blend_color, so just memcpy */
757 memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
758 ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
759 }
760
761 /**
762 * Gallium CSO for blend state (see pipe_blend_state).
763 */
764 struct iris_blend_state {
765 /** Partial 3DSTATE_PS_BLEND */
766 uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];
767
768 /** Partial BLEND_STATE */
769 uint32_t blend_state[GENX(BLEND_STATE_length) +
770 BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)];
771
772 bool alpha_to_coverage; /* for shader key */
773 };
774
775 /**
776 * The pipe->create_blend_state() driver hook.
777 *
778 * Translates a pipe_blend_state into iris_blend_state.
779 */
780 static void *
781 iris_create_blend_state(struct pipe_context *ctx,
782 const struct pipe_blend_state *state)
783 {
784 struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
785 uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);
786
787 cso->alpha_to_coverage = state->alpha_to_coverage;
788
789 bool indep_alpha_blend = false;
790
791 for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
792 const struct pipe_rt_blend_state *rt =
793 &state->rt[state->independent_blend_enable ? i : 0];
794
795 if (rt->rgb_func != rt->alpha_func ||
796 rt->rgb_src_factor != rt->alpha_src_factor ||
797 rt->rgb_dst_factor != rt->alpha_dst_factor)
798 indep_alpha_blend = true;
799
800 iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
801 be.LogicOpEnable = state->logicop_enable;
802 be.LogicOpFunction = state->logicop_func;
803
804 be.PreBlendSourceOnlyClampEnable = false;
805 be.ColorClampRange = COLORCLAMP_RTFORMAT;
806 be.PreBlendColorClampEnable = true;
807 be.PostBlendColorClampEnable = true;
808
809 be.ColorBufferBlendEnable = rt->blend_enable;
810
811 be.ColorBlendFunction = rt->rgb_func;
812 be.AlphaBlendFunction = rt->alpha_func;
813 be.SourceBlendFactor = rt->rgb_src_factor;
814 be.SourceAlphaBlendFactor = rt->alpha_src_factor;
815 be.DestinationBlendFactor = rt->rgb_dst_factor;
816 be.DestinationAlphaBlendFactor = rt->alpha_dst_factor;
817
818 be.WriteDisableRed = !(rt->colormask & PIPE_MASK_R);
819 be.WriteDisableGreen = !(rt->colormask & PIPE_MASK_G);
820 be.WriteDisableBlue = !(rt->colormask & PIPE_MASK_B);
821 be.WriteDisableAlpha = !(rt->colormask & PIPE_MASK_A);
822 }
823 blend_entry += GENX(BLEND_STATE_ENTRY_length);
824 }
825
826 iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
827 /* pb.HasWriteableRT is filled in at draw time. */
828 /* pb.AlphaTestEnable is filled in at draw time. */
829 pb.AlphaToCoverageEnable = state->alpha_to_coverage;
830 pb.IndependentAlphaBlendEnable = indep_alpha_blend;
831
832 pb.ColorBufferBlendEnable = state->rt[0].blend_enable;
833
834 pb.SourceBlendFactor = state->rt[0].rgb_src_factor;
835 pb.SourceAlphaBlendFactor = state->rt[0].alpha_src_factor;
836 pb.DestinationBlendFactor = state->rt[0].rgb_dst_factor;
837 pb.DestinationAlphaBlendFactor = state->rt[0].alpha_dst_factor;
838 }
839
840 iris_pack_state(GENX(BLEND_STATE), cso->blend_state, bs) {
841 bs.AlphaToCoverageEnable = state->alpha_to_coverage;
842 bs.IndependentAlphaBlendEnable = indep_alpha_blend;
843 bs.AlphaToOneEnable = state->alpha_to_one;
844 bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
845 bs.ColorDitherEnable = state->dither;
846 /* bs.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
847 }
848
850 return cso;
851 }
852
853 /**
854 * The pipe->bind_blend_state() driver hook.
855 *
856 * Bind a blending CSO and flag related dirty bits.
857 */
858 static void
859 iris_bind_blend_state(struct pipe_context *ctx, void *state)
860 {
861 struct iris_context *ice = (struct iris_context *) ctx;
862 ice->state.cso_blend = state;
863 ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
864 ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
865 ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_BLEND];
866 }
867
868 /**
869 * Gallium CSO for depth, stencil, and alpha testing state.
870 */
871 struct iris_depth_stencil_alpha_state {
872 /** Partial 3DSTATE_WM_DEPTH_STENCIL. */
873 uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
874
875 /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
876 struct pipe_alpha_state alpha;
877
878 /** Outbound to resolve and cache set tracking. */
879 bool depth_writes_enabled;
880 bool stencil_writes_enabled;
881 };
882
883 /**
884 * The pipe->create_depth_stencil_alpha_state() driver hook.
885 *
886 * We encode most of 3DSTATE_WM_DEPTH_STENCIL, and just save off the alpha
887 * testing state since we need pieces of it in a variety of places.
888 */
889 static void *
890 iris_create_zsa_state(struct pipe_context *ctx,
891 const struct pipe_depth_stencil_alpha_state *state)
892 {
893 struct iris_depth_stencil_alpha_state *cso =
894 malloc(sizeof(struct iris_depth_stencil_alpha_state));
895
896 bool two_sided_stencil = state->stencil[1].enabled;
897
898 cso->alpha = state->alpha;
899 cso->depth_writes_enabled = state->depth.writemask;
900 cso->stencil_writes_enabled =
901 state->stencil[0].writemask != 0 ||
902 (two_sided_stencil && state->stencil[1].writemask != 0);
903
904 /* The state tracker needs to optimize away EQUAL writes for us. */
905 assert(!(state->depth.func == PIPE_FUNC_EQUAL && state->depth.writemask));
906
907 iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), cso->wmds, wmds) {
908 wmds.StencilFailOp = state->stencil[0].fail_op;
909 wmds.StencilPassDepthFailOp = state->stencil[0].zfail_op;
910 wmds.StencilPassDepthPassOp = state->stencil[0].zpass_op;
911 wmds.StencilTestFunction =
912 translate_compare_func(state->stencil[0].func);
913 wmds.BackfaceStencilFailOp = state->stencil[1].fail_op;
914 wmds.BackfaceStencilPassDepthFailOp = state->stencil[1].zfail_op;
915 wmds.BackfaceStencilPassDepthPassOp = state->stencil[1].zpass_op;
916 wmds.BackfaceStencilTestFunction =
917 translate_compare_func(state->stencil[1].func);
918 wmds.DepthTestFunction = translate_compare_func(state->depth.func);
919 wmds.DoubleSidedStencilEnable = two_sided_stencil;
920 wmds.StencilTestEnable = state->stencil[0].enabled;
921 wmds.StencilBufferWriteEnable =
922 state->stencil[0].writemask != 0 ||
923 (two_sided_stencil && state->stencil[1].writemask != 0);
924 wmds.DepthTestEnable = state->depth.enabled;
925 wmds.DepthBufferWriteEnable = state->depth.writemask;
926 wmds.StencilTestMask = state->stencil[0].valuemask;
927 wmds.StencilWriteMask = state->stencil[0].writemask;
928 wmds.BackfaceStencilTestMask = state->stencil[1].valuemask;
929 wmds.BackfaceStencilWriteMask = state->stencil[1].writemask;
930 /* wmds.[Backface]StencilReferenceValue are merged later */
931 }
932
933 return cso;
934 }
935
936 /**
937 * The pipe->bind_depth_stencil_alpha_state() driver hook.
938 *
939 * Bind a depth/stencil/alpha CSO and flag related dirty bits.
940 */
941 static void
942 iris_bind_zsa_state(struct pipe_context *ctx, void *state)
943 {
944 struct iris_context *ice = (struct iris_context *) ctx;
945 struct iris_depth_stencil_alpha_state *old_cso = ice->state.cso_zsa;
946 struct iris_depth_stencil_alpha_state *new_cso = state;
947
948 if (new_cso) {
949 if (cso_changed(alpha.ref_value))
950 ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
951
952 if (cso_changed(alpha.enabled))
953 ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;
954
955 if (cso_changed(alpha.func))
956 ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
957
958 ice->state.depth_writes_enabled = new_cso->depth_writes_enabled;
959 ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled;
960 }
961
962 ice->state.cso_zsa = new_cso;
963 ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
964 ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
965 ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];
966 }
967
968 /**
969 * Gallium CSO for rasterizer state.
970 */
971 struct iris_rasterizer_state {
972 uint32_t sf[GENX(3DSTATE_SF_length)];
973 uint32_t clip[GENX(3DSTATE_CLIP_length)];
974 uint32_t raster[GENX(3DSTATE_RASTER_length)];
975 uint32_t wm[GENX(3DSTATE_WM_length)];
976 uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];
977
978 uint8_t num_clip_plane_consts;
979 bool clip_halfz; /* for CC_VIEWPORT */
980 bool depth_clip_near; /* for CC_VIEWPORT */
981 bool depth_clip_far; /* for CC_VIEWPORT */
982 bool flatshade; /* for shader state */
983 bool flatshade_first; /* for stream output */
984 bool clamp_fragment_color; /* for shader state */
985 bool light_twoside; /* for shader state */
986 bool rasterizer_discard; /* for 3DSTATE_STREAMOUT */
987 bool half_pixel_center; /* for 3DSTATE_MULTISAMPLE */
988 bool line_stipple_enable;
989 bool poly_stipple_enable;
990 bool multisample;
991 bool force_persample_interp;
992 enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
993 uint16_t sprite_coord_enable;
994 };
995
996 static float
997 get_line_width(const struct pipe_rasterizer_state *state)
998 {
999 float line_width = state->line_width;
1000
1001 /* From the OpenGL 4.4 spec:
1002 *
1003 * "The actual width of non-antialiased lines is determined by rounding
1004 * the supplied width to the nearest integer, then clamping it to the
1005 * implementation-dependent maximum non-antialiased line width."
1006 */
1007 if (!state->multisample && !state->line_smooth)
1008 line_width = roundf(state->line_width);
1009
1010 if (!state->multisample && state->line_smooth && line_width < 1.5f) {
1011 /* For 1 pixel line thickness or less, the general anti-aliasing
1012 * algorithm gives up, and a garbage line is generated. Setting a
1013 * Line Width of 0.0 specifies the rasterization of the "thinnest"
1014 * (one-pixel-wide), non-antialiased lines.
1015 *
1016 * Lines rendered with zero Line Width are rasterized using the
1017 * "Grid Intersection Quantization" rules as specified by the
1018 * "Zero-Width (Cosmetic) Line Rasterization" section of the docs.
1019 */
1020 line_width = 0.0f;
1021 }
1022
1023 return line_width;
1024 }
1025
1026 /**
1027 * The pipe->create_rasterizer_state() driver hook.
1028 */
1029 static void *
1030 iris_create_rasterizer_state(struct pipe_context *ctx,
1031 const struct pipe_rasterizer_state *state)
1032 {
1033 struct iris_rasterizer_state *cso =
1034 malloc(sizeof(struct iris_rasterizer_state));
1035
1036 #if 0
1037 point_quad_rasterization -> SBE?
1038
1039 not necessary?
1040 {
1041 poly_smooth
1042 force_persample_interp - ?
1043 bottom_edge_rule
1044
1045 offset_units_unscaled - cap not exposed
1046 }
1047 #endif
1048
1049 // XXX: it may make more sense just to store the pipe_rasterizer_state,
1050 // we're copying a lot of booleans here. But we don't need all of them...
1051
1052 cso->multisample = state->multisample;
1053 cso->force_persample_interp = state->force_persample_interp;
1054 cso->clip_halfz = state->clip_halfz;
1055 cso->depth_clip_near = state->depth_clip_near;
1056 cso->depth_clip_far = state->depth_clip_far;
1057 cso->flatshade = state->flatshade;
1058 cso->flatshade_first = state->flatshade_first;
1059 cso->clamp_fragment_color = state->clamp_fragment_color;
1060 cso->light_twoside = state->light_twoside;
1061 cso->rasterizer_discard = state->rasterizer_discard;
1062 cso->half_pixel_center = state->half_pixel_center;
1063 cso->sprite_coord_mode = state->sprite_coord_mode;
1064 cso->sprite_coord_enable = state->sprite_coord_enable;
1065 cso->line_stipple_enable = state->line_stipple_enable;
1066 cso->poly_stipple_enable = state->poly_stipple_enable;
1067
1068 if (state->clip_plane_enable != 0)
1069 cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1;
1070 else
1071 cso->num_clip_plane_consts = 0;
1072
1073 float line_width = get_line_width(state);
1074
1075 iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
1076 sf.StatisticsEnable = true;
1077 sf.ViewportTransformEnable = true;
1078 sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
1079 sf.LineEndCapAntialiasingRegionWidth =
1080 state->line_smooth ? _10pixels : _05pixels;
1081 sf.LastPixelEnable = state->line_last_pixel;
1082 sf.LineWidth = line_width;
1083 sf.SmoothPointEnable = state->point_smooth || state->multisample;
1084 sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
1085 sf.PointWidth = state->point_size;
1086
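/* ProvokingVertexSelect values 0/1/2 pick the 1st/2nd/3rd vertex of
 * the (sub)primitive. With the first-vertex convention only triangle
 * fans need adjusting (vertex 0 is the fan center); with the default
 * last-vertex convention, strips and fans use the 3rd vertex and
 * line strips the 2nd.
 */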
1087 if (state->flatshade_first) {
1088 sf.TriangleFanProvokingVertexSelect = 1;
1089 } else {
1090 sf.TriangleStripListProvokingVertexSelect = 2;
1091 sf.TriangleFanProvokingVertexSelect = 2;
1092 sf.LineStripListProvokingVertexSelect = 1;
1093 }
1094 }
1095
1096 iris_pack_command(GENX(3DSTATE_RASTER), cso->raster, rr) {
1097 rr.FrontWinding = state->front_ccw ? CounterClockwise : Clockwise;
1098 rr.CullMode = translate_cull_mode(state->cull_face);
1099 rr.FrontFaceFillMode = translate_fill_mode(state->fill_front);
1100 rr.BackFaceFillMode = translate_fill_mode(state->fill_back);
1101 rr.DXMultisampleRasterizationEnable = state->multisample;
1102 rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
1103 rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
1104 rr.GlobalDepthOffsetEnablePoint = state->offset_point;
1105 rr.GlobalDepthOffsetConstant = state->offset_units * 2;
1106 rr.GlobalDepthOffsetScale = state->offset_scale;
1107 rr.GlobalDepthOffsetClamp = state->offset_clamp;
1108 rr.SmoothPointEnable = state->point_smooth || state->multisample;
1109 rr.AntialiasingEnable = state->line_smooth;
1110 rr.ScissorRectangleEnable = state->scissor;
1111 rr.ViewportZNearClipTestEnable = state->depth_clip_near;
1112 rr.ViewportZFarClipTestEnable = state->depth_clip_far;
1113 //rr.ConservativeRasterizationEnable = not yet supported by Gallium...
1114 }
1115
1116 iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
1117 /* cl.NonPerspectiveBarycentricEnable is filled in at draw time from
1118 * the FS program; cl.ForceZeroRTAIndexEnable is filled in from the FB.
1119 */
1120 cl.StatisticsEnable = true;
1121 cl.EarlyCullEnable = true;
1122 cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
1123 cl.ForceUserClipDistanceClipTestEnableBitmask = true;
1124 cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
1125 cl.GuardbandClipTestEnable = true;
1126 cl.ClipMode = CLIPMODE_NORMAL;
1127 cl.ClipEnable = true;
1128 cl.ViewportXYClipTestEnable = state->point_tri_clip;
1129 cl.MinimumPointWidth = 0.125;
1130 cl.MaximumPointWidth = 255.875;
1131
1132 if (state->flatshade_first) {
1133 cl.TriangleFanProvokingVertexSelect = 1;
1134 } else {
1135 cl.TriangleStripListProvokingVertexSelect = 2;
1136 cl.TriangleFanProvokingVertexSelect = 2;
1137 cl.LineStripListProvokingVertexSelect = 1;
1138 }
1139 }
1140
1141 iris_pack_command(GENX(3DSTATE_WM), cso->wm, wm) {
1142 /* wm.BarycentricInterpolationMode and wm.EarlyDepthStencilControl are
1143 * filled in at draw time from the FS program.
1144 */
1145 wm.LineAntialiasingRegionWidth = _10pixels;
1146 wm.LineEndCapAntialiasingRegionWidth = _05pixels;
1147 wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
1148 wm.LineStippleEnable = state->line_stipple_enable;
1149 wm.PolygonStippleEnable = state->poly_stipple_enable;
1150 }
1151
1152 /* Remap from 0..255 back to 1..256 */
1153 const unsigned line_stipple_factor = state->line_stipple_factor + 1;
1154
1155 iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
1156 line.LineStipplePattern = state->line_stipple_pattern;
1157 line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
1158 line.LineStippleRepeatCount = line_stipple_factor;
1159 }
1160
1161 return cso;
1162 }
1163
1164 /**
1165 * The pipe->bind_rasterizer_state() driver hook.
1166 *
1167 * Bind a rasterizer CSO and flag related dirty bits.
1168 */
1169 static void
1170 iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
1171 {
1172 struct iris_context *ice = (struct iris_context *) ctx;
1173 struct iris_rasterizer_state *old_cso = ice->state.cso_rast;
1174 struct iris_rasterizer_state *new_cso = state;
1175
1176 if (new_cso) {
1177 /* Try to avoid re-emitting 3DSTATE_LINE_STIPPLE, it's non-pipelined */
1178 if (cso_changed_memcmp(line_stipple))
1179 ice->state.dirty |= IRIS_DIRTY_LINE_STIPPLE;
1180
1181 if (cso_changed(half_pixel_center))
1182 ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
1183
1184 if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
1185 ice->state.dirty |= IRIS_DIRTY_WM;
1186
1187 if (cso_changed(rasterizer_discard) || cso_changed(flatshade_first))
1188 ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
1189
1190 if (cso_changed(depth_clip_near) || cso_changed(depth_clip_far) ||
1191 cso_changed(clip_halfz))
1192 ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
1193
1194 if (cso_changed(sprite_coord_enable) || cso_changed(light_twoside))
1195 ice->state.dirty |= IRIS_DIRTY_SBE;
1196 }
1197
1198 ice->state.cso_rast = new_cso;
1199 ice->state.dirty |= IRIS_DIRTY_RASTER;
1200 ice->state.dirty |= IRIS_DIRTY_CLIP;
1201 ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_RASTERIZER];
1202 }
1203
1204 /**
1205 * Return true if the given wrap mode requires the border color to exist.
1206 *
1207 * (We can skip uploading it if the sampler isn't going to use it.)
1208 */
1209 static bool
1210 wrap_mode_needs_border_color(unsigned wrap_mode)
1211 {
1212 return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
1213 }
1214
1215 /**
1216 * Gallium CSO for sampler state.
1217 */
1218 struct iris_sampler_state {
1219 union pipe_color_union border_color;
1220 bool needs_border_color;
1221
1222 uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
1223 };
1224
1225 /**
1226 * The pipe->create_sampler_state() driver hook.
1227 *
1228 * We fill out SAMPLER_STATE (except for the border color pointer), and
1229 * store that on the CPU. It doesn't make sense to upload it to a GPU
1230 * buffer object yet, because 3DSTATE_SAMPLER_STATE_POINTERS requires
1231 * all bound sampler states to be in contiguous memory.
1232 */
1233 static void *
1234 iris_create_sampler_state(struct pipe_context *ctx,
1235 const struct pipe_sampler_state *state)
1236 {
1237 struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);
1238
1239 if (!cso)
1240 return NULL;
1241
1242 STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
1243 STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);
1244
1245 unsigned wrap_s = translate_wrap(state->wrap_s);
1246 unsigned wrap_t = translate_wrap(state->wrap_t);
1247 unsigned wrap_r = translate_wrap(state->wrap_r);
1248
1249 memcpy(&cso->border_color, &state->border_color, sizeof(cso->border_color));
1250
1251 cso->needs_border_color = wrap_mode_needs_border_color(wrap_s) ||
1252 wrap_mode_needs_border_color(wrap_t) ||
1253 wrap_mode_needs_border_color(wrap_r);
1254
1255 float min_lod = state->min_lod;
1256 unsigned mag_img_filter = state->mag_img_filter;
1257
1258 // XXX: explain this code ported from ilo...I don't get it at all...
1259 if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE &&
1260 state->min_lod > 0.0f) {
1261 min_lod = 0.0f;
1262 mag_img_filter = state->min_img_filter;
1263 }
1264
1265 iris_pack_state(GENX(SAMPLER_STATE), cso->sampler_state, samp) {
1266 samp.TCXAddressControlMode = wrap_s;
1267 samp.TCYAddressControlMode = wrap_t;
1268 samp.TCZAddressControlMode = wrap_r;
1269 samp.CubeSurfaceControlMode = state->seamless_cube_map;
1270 samp.NonnormalizedCoordinateEnable = !state->normalized_coords;
1271 samp.MinModeFilter = state->min_img_filter;
1272 samp.MagModeFilter = mag_img_filter;
1273 samp.MipModeFilter = translate_mip_filter(state->min_mip_filter);
1274 samp.MaximumAnisotropy = RATIO21;
1275
1276 if (state->max_anisotropy >= 2) {
1277 if (state->min_img_filter == PIPE_TEX_FILTER_LINEAR) {
1278 samp.MinModeFilter = MAPFILTER_ANISOTROPIC;
1279 samp.AnisotropicAlgorithm = EWAApproximation;
1280 }
1281
1282 if (state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)
1283 samp.MagModeFilter = MAPFILTER_ANISOTROPIC;
1284
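/* RATIO21..RATIO161 encode anisotropy ratios 2:1 through 16:1 in
 * steps of two, so this maps max_anisotropy in [2, 16] onto [0, 7].
 */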
1285 samp.MaximumAnisotropy =
1286 MIN2((state->max_anisotropy - 2) / 2, RATIO161);
1287 }
1288
1289 /* Set address rounding bits if not using nearest filtering. */
1290 if (state->min_img_filter != PIPE_TEX_FILTER_NEAREST) {
1291 samp.UAddressMinFilterRoundingEnable = true;
1292 samp.VAddressMinFilterRoundingEnable = true;
1293 samp.RAddressMinFilterRoundingEnable = true;
1294 }
1295
1296 if (state->mag_img_filter != PIPE_TEX_FILTER_NEAREST) {
1297 samp.UAddressMagFilterRoundingEnable = true;
1298 samp.VAddressMagFilterRoundingEnable = true;
1299 samp.RAddressMagFilterRoundingEnable = true;
1300 }
1301
1302 if (state->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
1303 samp.ShadowFunction = translate_shadow_func(state->compare_func);
1304
1305 const float hw_max_lod = GEN_GEN >= 7 ? 14 : 13;
1306
1307 samp.LODPreClampMode = CLAMP_MODE_OGL;
1308 samp.MinLOD = CLAMP(min_lod, 0, hw_max_lod);
1309 samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
1310 samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);
1311
1312 /* .BorderColorPointer is filled in by iris_bind_sampler_states. */
1313 }
1314
1315 return cso;
1316 }
1317
1318 /**
1319 * The pipe->bind_sampler_states() driver hook.
1320 *
1321 * Now that we know all the sampler states, we upload them all into a
1322 * contiguous area of GPU memory, for 3DSTATE_SAMPLER_STATE_POINTERS_*.
1323 * We also fill out the border color state pointers at this point.
1324 *
1325 * We could defer this work to draw time, but we assume that binding
1326 * will be less frequent than drawing.
1327 */
1328 // XXX: this may be a bad idea, need to make sure that st/mesa calls us
1329 // XXX: with the complete set of shaders. If it makes multiple calls to
1330 // XXX: things one at a time, we could waste a lot of time assembling things.
1331 // XXX: it doesn't even BUY us anything to do it here, because we only flag
1332 // XXX: IRIS_DIRTY_SAMPLER_STATE when this is called...
1333 static void
1334 iris_bind_sampler_states(struct pipe_context *ctx,
1335 enum pipe_shader_type p_stage,
1336 unsigned start, unsigned count,
1337 void **states)
1338 {
1339 struct iris_context *ice = (struct iris_context *) ctx;
1340 gl_shader_stage stage = stage_from_pipe(p_stage);
1341 struct iris_shader_state *shs = &ice->state.shaders[stage];
1342
1343 assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
1344 shs->num_samplers = MAX2(shs->num_samplers, start + count);
1345
1346 for (int i = 0; i < count; i++) {
1347 shs->samplers[start + i] = states[i];
1348 }
1349
1350 /* Assemble the SAMPLER_STATEs into a contiguous table that lives
1351 * in the dynamic state memory zone, so we can point to it via the
1352 * 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
1353 */
1354 uint32_t *map =
1355 upload_state(ice->state.dynamic_uploader, &shs->sampler_table,
1356 count * 4 * GENX(SAMPLER_STATE_length), 32);
1357 if (unlikely(!map))
1358 return;
1359
1360 struct pipe_resource *res = shs->sampler_table.res;
1361 shs->sampler_table.offset +=
1362 iris_bo_offset_from_base_address(iris_resource_bo(res));
1363
1364 /* Make sure all the border colors land in the same BO. */
1365 iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);
1366
1367 for (int i = 0; i < count; i++) {
1368 struct iris_sampler_state *state = shs->samplers[i];
1369
1370 if (!state) {
1371 memset(map, 0, 4 * GENX(SAMPLER_STATE_length));
1372 } else if (!state->needs_border_color) {
1373 memcpy(map, state->sampler_state, 4 * GENX(SAMPLER_STATE_length));
1374 } else {
1375 ice->state.need_border_colors = true;
1376
1377 /* Stream out the border color and merge the pointer. */
1378 uint32_t offset =
1379 iris_upload_border_color(ice, &state->border_color);
1380
1381 uint32_t dynamic[GENX(SAMPLER_STATE_length)];
1382 iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) {
1383 dyns.BorderColorPointer = offset;
1384 }
1385
1386 for (uint32_t j = 0; j < GENX(SAMPLER_STATE_length); j++)
1387 map[j] = state->sampler_state[j] | dynamic[j];
1388 }
1389
1390 map += GENX(SAMPLER_STATE_length);
1391 }
1392
1393 ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
1394 }
1395
1396 static enum isl_channel_select
1397 fmt_swizzle(const struct iris_format_info *fmt, enum pipe_swizzle swz)
1398 {
1399 switch (swz) {
1400 case PIPE_SWIZZLE_X: return fmt->swizzle.r;
1401 case PIPE_SWIZZLE_Y: return fmt->swizzle.g;
1402 case PIPE_SWIZZLE_Z: return fmt->swizzle.b;
1403 case PIPE_SWIZZLE_W: return fmt->swizzle.a;
1404 case PIPE_SWIZZLE_1: return SCS_ONE;
1405 case PIPE_SWIZZLE_0: return SCS_ZERO;
1406 default: unreachable("invalid swizzle");
1407 }
1408 }
1409
1410 static void
1411 fill_buffer_surface_state(struct isl_device *isl_dev,
1412 struct iris_bo *bo,
1413 void *map,
1414 enum isl_format format,
1415 unsigned offset,
1416 unsigned size)
1417 {
1418 const struct isl_format_layout *fmtl = isl_format_get_layout(format);
1419 const unsigned cpp = fmtl->bpb / 8;
1420
1421 /* The ARB_texture_buffer_specification says:
1422 *
1423 * "The number of texels in the buffer texture's texel array is given by
1424 *
1425 * floor(<buffer_size> / (<components> * sizeof(<base_type>)),
1426 *
1427 * where <buffer_size> is the size of the buffer object, in basic
1428 * machine units and <components> and <base_type> are the element count
1429 * and base data type for elements, as specified in Table X.1. The
1430 * number of texels in the texel array is then clamped to the
1431 * implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
1432 *
1433 * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
1434 * so that when ISL divides by stride to obtain the number of texels, that
1435 * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
1436 */
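/* For example, a PIPE_FORMAT_R32G32B32A32_FLOAT view has cpp = 16, so
 * a huge buffer is clamped to IRIS_MAX_TEXTURE_BUFFER_SIZE * 16 bytes
 * here, and ISL's division by the 16-byte stride then yields at most
 * IRIS_MAX_TEXTURE_BUFFER_SIZE texels.
 */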
1437 unsigned final_size =
1438 MIN3(size, bo->size - offset, IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);
1439
1440 isl_buffer_fill_state(isl_dev, map,
1441 .address = bo->gtt_offset + offset,
1442 .size_B = final_size,
1443 .format = format,
1444 .stride_B = cpp,
1445 .mocs = MOCS_WB);
1446 }
1447
1448 /**
1449 * The pipe->create_sampler_view() driver hook.
1450 */
1451 static struct pipe_sampler_view *
1452 iris_create_sampler_view(struct pipe_context *ctx,
1453 struct pipe_resource *tex,
1454 const struct pipe_sampler_view *tmpl)
1455 {
1456 struct iris_context *ice = (struct iris_context *) ctx;
1457 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
1458 const struct gen_device_info *devinfo = &screen->devinfo;
1459 struct iris_sampler_view *isv = calloc(1, sizeof(struct iris_sampler_view));
1460
1461 if (!isv)
1462 return NULL;
1463
1464 /* initialize base object */
1465 isv->base = *tmpl;
1466 isv->base.context = ctx;
1467 isv->base.texture = NULL;
1468 pipe_reference_init(&isv->base.reference, 1);
1469 pipe_resource_reference(&isv->base.texture, tex);
1470
1471 void *map = upload_state(ice->state.surface_uploader, &isv->surface_state,
1472 4 * GENX(RENDER_SURFACE_STATE_length), 64);
1473 if (unlikely(!map))
1474 return NULL;
1475
1476 struct iris_bo *state_bo = iris_resource_bo(isv->surface_state.res);
1477 isv->surface_state.offset += iris_bo_offset_from_base_address(state_bo);
1478
1479 if (util_format_is_depth_or_stencil(tmpl->format)) {
1480 struct iris_resource *zres, *sres;
1481 const struct util_format_description *desc =
1482 util_format_description(tmpl->format);
1483
1484 iris_get_depth_stencil_resources(tex, &zres, &sres);
1485
1486 tex = util_format_has_depth(desc) ? &zres->base : &sres->base;
1487 }
1488
1489 isv->res = (struct iris_resource *) tex;
1490
1491 isl_surf_usage_flags_t usage =
1492 ISL_SURF_USAGE_TEXTURE_BIT |
1493 (isv->res->surf.usage & ISL_SURF_USAGE_CUBE_BIT);
1494
1495 const struct iris_format_info fmt =
1496 iris_format_for_usage(devinfo, tmpl->format, usage);
1497
1498 isv->view = (struct isl_view) {
1499 .format = fmt.fmt,
1500 .swizzle = (struct isl_swizzle) {
1501 .r = fmt_swizzle(&fmt, tmpl->swizzle_r),
1502 .g = fmt_swizzle(&fmt, tmpl->swizzle_g),
1503 .b = fmt_swizzle(&fmt, tmpl->swizzle_b),
1504 .a = fmt_swizzle(&fmt, tmpl->swizzle_a),
1505 },
1506 .usage = usage,
1507 };
1508
1509 /* Fill out SURFACE_STATE for this view. */
1510 if (tmpl->target != PIPE_BUFFER) {
1511 isv->view.base_level = tmpl->u.tex.first_level;
1512 isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;
1513 // XXX: do I need to port f9fd0cf4790cb2a530e75d1a2206dbb9d8af7cb2?
1514 isv->view.base_array_layer = tmpl->u.tex.first_layer;
1515 isv->view.array_len =
1516 tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
1517
1518 isl_surf_fill_state(&screen->isl_dev, map,
1519 .surf = &isv->res->surf, .view = &isv->view,
1520 .mocs = MOCS_WB,
1521 .address = isv->res->bo->gtt_offset);
1522 // .aux_surf =
1523 // .clear_color = clear_color,
1524 } else {
1525 fill_buffer_surface_state(&screen->isl_dev, isv->res->bo, map,
1526 isv->view.format, tmpl->u.buf.offset,
1527 tmpl->u.buf.size);
1528 }
1529
1530 return &isv->base;
1531 }
1532
1533 static void
1534 iris_sampler_view_destroy(struct pipe_context *ctx,
1535 struct pipe_sampler_view *state)
1536 {
1537 struct iris_sampler_view *isv = (void *) state;
1538 pipe_resource_reference(&state->texture, NULL);
1539 pipe_resource_reference(&isv->surface_state.res, NULL);
1540 free(isv);
1541 }
1542
1543 /**
1544 * The pipe->create_surface() driver hook.
1545 *
1546 * In Gallium nomenclature, a "surface" is a view of a resource that
1547 * can be bound as a render target or a depth/stencil buffer.
1548 */
1549 static struct pipe_surface *
1550 iris_create_surface(struct pipe_context *ctx,
1551 struct pipe_resource *tex,
1552 const struct pipe_surface *tmpl)
1553 {
1554 struct iris_context *ice = (struct iris_context *) ctx;
1555 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
1556 const struct gen_device_info *devinfo = &screen->devinfo;
1557 struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
1558 struct pipe_surface *psurf = &surf->base;
1559 struct iris_resource *res = (struct iris_resource *) tex;
1560
1561 if (!surf)
1562 return NULL;
1563
1564 pipe_reference_init(&psurf->reference, 1);
1565 pipe_resource_reference(&psurf->texture, tex);
1566 psurf->context = ctx;
1567 psurf->format = tmpl->format;
1568 psurf->width = tex->width0;
1569 psurf->height = tex->height0;
1570 psurf->texture = tex;
1571 psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
1572 psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
1573 psurf->u.tex.level = tmpl->u.tex.level;
1574
1575 isl_surf_usage_flags_t usage = 0;
1576 if (tmpl->writable)
1577 usage = ISL_SURF_USAGE_STORAGE_BIT;
1578 else if (util_format_is_depth_or_stencil(tmpl->format))
1579 usage = ISL_SURF_USAGE_DEPTH_BIT;
1580 else
1581 usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
1582
1583 const struct iris_format_info fmt =
1584 iris_format_for_usage(devinfo, psurf->format, usage);
1585
1586 if ((usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
1587 !isl_format_supports_rendering(devinfo, fmt.fmt)) {
1588 /* Framebuffer validation will reject this invalid case, but it
1589 * hasn't had the opportunity yet. In the meantime, we need to
1590 * avoid hitting ISL asserts about unsupported formats below.
1591 */
1592 free(surf);
1593 return NULL;
1594 }
1595
1596 surf->view = (struct isl_view) {
1597 .format = fmt.fmt,
1598 .base_level = tmpl->u.tex.level,
1599 .levels = 1,
1600 .base_array_layer = tmpl->u.tex.first_layer,
1601 .array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1,
1602 .swizzle = ISL_SWIZZLE_IDENTITY,
1603 .usage = usage,
1604 };
1605
1606 /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */
1607 if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
1608 ISL_SURF_USAGE_STENCIL_BIT))
1609 return psurf;
1610
1611
1612 void *map = upload_state(ice->state.surface_uploader, &surf->surface_state,
1613 4 * GENX(RENDER_SURFACE_STATE_length), 64);
1614 if (unlikely(!map))
1615 return NULL;
1616
1617 struct iris_bo *state_bo = iris_resource_bo(surf->surface_state.res);
1618 surf->surface_state.offset += iris_bo_offset_from_base_address(state_bo);
1619
1620 isl_surf_fill_state(&screen->isl_dev, map,
1621 .surf = &res->surf, .view = &surf->view,
1622 .mocs = MOCS_WB,
1623 .address = res->bo->gtt_offset);
1624 // .aux_surf =
1625 // .clear_color = clear_color,
1626
1627 return psurf;
1628 }
1629
1630 /**
1631 * The pipe->set_shader_images() driver hook.
1632 */
1633 static void
1634 iris_set_shader_images(struct pipe_context *ctx,
1635 enum pipe_shader_type p_stage,
1636 unsigned start_slot, unsigned count,
1637 const struct pipe_image_view *p_images)
1638 {
1639 struct iris_context *ice = (struct iris_context *) ctx;
1640 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
1641 const struct gen_device_info *devinfo = &screen->devinfo;
1642 gl_shader_stage stage = stage_from_pipe(p_stage);
1643 struct iris_shader_state *shs = &ice->state.shaders[stage];
1644
1645 for (unsigned i = 0; i < count; i++) {
1646 if (p_images && p_images[i].resource) {
1647 const struct pipe_image_view *img = &p_images[i];
1648 struct iris_resource *res = (void *) img->resource;
1649 pipe_resource_reference(&shs->image[start_slot + i].res, &res->base);
1650
1651 // XXX: these are not retained forever, use a separate uploader?
1652 void *map =
1653 upload_state(ice->state.surface_uploader,
1654 &shs->image[start_slot + i].surface_state,
1655 4 * GENX(RENDER_SURFACE_STATE_length), 64);
1656 if (unlikely(!map)) {
1657 pipe_resource_reference(&shs->image[start_slot + i].res, NULL);
1658 return;
1659 }
1660
1661 struct iris_bo *surf_state_bo =
1662 iris_resource_bo(shs->image[start_slot + i].surface_state.res);
1663 shs->image[start_slot + i].surface_state.offset +=
1664 iris_bo_offset_from_base_address(surf_state_bo);
1665
1666 isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
1667 enum isl_format isl_format =
1668 iris_format_for_usage(devinfo, img->format, usage).fmt;
1669
1670 if (img->shader_access & PIPE_IMAGE_ACCESS_READ)
1671 isl_format = isl_lower_storage_image_format(devinfo, isl_format);
1672
1673 shs->image[start_slot + i].access = img->shader_access;
1674
1675 if (res->base.target != PIPE_BUFFER) {
1676 struct isl_view view = {
1677 .format = isl_format,
1678 .base_level = img->u.tex.level,
1679 .levels = 1,
1680 .base_array_layer = img->u.tex.first_layer,
1681 .array_len = img->u.tex.last_layer - img->u.tex.first_layer + 1,
1682 .swizzle = ISL_SWIZZLE_IDENTITY,
1683 .usage = usage,
1684 };
1685
1686 isl_surf_fill_state(&screen->isl_dev, map,
1687 .surf = &res->surf, .view = &view,
1688 .mocs = MOCS_WB,
1689 .address = res->bo->gtt_offset);
1690 // .aux_surf =
1691 // .clear_color = clear_color,
1692 } else {
1693 fill_buffer_surface_state(&screen->isl_dev, res->bo, map,
1694 isl_format, img->u.buf.offset,
1695 img->u.buf.size);
1696 }
1697 } else {
1698 pipe_resource_reference(&shs->image[start_slot + i].res, NULL);
1699 pipe_resource_reference(&shs->image[start_slot + i].surface_state.res,
1700 NULL);
1701 }
1702 }
1703
1704 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
1705 }
1706
1707
1708 /**
1709 * The pipe->set_sampler_views() driver hook.
1710 */
1711 static void
1712 iris_set_sampler_views(struct pipe_context *ctx,
1713 enum pipe_shader_type p_stage,
1714 unsigned start, unsigned count,
1715 struct pipe_sampler_view **views)
1716 {
1717 struct iris_context *ice = (struct iris_context *) ctx;
1718 gl_shader_stage stage = stage_from_pipe(p_stage);
1719 struct iris_shader_state *shs = &ice->state.shaders[stage];
1720
1721 unsigned i;
1722 for (i = 0; i < count; i++) {
1723 pipe_sampler_view_reference((struct pipe_sampler_view **)
1724 &shs->textures[start + i], views[i]);
1725 }
1726 for (; start + i < shs->num_textures; i++) {
1727 pipe_sampler_view_reference((struct pipe_sampler_view **)
1728 &shs->textures[start + i], NULL);
1729 }
1730
1731 shs->num_textures = start + count;
1732
1733 ice->state.dirty |= (IRIS_DIRTY_BINDINGS_VS << stage);
1734 }
1735
1736 /**
1737 * The pipe->set_tess_state() driver hook.
1738 */
1739 static void
1740 iris_set_tess_state(struct pipe_context *ctx,
1741 const float default_outer_level[4],
1742 const float default_inner_level[2])
1743 {
1744 struct iris_context *ice = (struct iris_context *) ctx;
1745
1746 memcpy(&ice->state.default_outer_level[0], &default_outer_level[0], 4 * sizeof(float));
1747 memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float));
1748
1749 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TCS;
1750 }
1751
1752 static void
1753 iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
1754 {
1755 struct iris_surface *surf = (void *) p_surf;
1756 pipe_resource_reference(&p_surf->texture, NULL);
1757 pipe_resource_reference(&surf->surface_state.res, NULL);
1758 free(surf);
1759 }
1760
1761 static void
1762 iris_set_clip_state(struct pipe_context *ctx,
1763 const struct pipe_clip_state *state)
1764 {
1765 struct iris_context *ice = (struct iris_context *) ctx;
1766 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
1767
1768 memcpy(&ice->state.clip_planes, state, sizeof(*state));
1769
1770 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS;
1771 shs->cbuf0_needs_upload = true;
1772 }
1773
1774 /**
1775 * The pipe->set_polygon_stipple() driver hook.
1776 */
1777 static void
1778 iris_set_polygon_stipple(struct pipe_context *ctx,
1779 const struct pipe_poly_stipple *state)
1780 {
1781 struct iris_context *ice = (struct iris_context *) ctx;
1782 memcpy(&ice->state.poly_stipple, state, sizeof(*state));
1783 ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
1784 }
1785
1786 /**
1787 * The pipe->set_sample_mask() driver hook.
1788 */
1789 static void
1790 iris_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
1791 {
1792 struct iris_context *ice = (struct iris_context *) ctx;
1793
1794 /* We only support 16x MSAA, so we have 16 bits of sample mask.
1795 * st/mesa may pass us 0xffffffff though, meaning "enable all samples".
1796 */
1797 ice->state.sample_mask = sample_mask & 0xffff;
1798 ice->state.dirty |= IRIS_DIRTY_SAMPLE_MASK;
1799 }
1800
1801 /**
1802 * The pipe->set_scissor_states() driver hook.
1803 *
1804 * This corresponds to our SCISSOR_RECT state structures. It's an
1805 * exact match, so we just store them, and memcpy them out later.
1806 */
1807 static void
1808 iris_set_scissor_states(struct pipe_context *ctx,
1809 unsigned start_slot,
1810 unsigned num_scissors,
1811 const struct pipe_scissor_state *rects)
1812 {
1813 struct iris_context *ice = (struct iris_context *) ctx;
1814
1815 for (unsigned i = 0; i < num_scissors; i++) {
1816 if (rects[i].minx == rects[i].maxx || rects[i].miny == rects[i].maxy) {
1817 /* If the scissor was out of bounds and got clamped to 0 width/height
1818 * at the bounds, the subtraction of 1 from maximums could produce a
1819 * negative number and thus not clip anything. Instead, just provide
1820 * a min > max scissor inside the bounds, which produces the expected
1821 * no rendering.
1822 */
1823 ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
1824 .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
1825 };
1826 } else {
1827 ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
1828 .minx = rects[i].minx, .miny = rects[i].miny,
1829 .maxx = rects[i].maxx - 1, .maxy = rects[i].maxy - 1,
1830 };
1831 }
1832 }
1833
1834 ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
1835 }
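/* For example (illustrative): a Gallium scissor of
 *
 *    { .minx = 0, .miny = 0, .maxx = 800, .maxy = 600 }
 *
 * (half-open, maxx/maxy exclusive) becomes the inclusive hardware rectangle
 *
 *    { .minx = 0, .miny = 0, .maxx = 799, .maxy = 599 },
 *
 * while a degenerate 0x0 scissor becomes { 1, 0, 1, 0 }, an empty
 * (min > max) rectangle that discards all rendering.
 */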
1836
1837 /**
1838 * The pipe->set_stencil_ref() driver hook.
1839 *
1840 * This is added to 3DSTATE_WM_DEPTH_STENCIL dynamically at draw time.
1841 */
1842 static void
1843 iris_set_stencil_ref(struct pipe_context *ctx,
1844 const struct pipe_stencil_ref *state)
1845 {
1846 struct iris_context *ice = (struct iris_context *) ctx;
1847 memcpy(&ice->state.stencil_ref, state, sizeof(*state));
1848 ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
1849 }
1850
1851 static float
1852 viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
1853 {
1854 return copysignf(state->scale[axis], sign) + state->translate[axis];
1855 }
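/* viewport_extent() worked example (illustrative): for a classic
 * glViewport(0, 0, 800, 600), Gallium supplies scale[0] = 400 and
 * translate[0] = 400.  Then:
 *
 *    viewport_extent(state, 0, -1.0f) = -400 + 400 =   0   (XMinViewPort)
 *    viewport_extent(state, 0,  1.0f) =  400 + 400 = 800   (XMaxViewPort + 1)
 *
 * matching the 0..800 pixel extent of the viewport.
 */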
1856
1857 #if 0
1858 static void
1859 calculate_guardband_size(uint32_t fb_width, uint32_t fb_height,
1860 float m00, float m11, float m30, float m31,
1861 float *xmin, float *xmax,
1862 float *ymin, float *ymax)
1863 {
1864 /* According to the "Vertex X,Y Clamping and Quantization" section of the
1865 * Strips and Fans documentation:
1866 *
1867 * "The vertex X and Y screen-space coordinates are also /clamped/ to the
1868 * fixed-point "guardband" range supported by the rasterization hardware"
1869 *
1870 * and
1871 *
1872 * "In almost all circumstances, if an object’s vertices are actually
1873 * modified by this clamping (i.e., had X or Y coordinates outside of
1874 * the guardband extent), the rendered object will not match the intended
1875 * result. Therefore software should take steps to ensure that this does
1876 * not happen - e.g., by clipping objects such that they do not exceed
1877 * these limits after the Drawing Rectangle is applied."
1878 *
1879 * I believe the fundamental restriction is that the rasterizer (in
1880 * the SF/WM stages) has a limit on the number of pixels that can be
1881 * rasterized. We need to ensure any coordinates beyond the rasterizer
1882 * limit are handled by the clipper. So effectively that limit becomes
1883 * the clipper's guardband size.
1884 *
1885 * It goes on to say:
1886 *
1887 * "In addition, in order to be correctly rendered, objects must have a
1888 * screenspace bounding box not exceeding 8K in the X or Y direction.
1889 * This additional restriction must also be comprehended by software,
1890 * i.e., enforced by use of clipping."
1891 *
1892 * This makes no sense. Gen7+ hardware supports 16K render targets,
1893 * and you definitely need to be able to draw polygons that fill the
1894 * surface. Our assumption is that the rasterizer was limited to 8K
1895 * on Sandybridge, which only supports 8K surfaces, and it was actually
1896 * increased to 16K on Ivybridge and later.
1897 *
1898 * So, limit the guardband to 16K on Gen7+ and 8K on Sandybridge.
1899 */
1900 const float gb_size = GEN_GEN >= 7 ? 16384.0f : 8192.0f;
1901
1902 if (m00 != 0 && m11 != 0) {
1903 /* First, we compute the screen-space render area */
1904 const float ss_ra_xmin = MIN3( 0, m30 + m00, m30 - m00);
1905 const float ss_ra_xmax = MAX3( fb_width, m30 + m00, m30 - m00);
1906 const float ss_ra_ymin = MIN3( 0, m31 + m11, m31 - m11);
1907 const float ss_ra_ymax = MAX3(fb_height, m31 + m11, m31 - m11);
1908
1909 /* We want the guardband to be centered on that */
1910 const float ss_gb_xmin = (ss_ra_xmin + ss_ra_xmax) / 2 - gb_size;
1911 const float ss_gb_xmax = (ss_ra_xmin + ss_ra_xmax) / 2 + gb_size;
1912 const float ss_gb_ymin = (ss_ra_ymin + ss_ra_ymax) / 2 - gb_size;
1913 const float ss_gb_ymax = (ss_ra_ymin + ss_ra_ymax) / 2 + gb_size;
1914
1915 /* Now we need it in native device coordinates */
1916 const float ndc_gb_xmin = (ss_gb_xmin - m30) / m00;
1917 const float ndc_gb_xmax = (ss_gb_xmax - m30) / m00;
1918 const float ndc_gb_ymin = (ss_gb_ymin - m31) / m11;
1919 const float ndc_gb_ymax = (ss_gb_ymax - m31) / m11;
1920
1921 /* Thanks to Y-flipping and ORIGIN_UPPER_LEFT, the Y coordinates may be
1922 * flipped upside-down. X should be fine though.
1923 */
1924 assert(ndc_gb_xmin <= ndc_gb_xmax);
1925 *xmin = ndc_gb_xmin;
1926 *xmax = ndc_gb_xmax;
1927 *ymin = MIN2(ndc_gb_ymin, ndc_gb_ymax);
1928 *ymax = MAX2(ndc_gb_ymin, ndc_gb_ymax);
1929 } else {
1930 /* The viewport scales to 0, so nothing will be rendered. */
1931 *xmin = 0.0f;
1932 *xmax = 0.0f;
1933 *ymin = 0.0f;
1934 *ymax = 0.0f;
1935 }
1936 }
1937 #endif
1938
1939 /**
1940 * The pipe->set_viewport_states() driver hook.
1941 *
1942 * This corresponds to our SF_CLIP_VIEWPORT states. We can't calculate
1943 * the guardband yet, as we need the framebuffer dimensions, but we can
1944 * at least fill out the rest.
1945 */
1946 static void
1947 iris_set_viewport_states(struct pipe_context *ctx,
1948 unsigned start_slot,
1949 unsigned count,
1950 const struct pipe_viewport_state *states)
1951 {
1952 struct iris_context *ice = (struct iris_context *) ctx;
1953 struct iris_genx_state *genx = ice->state.genx;
1954 uint32_t *vp_map =
1955 &genx->sf_cl_vp[start_slot * GENX(SF_CLIP_VIEWPORT_length)];
1956
1957 for (unsigned i = 0; i < count; i++) {
1958 const struct pipe_viewport_state *state = &states[i];
1959
1960 memcpy(&ice->state.viewports[start_slot + i], state, sizeof(*state));
1961
1962 iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
1963 vp.ViewportMatrixElementm00 = state->scale[0];
1964 vp.ViewportMatrixElementm11 = state->scale[1];
1965 vp.ViewportMatrixElementm22 = state->scale[2];
1966 vp.ViewportMatrixElementm30 = state->translate[0];
1967 vp.ViewportMatrixElementm31 = state->translate[1];
1968 vp.ViewportMatrixElementm32 = state->translate[2];
1969 /* XXX: in i965 this is computed based on the drawbuffer size,
1970 * but we don't have that here...
1971 */
1972 vp.XMinClipGuardband = -1.0;
1973 vp.XMaxClipGuardband = 1.0;
1974 vp.YMinClipGuardband = -1.0;
1975 vp.YMaxClipGuardband = 1.0;
1976 vp.XMinViewPort = viewport_extent(state, 0, -1.0f);
1977 vp.XMaxViewPort = viewport_extent(state, 0, 1.0f) - 1;
1978 vp.YMinViewPort = viewport_extent(state, 1, -1.0f);
1979 vp.YMaxViewPort = viewport_extent(state, 1, 1.0f) - 1;
1980 }
1981
1982 vp_map += GENX(SF_CLIP_VIEWPORT_length);
1983 }
1984
1985 ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
1986
1987 if (ice->state.cso_rast && (!ice->state.cso_rast->depth_clip_near ||
1988 !ice->state.cso_rast->depth_clip_far))
1989 ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
1990 }
1991
1992 /**
1993 * The pipe->set_framebuffer_state() driver hook.
1994 *
1995 * Sets the current draw FBO, including color render targets, depth,
1996 * and stencil buffers.
1997 */
1998 static void
1999 iris_set_framebuffer_state(struct pipe_context *ctx,
2000 const struct pipe_framebuffer_state *state)
2001 {
2002 struct iris_context *ice = (struct iris_context *) ctx;
2003 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2004 struct isl_device *isl_dev = &screen->isl_dev;
2005 struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
2006 struct iris_resource *zres;
2007 struct iris_resource *stencil_res;
2008
2009 unsigned samples = util_framebuffer_get_num_samples(state);
2010
2011 if (cso->samples != samples) {
2012 ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
2013 }
2014
2015 if (cso->nr_cbufs != state->nr_cbufs) {
2016 ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
2017 }
2018
2019 if ((cso->layers == 0) != (state->layers == 0)) {
2020 ice->state.dirty |= IRIS_DIRTY_CLIP;
2021 }
2022
2023 util_copy_framebuffer_state(cso, state);
2024 cso->samples = samples;
2025
2026 struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
2027
2028 struct isl_view view = {
2029 .base_level = 0,
2030 .levels = 1,
2031 .base_array_layer = 0,
2032 .array_len = 1,
2033 .swizzle = ISL_SWIZZLE_IDENTITY,
2034 };
2035
2036 struct isl_depth_stencil_hiz_emit_info info = {
2037 .view = &view,
2038 .mocs = MOCS_WB,
2039 };
2040
2041 if (cso->zsbuf) {
2042 iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres,
2043 &stencil_res);
2044
2045 view.base_level = cso->zsbuf->u.tex.level;
2046 view.base_array_layer = cso->zsbuf->u.tex.first_layer;
2047 view.array_len =
2048 cso->zsbuf->u.tex.last_layer - cso->zsbuf->u.tex.first_layer + 1;
2049
2050 if (zres) {
2051 view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
2052
2053 info.depth_surf = &zres->surf;
2054 info.depth_address = zres->bo->gtt_offset;
2055 info.hiz_usage = ISL_AUX_USAGE_NONE;
2056
2057 view.format = zres->surf.format;
2058 }
2059
2060 if (stencil_res) {
2061 view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
2062 info.stencil_surf = &stencil_res->surf;
2063 info.stencil_address = stencil_res->bo->gtt_offset;
2064 if (!zres)
2065 view.format = stencil_res->surf.format;
2066 }
2067 }
2068
2069 isl_emit_depth_stencil_hiz_s(isl_dev, cso_z->packets, &info);
2070
2071 /* Make a null surface for unbound buffers */
2072 void *null_surf_map =
2073 upload_state(ice->state.surface_uploader, &ice->state.null_fb,
2074 4 * GENX(RENDER_SURFACE_STATE_length), 64);
2075 isl_null_fill_state(&screen->isl_dev, null_surf_map,
2076 isl_extent3d(MAX2(cso->width, 1),
2077 MAX2(cso->height, 1),
2078 cso->layers ? cso->layers : 1));
2079 ice->state.null_fb.offset +=
2080 iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));
2081
2082 ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
2083
2084 /* Render target change */
2085 ice->state.dirty |= IRIS_DIRTY_BINDINGS_FS;
2086
2087 ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_FRAMEBUFFER];
2088
2089 #if GEN_GEN == 11
2090 // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
2091 // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6
2092
2093 /* The PIPE_CONTROL command description says:
2094 *
2095 * "Whenever a Binding Table Index (BTI) used by a Render Target Message
2096 * points to a different RENDER_SURFACE_STATE, SW must issue a Render
2097 * Target Cache Flush by enabling this bit. When render target flush
2098 * is set due to new association of BTI, PS Scoreboard Stall bit must
2099 * be set in this packet."
2100 */
2101 // XXX: does this need to happen at 3DSTATE_BTP_PS time?
2102 iris_emit_pipe_control_flush(&ice->render_batch,
2103 PIPE_CONTROL_RENDER_TARGET_FLUSH |
2104 PIPE_CONTROL_STALL_AT_SCOREBOARD);
2105 #endif
2106 }
2107
2108 static void
2109 upload_ubo_surf_state(struct iris_context *ice,
2110 struct iris_const_buffer *cbuf,
2111 unsigned buffer_size)
2112 {
2113 struct pipe_context *ctx = &ice->ctx;
2114 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
2115
2116 // XXX: these are not retained forever, use a separate uploader?
2117 void *map =
2118 upload_state(ice->state.surface_uploader, &cbuf->surface_state,
2119 4 * GENX(RENDER_SURFACE_STATE_length), 64);
2120 if (unlikely(!map)) {
2121 pipe_resource_reference(&cbuf->data.res, NULL);
2122 return;
2123 }
2124
2125 struct iris_resource *res = (void *) cbuf->data.res;
2126 struct iris_bo *surf_bo = iris_resource_bo(cbuf->surface_state.res);
2127 cbuf->surface_state.offset += iris_bo_offset_from_base_address(surf_bo);
2128
2129 isl_buffer_fill_state(&screen->isl_dev, map,
2130 .address = res->bo->gtt_offset + cbuf->data.offset,
2131 .size_B = MIN2(buffer_size,
2132 res->bo->size - cbuf->data.offset),
2133 .format = ISL_FORMAT_R32G32B32A32_FLOAT,
2134 .stride_B = 1,
2135 .mocs = MOCS_WB);
2136 }
2137
2138 /**
2139 * The pipe->set_constant_buffer() driver hook.
2140 *
2141 * This uploads any constant data in user buffers, and references
2142 * any UBO resources containing constant data.
2143 */
2144 static void
2145 iris_set_constant_buffer(struct pipe_context *ctx,
2146 enum pipe_shader_type p_stage, unsigned index,
2147 const struct pipe_constant_buffer *input)
2148 {
2149 struct iris_context *ice = (struct iris_context *) ctx;
2150 gl_shader_stage stage = stage_from_pipe(p_stage);
2151 struct iris_shader_state *shs = &ice->state.shaders[stage];
2152 struct iris_const_buffer *cbuf = &shs->constbuf[index];
2153
2154 if (input && input->buffer) {
2155 assert(index > 0);
2156
2157 pipe_resource_reference(&cbuf->data.res, input->buffer);
2158 cbuf->data.offset = input->buffer_offset;
2159
2160 upload_ubo_surf_state(ice, cbuf, input->buffer_size);
2161 } else {
2162 pipe_resource_reference(&cbuf->data.res, NULL);
2163 pipe_resource_reference(&cbuf->surface_state.res, NULL);
2164 }
2165
2166 if (index == 0) {
2167 if (input)
2168 memcpy(&shs->cbuf0, input, sizeof(shs->cbuf0));
2169 else
2170 memset(&shs->cbuf0, 0, sizeof(shs->cbuf0));
2171
2172 shs->cbuf0_needs_upload = true;
2173 }
2174
2175 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
2176 // XXX: maybe not necessary all the time...?
2177 // XXX: we need 3DS_BTP to commit these changes, and if we fell back to
2178 // XXX: pull model we may need actual new bindings...
2179 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
2180 }
2181
2182 static void
2183 upload_uniforms(struct iris_context *ice,
2184 gl_shader_stage stage)
2185 {
2186 struct iris_shader_state *shs = &ice->state.shaders[stage];
2187 struct iris_const_buffer *cbuf = &shs->constbuf[0];
2188 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
2189
2190 unsigned upload_size = shader->num_system_values * sizeof(uint32_t) +
2191 shs->cbuf0.buffer_size;
2192
2193 if (upload_size == 0)
2194 return;
2195
2196 uint32_t *map =
2197 upload_state(ice->ctx.const_uploader, &cbuf->data, upload_size, 64);
2198
2199 for (int i = 0; i < shader->num_system_values; i++) {
2200 uint32_t sysval = shader->system_values[i];
2201 uint32_t value = 0;
2202
2203 if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
2204 int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval);
2205 int comp = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval);
2206 value = fui(ice->state.clip_planes.ucp[plane][comp]);
2207 } else {
2208 assert(!"unhandled system value");
2209 }
2210
2211 *map++ = value;
2212 }
2213
2214 if (shs->cbuf0.user_buffer) {
2215 memcpy(map, shs->cbuf0.user_buffer, shs->cbuf0.buffer_size);
2216 }
2217
2218 upload_ubo_surf_state(ice, cbuf, upload_size);
2219 }
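/* The resulting buffer layout is system values first, then user uniforms.
 * A sketch (assuming one clip-plane sysval and 16 bytes of cbuf0 data):
 *
 *    dw[0]      fui(clip_planes.ucp[p][c])   <- shader->system_values[0]
 *    dw[1..4]   cbuf0 user buffer contents
 *
 * so the compiled shader must have laid out its params in the same order.
 */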
2220
2221 /**
2222 * The pipe->set_shader_buffers() driver hook.
2223 *
2224 * This binds SSBOs and ABOs. Unfortunately, we need to stream out
2225 * SURFACE_STATE here, as the buffer offset may change each time.
2226 */
2227 static void
2228 iris_set_shader_buffers(struct pipe_context *ctx,
2229 enum pipe_shader_type p_stage,
2230 unsigned start_slot, unsigned count,
2231 const struct pipe_shader_buffer *buffers)
2232 {
2233 struct iris_context *ice = (struct iris_context *) ctx;
2234 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2235 gl_shader_stage stage = stage_from_pipe(p_stage);
2236 struct iris_shader_state *shs = &ice->state.shaders[stage];
2237
2238 for (unsigned i = 0; i < count; i++) {
2239 if (buffers && buffers[i].buffer) {
2240 const struct pipe_shader_buffer *buffer = &buffers[i];
2241 struct iris_resource *res = (void *) buffer->buffer;
2242 pipe_resource_reference(&shs->ssbo[start_slot + i], &res->base);
2243
2244 // XXX: these are not retained forever, use a separate uploader?
2245 void *map =
2246 upload_state(ice->state.surface_uploader,
2247 &shs->ssbo_surface_state[start_slot + i],
2248 4 * GENX(RENDER_SURFACE_STATE_length), 64);
2249 if (unlikely(!map)) {
2250 pipe_resource_reference(&shs->ssbo[start_slot + i], NULL);
2251 return;
2252 }
2253
2254 struct iris_bo *surf_state_bo =
2255 iris_resource_bo(shs->ssbo_surface_state[start_slot + i].res);
2256 shs->ssbo_surface_state[start_slot + i].offset +=
2257 iris_bo_offset_from_base_address(surf_state_bo);
2258
2259 isl_buffer_fill_state(&screen->isl_dev, map,
2260 .address =
2261 res->bo->gtt_offset + buffer->buffer_offset,
2262 .size_B =
2263 MIN2(buffer->buffer_size,
2264 res->bo->size - buffer->buffer_offset),
2265 .format = ISL_FORMAT_RAW,
2266 .stride_B = 1,
2267 .mocs = MOCS_WB);
2268 } else {
2269 pipe_resource_reference(&shs->ssbo[start_slot + i], NULL);
2270 pipe_resource_reference(&shs->ssbo_surface_state[start_slot + i].res,
2271 NULL);
2272 }
2273 }
2274
2275 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
2276 }
2277
2278 static void
2279 iris_delete_state(struct pipe_context *ctx, void *state)
2280 {
2281 free(state);
2282 }
2283
2284 static void
2285 iris_free_vertex_buffers(struct iris_vertex_buffer_state *cso)
2286 {
2287 for (unsigned i = 0; i < cso->num_buffers; i++)
2288 pipe_resource_reference(&cso->resources[i], NULL);
2289 }
2290
2291 /**
2292 * The pipe->set_vertex_buffers() driver hook.
2293 *
2294 * This translates pipe_vertex_buffer to our 3DSTATE_VERTEX_BUFFERS packet.
2295 */
2296 static void
2297 iris_set_vertex_buffers(struct pipe_context *ctx,
2298 unsigned start_slot, unsigned count,
2299 const struct pipe_vertex_buffer *buffers)
2300 {
2301 struct iris_context *ice = (struct iris_context *) ctx;
2302 struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;
2303
2304 iris_free_vertex_buffers(&ice->state.genx->vertex_buffers);
2305
2306 if (!buffers)
2307 count = 0;
2308
2309 cso->num_buffers = count;
2310
2311 iris_pack_command(GENX(3DSTATE_VERTEX_BUFFERS), cso->vertex_buffers, vb) {
2312 vb.DWordLength = 4 * MAX2(cso->num_buffers, 1) - 1;
2313 }
2314
2315 uint32_t *vb_pack_dest = &cso->vertex_buffers[1];
2316
2317 if (count == 0) {
2318 iris_pack_state(GENX(VERTEX_BUFFER_STATE), vb_pack_dest, vb) {
2319 vb.VertexBufferIndex = start_slot;
2320 vb.NullVertexBuffer = true;
2321 vb.AddressModifyEnable = true;
2322 }
2323 }
2324
2325 for (unsigned i = 0; i < count; i++) {
2326 assert(!buffers[i].is_user_buffer);
2327
2328 pipe_resource_reference(&cso->resources[i], buffers[i].buffer.resource);
2329 struct iris_resource *res = (void *) cso->resources[i];
2330
2331 iris_pack_state(GENX(VERTEX_BUFFER_STATE), vb_pack_dest, vb) {
2332 vb.VertexBufferIndex = start_slot + i;
2333 vb.MOCS = MOCS_WB;
2334 vb.AddressModifyEnable = true;
2335 vb.BufferPitch = buffers[i].stride;
2336 if (res) {
2337 vb.BufferSize = res->bo->size;
2338 vb.BufferStartingAddress =
2339 ro_bo(NULL, res->bo->gtt_offset + buffers[i].buffer_offset);
2340 } else {
2341 vb.NullVertexBuffer = true;
2342 }
2343 }
2344
2345 vb_pack_dest += GENX(VERTEX_BUFFER_STATE_length);
2346 }
2347
2348 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
2349 }
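/* DWordLength counts the total packet DWords minus two.  For example
 * (illustrative): two bound buffers pack as 1 header DWord plus
 * 2 * 4 VERTEX_BUFFER_STATE DWords = 9 total, and 4 * MAX2(2, 1) - 1 = 7
 * = 9 - 2.  The MAX2 accounts for the single NullVertexBuffer entry we
 * emit when count == 0.
 */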
2350
2351 /**
2352 * Gallium CSO for vertex elements.
2353 */
2354 struct iris_vertex_element_state {
2355 uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
2356 uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
2357 unsigned count;
2358 };
2359
2360 /**
2361 * The pipe->create_vertex_elements() driver hook.
2362 *
2363 * This translates pipe_vertex_element to our 3DSTATE_VERTEX_ELEMENTS
2364 * and 3DSTATE_VF_INSTANCING commands. SGVs are handled at draw time.
2365 */
2366 static void *
2367 iris_create_vertex_elements(struct pipe_context *ctx,
2368 unsigned count,
2369 const struct pipe_vertex_element *state)
2370 {
2371 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2372 const struct gen_device_info *devinfo = &screen->devinfo;
2373 struct iris_vertex_element_state *cso =
2374 malloc(sizeof(struct iris_vertex_element_state));
2375
2376 cso->count = count;
2377
2378 /* TODO:
2379 * - create edge flag one
2380 * - create SGV ones
2381 * - if those are necessary, use count + 1/2/3... OR in the length
2382 */
2383 iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) {
2384 ve.DWordLength =
2385 1 + GENX(VERTEX_ELEMENT_STATE_length) * MAX2(count, 1) - 2;
2386 }
2387
2388 uint32_t *ve_pack_dest = &cso->vertex_elements[1];
2389 uint32_t *vfi_pack_dest = cso->vf_instancing;
2390
2391 if (count == 0) {
2392 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
2393 ve.Valid = true;
2394 ve.SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT;
2395 ve.Component0Control = VFCOMP_STORE_0;
2396 ve.Component1Control = VFCOMP_STORE_0;
2397 ve.Component2Control = VFCOMP_STORE_0;
2398 ve.Component3Control = VFCOMP_STORE_1_FP;
2399 }
2400
2401 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
2402 }
2403 }
2404
2405 for (int i = 0; i < count; i++) {
2406 const struct iris_format_info fmt =
2407 iris_format_for_usage(devinfo, state[i].src_format, 0);
2408 unsigned comp[4] = { VFCOMP_STORE_SRC, VFCOMP_STORE_SRC,
2409 VFCOMP_STORE_SRC, VFCOMP_STORE_SRC };
2410
2411 switch (isl_format_get_num_channels(fmt.fmt)) {
2412 case 0: comp[0] = VFCOMP_STORE_0; /* fallthrough */
2413 case 1: comp[1] = VFCOMP_STORE_0; /* fallthrough */
2414 case 2: comp[2] = VFCOMP_STORE_0; /* fallthrough */
2415 case 3:
2416 comp[3] = isl_format_has_int_channel(fmt.fmt) ? VFCOMP_STORE_1_INT
2417 : VFCOMP_STORE_1_FP;
2418 break;
2419 }
2420 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
2421 ve.VertexBufferIndex = state[i].vertex_buffer_index;
2422 ve.Valid = true;
2423 ve.SourceElementOffset = state[i].src_offset;
2424 ve.SourceElementFormat = fmt.fmt;
2425 ve.Component0Control = comp[0];
2426 ve.Component1Control = comp[1];
2427 ve.Component2Control = comp[2];
2428 ve.Component3Control = comp[3];
2429 }
2430
2431 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
2432 vi.VertexElementIndex = i;
2433 vi.InstancingEnable = state[i].instance_divisor > 0;
2434 vi.InstanceDataStepRate = state[i].instance_divisor;
2435 }
2436
2437 ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
2438 vfi_pack_dest += GENX(3DSTATE_VF_INSTANCING_length);
2439 }
2440
2441 return cso;
2442 }
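/* Channel-fill example for the switch above (illustrative): a
 * PIPE_FORMAT_R32G32B32_FLOAT attribute has 3 channels, so only case 3
 * runs, leaving comp = { STORE_SRC, STORE_SRC, STORE_SRC, STORE_1_FP },
 * i.e. .w reads back as 1.0f.  A 1-channel R32_FLOAT enters at case 1 and
 * falls through, yielding { STORE_SRC, STORE_0, STORE_0, STORE_1_FP },
 * the usual (x, 0, 0, 1) expansion.
 */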
2443
2444 /**
2445 * The pipe->bind_vertex_elements_state() driver hook.
2446 */
2447 static void
2448 iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
2449 {
2450 struct iris_context *ice = (struct iris_context *) ctx;
2451 struct iris_vertex_element_state *old_cso = ice->state.cso_vertex_elements;
2452 struct iris_vertex_element_state *new_cso = state;
2453
2454 /* 3DSTATE_VF_SGVs overrides the last VE, so if the count is changing,
2455 * we need to re-emit it to ensure we're overriding the right one.
2456 */
2457 if (new_cso && cso_changed(count))
2458 ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
2459
2460 ice->state.cso_vertex_elements = state;
2461 ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
2462 }
2463
2464 /**
2465 * Gallium CSO for stream output (transform feedback) targets.
2466 */
2467 struct iris_stream_output_target {
2468 struct pipe_stream_output_target base;
2469
2470 uint32_t so_buffer[GENX(3DSTATE_SO_BUFFER_length)];
2471
2472 /** Storage holding the offset where we're writing in the buffer */
2473 struct iris_state_ref offset;
2474 };
2475
2476 /**
2477 * The pipe->create_stream_output_target() driver hook.
2478 *
2479 * "Target" here refers to a destination buffer. We translate this into
2480 * a 3DSTATE_SO_BUFFER packet. We can handle most fields, but don't yet
2481 * know which buffer this represents, or whether we ought to zero the
2482 * write-offsets, or append. Those are handled in the set() hook.
2483 */
2484 static struct pipe_stream_output_target *
2485 iris_create_stream_output_target(struct pipe_context *ctx,
2486 struct pipe_resource *res,
2487 unsigned buffer_offset,
2488 unsigned buffer_size)
2489 {
2490 struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
2491 if (!cso)
2492 return NULL;
2493
2494 pipe_reference_init(&cso->base.reference, 1);
2495 pipe_resource_reference(&cso->base.buffer, res);
2496 cso->base.buffer_offset = buffer_offset;
2497 cso->base.buffer_size = buffer_size;
2498 cso->base.context = ctx;
2499
2500 upload_state(ctx->stream_uploader, &cso->offset, 4 * sizeof(uint32_t), 4);
2501
2502 iris_pack_command(GENX(3DSTATE_SO_BUFFER), cso->so_buffer, sob) {
2503 sob.SurfaceBaseAddress =
2504 rw_bo(NULL, iris_resource_bo(res)->gtt_offset + buffer_offset);
2505 sob.SOBufferEnable = true;
2506 sob.StreamOffsetWriteEnable = true;
2507 sob.StreamOutputBufferOffsetAddressEnable = true;
2508 sob.MOCS = MOCS_WB; // XXX: MOCS
2509
2510 sob.SurfaceSize = MAX2(buffer_size / 4, 1) - 1;
2511
2512 /* .SOBufferIndex, .StreamOffset, and .StreamOutputBufferOffsetAddress
2513 * are filled in later when we have stream IDs.
2514 */
2515 }
2516
2517 return &cso->base;
2518 }
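/* SurfaceSize is expressed in DWords, minus one.  For example
 * (illustrative): buffer_size = 1024 bytes gives MAX2(1024 / 4, 1) - 1
 * = 255, i.e. 256 DWords.  A zero-sized binding still programs a minimal
 * 1-DWord surface rather than an invalid field.
 */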
2519
2520 static void
2521 iris_stream_output_target_destroy(struct pipe_context *ctx,
2522 struct pipe_stream_output_target *state)
2523 {
2524 struct iris_stream_output_target *cso = (void *) state;
2525
2526 pipe_resource_reference(&cso->base.buffer, NULL);
2527 pipe_resource_reference(&cso->offset.res, NULL);
2528
2529 free(cso);
2530 }
2531
2532 /**
2533 * The pipe->set_stream_output_targets() driver hook.
2534 *
2535 * At this point, we know which targets are bound to a particular index,
2536 * and also whether we want to append or start over. We can finish the
2537 * 3DSTATE_SO_BUFFER packets we started earlier.
2538 */
2539 static void
2540 iris_set_stream_output_targets(struct pipe_context *ctx,
2541 unsigned num_targets,
2542 struct pipe_stream_output_target **targets,
2543 const unsigned *offsets)
2544 {
2545 struct iris_context *ice = (struct iris_context *) ctx;
2546 struct iris_genx_state *genx = ice->state.genx;
2547 uint32_t *so_buffers = genx->so_buffers;
2548
2549 const bool active = num_targets > 0;
2550 if (ice->state.streamout_active != active) {
2551 ice->state.streamout_active = active;
2552 ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
2553
2554 /* We only emit 3DSTATE_SO_DECL_LIST when streamout is active, because
2555 * it's a non-pipelined command. If we're switching streamout on, we
2556 * may have missed emitting it earlier, so do so now. (We're already
2557 * taking a stall to update 3DSTATE_SO_BUFFERS anyway...)
2558 */
2559 if (active)
2560 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST;
2561 }
2562
2563 for (int i = 0; i < 4; i++) {
2564 pipe_so_target_reference(&ice->state.so_target[i],
2565 i < num_targets ? targets[i] : NULL);
2566 }
2567
2568 /* No need to update 3DSTATE_SO_BUFFER unless SOL is active. */
2569 if (!active)
2570 return;
2571
2572 for (unsigned i = 0; i < 4; i++,
2573 so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
2574
2575 if (i >= num_targets || !targets[i]) {
2576 iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob)
2577 sob.SOBufferIndex = i;
2578 continue;
2579 }
2580
2581 struct iris_stream_output_target *tgt = (void *) targets[i];
2582
2583 /* Note that offsets[i] will either be 0, causing us to zero
2584 * the value in the buffer, or 0xFFFFFFFF, which happens to mean
2585 * "continue appending at the existing offset."
2586 */
2587 assert(offsets[i] == 0 || offsets[i] == 0xFFFFFFFF);
2588
2589 uint32_t dynamic[GENX(3DSTATE_SO_BUFFER_length)];
2590 iris_pack_state(GENX(3DSTATE_SO_BUFFER), dynamic, dyns) {
2591 dyns.SOBufferIndex = i;
2592 dyns.StreamOffset = offsets[i];
2593 dyns.StreamOutputBufferOffsetAddress =
2594 rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset + tgt->offset.offset + i * sizeof(uint32_t));
2595 }
2596
2597 for (uint32_t j = 0; j < GENX(3DSTATE_SO_BUFFER_length); j++) {
2598 so_buffers[j] = tgt->so_buffer[j] | dynamic[j];
2599 }
2600 }
2601
2602 ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
2603 }
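/* The OR-merge loop above works because the create()-time packet and the
 * dynamic packet deliberately program disjoint fields (a sketch of the
 * split, not additional driver code):
 *
 *    create(): SurfaceBaseAddress, SurfaceSize, MOCS, enable bits
 *    set():    SOBufferIndex, StreamOffset, StreamOutputBufferOffsetAddress
 *
 * Any field written by both packings would corrupt the merged DWords.
 */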
2604
2605 /**
2606 * An iris-vtable helper for encoding the 3DSTATE_SO_DECL_LIST and
2607 * 3DSTATE_STREAMOUT packets.
2608 *
2609 * 3DSTATE_SO_DECL_LIST is a list of shader outputs we want the streamout
2610 * hardware to record. We can create it entirely based on the shader, with
2611 * no dynamic state dependencies.
2612 *
2613 * 3DSTATE_STREAMOUT is an annoying mix of shader-based information and
2614 * state-based settings. We capture the shader-related ones here, and merge
2615 * the rest in at draw time.
2616 */
2617 static uint32_t *
2618 iris_create_so_decl_list(const struct pipe_stream_output_info *info,
2619 const struct brw_vue_map *vue_map)
2620 {
2621 struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
2622 int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
2623 int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
2624 int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
2625 int max_decls = 0;
2626 STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);
2627
2628 memset(so_decl, 0, sizeof(so_decl));
2629
2630 /* Construct the list of SO_DECLs to be emitted. The formatting of the
2631 * command feels strange -- each dword pair contains a SO_DECL per stream.
2632 */
2633 for (unsigned i = 0; i < info->num_outputs; i++) {
2634 const struct pipe_stream_output *output = &info->output[i];
2635 const int buffer = output->output_buffer;
2636 const int varying = output->register_index;
2637 const unsigned stream_id = output->stream;
2638 assert(stream_id < MAX_VERTEX_STREAMS);
2639
2640 buffer_mask[stream_id] |= 1 << buffer;
2641
2642 assert(vue_map->varying_to_slot[varying] >= 0);
2643
2644 /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
2645 * array. Instead, it simply increments DstOffset for the following
2646 * input by the number of components that should be skipped.
2647 *
2648 * Our hardware is unusual in that it requires us to program SO_DECLs
2649 * for fake "hole" components, rather than simply taking the offset
2650 * for each real varying. Each hole can have size 1, 2, 3, or 4; we
2651 * program as many size = 4 holes as we can, then a final hole to
2652 * accommodate the final 1, 2, or 3 remaining.
2653 */
2654 int skip_components = output->dst_offset - next_offset[buffer];
2655
2656 while (skip_components > 0) {
2657 so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
2658 .HoleFlag = 1,
2659 .OutputBufferSlot = output->output_buffer,
2660 .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
2661 };
2662 skip_components -= 4;
2663 }
2664
2665 next_offset[buffer] = output->dst_offset + output->num_components;
2666
2667 so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
2668 .OutputBufferSlot = output->output_buffer,
2669 .RegisterIndex = vue_map->varying_to_slot[varying],
2670 .ComponentMask =
2671 ((1 << output->num_components) - 1) << output->start_component,
2672 };
2673
2674 if (decls[stream_id] > max_decls)
2675 max_decls = decls[stream_id];
2676 }
2677
2678 unsigned dwords = GENX(3DSTATE_STREAMOUT_length) + (3 + 2 * max_decls);
2679 uint32_t *map = ralloc_size(NULL, sizeof(uint32_t) * dwords);
2680 uint32_t *so_decl_map = map + GENX(3DSTATE_STREAMOUT_length);
2681
2682 iris_pack_command(GENX(3DSTATE_STREAMOUT), map, sol) {
2683 int urb_entry_read_offset = 0;
2684 int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
2685 urb_entry_read_offset;
2686
2687 /* We always read the whole vertex. This could be reduced at some
2688 * point by reading less and offsetting the register index in the
2689 * SO_DECLs.
2690 */
2691 sol.Stream0VertexReadOffset = urb_entry_read_offset;
2692 sol.Stream0VertexReadLength = urb_entry_read_length - 1;
2693 sol.Stream1VertexReadOffset = urb_entry_read_offset;
2694 sol.Stream1VertexReadLength = urb_entry_read_length - 1;
2695 sol.Stream2VertexReadOffset = urb_entry_read_offset;
2696 sol.Stream2VertexReadLength = urb_entry_read_length - 1;
2697 sol.Stream3VertexReadOffset = urb_entry_read_offset;
2698 sol.Stream3VertexReadLength = urb_entry_read_length - 1;
2699
2700 /* Set buffer pitches; 0 means unbound. */
2701 sol.Buffer0SurfacePitch = 4 * info->stride[0];
2702 sol.Buffer1SurfacePitch = 4 * info->stride[1];
2703 sol.Buffer2SurfacePitch = 4 * info->stride[2];
2704 sol.Buffer3SurfacePitch = 4 * info->stride[3];
2705 }
2706
2707 iris_pack_command(GENX(3DSTATE_SO_DECL_LIST), so_decl_map, list) {
2708 list.DWordLength = 3 + 2 * max_decls - 2;
2709 list.StreamtoBufferSelects0 = buffer_mask[0];
2710 list.StreamtoBufferSelects1 = buffer_mask[1];
2711 list.StreamtoBufferSelects2 = buffer_mask[2];
2712 list.StreamtoBufferSelects3 = buffer_mask[3];
2713 list.NumEntries0 = decls[0];
2714 list.NumEntries1 = decls[1];
2715 list.NumEntries2 = decls[2];
2716 list.NumEntries3 = decls[3];
2717 }
2718
2719 for (int i = 0; i < max_decls; i++) {
2720 iris_pack_state(GENX(SO_DECL_ENTRY), so_decl_map + 3 + i * 2, entry) {
2721 entry.Stream0Decl = so_decl[0][i];
2722 entry.Stream1Decl = so_decl[1][i];
2723 entry.Stream2Decl = so_decl[2][i];
2724 entry.Stream3Decl = so_decl[3][i];
2725 }
2726 }
2727
2728 return map;
2729 }
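/* Hole-decl example (illustrative): with gl_SkipComponents6 preceding a
 * vec2 output, dst_offset = 6 while next_offset = 0, so skip_components = 6
 * and the loop emits two hole SO_DECLs:
 *
 *    { .HoleFlag = 1, .ComponentMask = 0xf }   4 skipped components
 *    { .HoleFlag = 1, .ComponentMask = 0x3 }   2 more skipped components
 *
 * before the real SO_DECL for the vec2 itself.
 */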
2730
2731 static void
2732 iris_compute_sbe_urb_read_interval(uint64_t fs_input_slots,
2733 const struct brw_vue_map *last_vue_map,
2734 bool two_sided_color,
2735 unsigned *out_offset,
2736 unsigned *out_length)
2737 {
2738 /* The compiler computes the first URB slot without considering COL/BFC
2739 * swizzling (because it doesn't know whether it's enabled), so we need
2740 * to do that here too. This may result in a smaller offset, which
2741 * should be safe.
2742 */
2743 const unsigned first_slot =
2744 brw_compute_first_urb_slot_required(fs_input_slots, last_vue_map);
2745
2746 /* This becomes the URB read offset (counted in pairs of slots). */
2747 assert(first_slot % 2 == 0);
2748 *out_offset = first_slot / 2;
2749
2750 /* We need to adjust the inputs read to account for front/back color
2751 * swizzling, as it can make the URB length longer.
2752 */
2753 for (int c = 0; c <= 1; c++) {
2754 if (fs_input_slots & (VARYING_BIT_COL0 << c)) {
2755 /* If two sided color is enabled, the fragment shader's gl_Color
2756 * (COL0) input comes from either the gl_FrontColor (COL0) or
2757 * gl_BackColor (BFC0) input varyings. Mark BFC as used, too.
2758 */
2759 if (two_sided_color)
2760 fs_input_slots |= (VARYING_BIT_BFC0 << c);
2761
2762 /* If front color isn't written, we opt to give them back color
2763 * instead of an undefined value. Switch from COL to BFC.
2764 */
2765 if (last_vue_map->varying_to_slot[VARYING_SLOT_COL0 + c] == -1) {
2766 fs_input_slots &= ~(VARYING_BIT_COL0 << c);
2767 fs_input_slots |= (VARYING_BIT_BFC0 << c);
2768 }
2769 }
2770 }
2771
2772 /* Compute the minimum URB Read Length necessary for the FS inputs.
2773 *
2774 * From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
2775 * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
2776 *
2777 * "This field should be set to the minimum length required to read the
2778 * maximum source attribute. The maximum source attribute is indicated
2779 * by the maximum value of the enabled Attribute # Source Attribute if
2780 * Attribute Swizzle Enable is set, Number of Output Attributes-1 if
2781 * enable is not set.
2782 * read_length = ceiling((max_source_attr + 1) / 2)
2783 *
2784 * [errata] Corruption/Hang possible if length programmed larger than
2785 * recommended"
2786 *
2787 * Similar text exists for Ivy Bridge.
2788 *
2789 * We find the last URB slot that's actually read by the FS.
2790 */
2791 unsigned last_read_slot = last_vue_map->num_slots - 1;
2792 while (last_read_slot > first_slot && !(fs_input_slots &
2793 (1ull << last_vue_map->slot_to_varying[last_read_slot])))
2794 --last_read_slot;
2795
2796 /* The URB read length is the difference of the two, counted in pairs. */
2797 *out_length = DIV_ROUND_UP(last_read_slot - first_slot + 1, 2);
2798 }
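/* Worked example (illustrative): if the first FS-read slot is 4 and the
 * last is 7, then *out_offset = 4 / 2 = 2 (counted in 256-bit pairs) and
 * *out_length = DIV_ROUND_UP(7 - 4 + 1, 2) = 2, so the SF reads VUE slots
 * 4..7 and source attribute 0 corresponds to slot 4.
 */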
2799
2800 static void
2801 iris_emit_sbe_swiz(struct iris_batch *batch,
2802 const struct iris_context *ice,
2803 unsigned urb_read_offset,
2804 unsigned sprite_coord_enables)
2805 {
2806 struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = {};
2807 const struct brw_wm_prog_data *wm_prog_data = (void *)
2808 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
2809 const struct brw_vue_map *vue_map = ice->shaders.last_vue_map;
2810 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
2811
2812 /* XXX: this should be generated when putting programs in place */
2813
2814 // XXX: raster->sprite_coord_enable
2815
2816 for (int fs_attr = 0; fs_attr < VARYING_SLOT_MAX; fs_attr++) {
2817 const int input_index = wm_prog_data->urb_setup[fs_attr];
2818 if (input_index < 0 || input_index >= 16)
2819 continue;
2820
2821 struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr =
2822 &attr_overrides[input_index];
2823 int slot = vue_map->varying_to_slot[fs_attr];
2824
2825 /* Viewport and Layer are stored in the VUE header. We need to override
2826 * them to zero if earlier stages didn't write them, as GL requires that
2827 * they read back as zero when not explicitly set.
2828 */
2829 switch (fs_attr) {
2830 case VARYING_SLOT_VIEWPORT:
2831 case VARYING_SLOT_LAYER:
2832 attr->ComponentOverrideX = true;
2833 attr->ComponentOverrideW = true;
2834 attr->ConstantSource = CONST_0000;
2835
2836 if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
2837 attr->ComponentOverrideY = true;
2838 if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
2839 attr->ComponentOverrideZ = true;
2840 continue;
2841
2842 case VARYING_SLOT_PRIMITIVE_ID:
2843 /* Override if the previous shader stage didn't write gl_PrimitiveID. */
2844 if (slot == -1) {
2845 attr->ComponentOverrideX = true;
2846 attr->ComponentOverrideY = true;
2847 attr->ComponentOverrideZ = true;
2848 attr->ComponentOverrideW = true;
2849 attr->ConstantSource = PRIM_ID;
2850 continue;
2851 }
2852 /* fallthrough */
2853 default:
2854 break;
2855 }
2856
2857 if (sprite_coord_enables & (1 << input_index))
2858 continue;
2859
2860 /* If only the back color was written, not the front, use the back
2861 * color instead of an undefined value.
2862 */
2863 if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
2864 slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
2865 if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
2866 slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];
2867
2868 /* Not written by the previous stage - undefined. */
2869 if (slot == -1) {
2870 attr->ComponentOverrideX = true;
2871 attr->ComponentOverrideY = true;
2872 attr->ComponentOverrideZ = true;
2873 attr->ComponentOverrideW = true;
2874 attr->ConstantSource = CONST_0001_FLOAT;
2875 continue;
2876 }
2877
2878 /* Compute the location of the attribute relative to the read offset,
2879 * which is counted in 256-bit increments (two 128-bit VUE slots).
2880 */
2881 const int source_attr = slot - 2 * urb_read_offset;
2882 assert(source_attr >= 0 && source_attr <= 32);
2883 attr->SourceAttribute = source_attr;
2884
2885 /* If we are doing two-sided color, and the VUE slot following this one
2886 * represents a back-facing color, then we need to instruct the SF unit
2887 * to do back-facing swizzling.
2888 */
2889 if (cso_rast->light_twoside &&
2890 ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
2891 vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
2892 (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
2893 vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1)))
2894 attr->SwizzleSelect = INPUTATTR_FACING;
2895 }
2896
2897 iris_emit_cmd(batch, GENX(3DSTATE_SBE_SWIZ), sbes) {
2898 for (int i = 0; i < 16; i++)
2899 sbes.Attribute[i] = attr_overrides[i];
2900 }
2901 }
2902
2903 static unsigned
2904 iris_calculate_point_sprite_overrides(const struct brw_wm_prog_data *prog_data,
2905 const struct iris_rasterizer_state *cso)
2906 {
2907 unsigned overrides = 0;
2908
2909 if (prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
2910 overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_PNTC];
2911
2912 for (int i = 0; i < 8; i++) {
2913 if ((cso->sprite_coord_enable & (1 << i)) &&
2914 prog_data->urb_setup[VARYING_SLOT_TEX0 + i] != -1)
2915 overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_TEX0 + i];
2916 }
2917
2918 return overrides;
2919 }
2920
2921 static void
2922 iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
2923 {
2924 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
2925 const struct brw_wm_prog_data *wm_prog_data = (void *)
2926 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
2927 const struct shader_info *fs_info =
2928 iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
2929
2930 unsigned urb_read_offset, urb_read_length;
2931 iris_compute_sbe_urb_read_interval(fs_info->inputs_read,
2932 ice->shaders.last_vue_map,
2933 cso_rast->light_twoside,
2934 &urb_read_offset, &urb_read_length);
2935
2936 unsigned sprite_coord_overrides =
2937 iris_calculate_point_sprite_overrides(wm_prog_data, cso_rast);
2938
2939 iris_emit_cmd(batch, GENX(3DSTATE_SBE), sbe) {
2940 sbe.AttributeSwizzleEnable = true;
2941 sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
2942 sbe.PointSpriteTextureCoordinateOrigin = cso_rast->sprite_coord_mode;
2943 sbe.VertexURBEntryReadOffset = urb_read_offset;
2944 sbe.VertexURBEntryReadLength = urb_read_length;
2945 sbe.ForceVertexURBEntryReadOffset = true;
2946 sbe.ForceVertexURBEntryReadLength = true;
2947 sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
2948 sbe.PointSpriteTextureCoordinateEnable = sprite_coord_overrides;
2949
2950 for (int i = 0; i < 32; i++) {
2951 sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
2952 }
2953 }
2954
2955 iris_emit_sbe_swiz(batch, ice, urb_read_offset, sprite_coord_overrides);
2956 }
2957
2958 /* ------------------------------------------------------------------- */
2959
2960 /**
2961 * Set sampler-related program key fields based on the current state.
2962 */
2963 static void
2964 iris_populate_sampler_key(const struct iris_context *ice,
2965 struct brw_sampler_prog_key_data *key)
2966 {
2967 for (int i = 0; i < MAX_SAMPLERS; i++) {
2968 key->swizzles[i] = 0x688; /* XYZW */
2969 }
2970 }
2971
2972 /**
2973 * Populate VS program key fields based on the current state.
2974 */
2975 static void
2976 iris_populate_vs_key(const struct iris_context *ice,
2977 const struct shader_info *info,
2978 struct brw_vs_prog_key *key)
2979 {
2980 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
2981
2982 iris_populate_sampler_key(ice, &key->tex);
2983
2984 if (info->clip_distance_array_size == 0 &&
2985 (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)))
2986 key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
2987 }
2988
2989 /**
2990 * Populate TCS program key fields based on the current state.
2991 */
2992 static void
2993 iris_populate_tcs_key(const struct iris_context *ice,
2994 struct brw_tcs_prog_key *key)
2995 {
2996 iris_populate_sampler_key(ice, &key->tex);
2997 }
2998
2999 /**
3000 * Populate TES program key fields based on the current state.
3001 */
3002 static void
3003 iris_populate_tes_key(const struct iris_context *ice,
3004 struct brw_tes_prog_key *key)
3005 {
3006 iris_populate_sampler_key(ice, &key->tex);
3007 }
3008
3009 /**
3010 * Populate GS program key fields based on the current state.
3011 */
3012 static void
3013 iris_populate_gs_key(const struct iris_context *ice,
3014 struct brw_gs_prog_key *key)
3015 {
3016 iris_populate_sampler_key(ice, &key->tex);
3017 }
3018
3019 /**
3020 * Populate FS program key fields based on the current state.
3021 */
3022 static void
3023 iris_populate_fs_key(const struct iris_context *ice,
3024 struct brw_wm_prog_key *key)
3025 {
3026 iris_populate_sampler_key(ice, &key->tex);
3027
3028 /* XXX: dirty flags? */
3029 const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
3030 const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa;
3031 const struct iris_rasterizer_state *rast = ice->state.cso_rast;
3032 const struct iris_blend_state *blend = ice->state.cso_blend;
3033
3034 key->nr_color_regions = fb->nr_cbufs;
3035
3036 key->clamp_fragment_color = rast->clamp_fragment_color;
3037
3038 key->replicate_alpha = fb->nr_cbufs > 1 &&
3039 (zsa->alpha.enabled || blend->alpha_to_coverage);
3040
3041 /* XXX: only bother if COL0/1 are read */
3042 key->flat_shade = rast->flatshade;
3043
3044 key->persample_interp = rast->force_persample_interp;
3045 key->multisample_fbo = rast->multisample && fb->samples > 1;
3046
3047 key->coherent_fb_fetch = true;
3048
3049 // XXX: uint64_t input_slots_valid; - for >16 inputs
3050
3051 // XXX: key->force_dual_color_blend for unigine
3052 // XXX: respect hint for high_quality_derivatives:1;
3053 }
3054
3055 static void
3056 iris_populate_cs_key(const struct iris_context *ice,
3057 struct brw_cs_prog_key *key)
3058 {
3059 iris_populate_sampler_key(ice, &key->tex);
3060 }
3061
3062 #if 0
3063 // XXX: these need to go in INIT_THREAD_DISPATCH_FIELDS
3064 pkt.SamplerCount = \
3065 DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4); \
3066
3067 #endif
3068
3069 static uint64_t
3070 KSP(const struct iris_compiled_shader *shader)
3071 {
3072 struct iris_resource *res = (void *) shader->assembly.res;
3073 return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
3074 }
3075
3076 // Gen11 workaround table #2056 WABTPPrefetchDisable suggests disabling
3077 // prefetching of binding tables on A0 and B0 steppings. XXX: Revisit
3078 // this WA on C0 stepping.
3079
3080 #define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage) \
3081 pkt.KernelStartPointer = KSP(shader); \
3082 pkt.BindingTableEntryCount = GEN_GEN == 11 ? 0 : \
3083 prog_data->binding_table.size_bytes / 4; \
3084 pkt.FloatingPointMode = prog_data->use_alt_mode; \
3085 \
3086 pkt.DispatchGRFStartRegisterForURBData = \
3087 prog_data->dispatch_grf_start_reg; \
3088 pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length; \
3089 pkt.prefix##URBEntryReadOffset = 0; \
3090 \
3091 pkt.StatisticsEnable = true; \
3092 pkt.Enable = true; \
3093 \
3094 if (prog_data->total_scratch) { \
3095 uint32_t scratch_addr = \
3096 iris_get_scratch_space(ice, prog_data->total_scratch, stage); \
3097 pkt.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11; \
3098 pkt.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr); \
3099 }
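/* PerThreadScratchSpace encodes a power-of-two size where 0 means 1KB per
 * thread.  A worked example (illustrative): total_scratch = 2048 bytes
 * gives ffs(2048) - 11 = 12 - 11 = 1, i.e. 2KB per thread.  total_scratch
 * is always a power of two of at least 1KB, so the subtraction never goes
 * negative.
 */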
3100
3101 /**
3102 * Encode most of 3DSTATE_VS based on the compiled shader.
3103 */
3104 static void
3105 iris_store_vs_state(struct iris_context *ice,
3106 const struct gen_device_info *devinfo,
3107 struct iris_compiled_shader *shader)
3108 {
3109 struct brw_stage_prog_data *prog_data = shader->prog_data;
3110 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
3111
3112 iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
3113 INIT_THREAD_DISPATCH_FIELDS(vs, Vertex, MESA_SHADER_VERTEX);
3114 vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
3115 vs.SIMD8DispatchEnable = true;
3116 vs.UserClipDistanceCullTestEnableBitmask =
3117 vue_prog_data->cull_distance_mask;
3118 }
3119 }
3120
3121 /**
3122 * Encode most of 3DSTATE_HS based on the compiled shader.
3123 */
3124 static void
3125 iris_store_tcs_state(struct iris_context *ice,
3126 const struct gen_device_info *devinfo,
3127 struct iris_compiled_shader *shader)
3128 {
3129 struct brw_stage_prog_data *prog_data = shader->prog_data;
3130 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
3131 struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;
3132
3133 iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
3134 INIT_THREAD_DISPATCH_FIELDS(hs, Vertex, MESA_SHADER_TESS_CTRL);
3135
3136 hs.InstanceCount = tcs_prog_data->instances - 1;
3137 hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
3138 hs.IncludeVertexHandles = true;
3139 }
3140 }
3141
3142 /**
3143 * Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
3144 */
3145 static void
3146 iris_store_tes_state(struct iris_context *ice,
3147 const struct gen_device_info *devinfo,
3148 struct iris_compiled_shader *shader)
3149 {
3150 struct brw_stage_prog_data *prog_data = shader->prog_data;
3151 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
3152 struct brw_tes_prog_data *tes_prog_data = (void *) prog_data;
3153
3154 uint32_t *te_state = (void *) shader->derived_data;
3155 uint32_t *ds_state = te_state + GENX(3DSTATE_TE_length);
3156
3157 iris_pack_command(GENX(3DSTATE_TE), te_state, te) {
3158 te.Partitioning = tes_prog_data->partitioning;
3159 te.OutputTopology = tes_prog_data->output_topology;
3160 te.TEDomain = tes_prog_data->domain;
3161 te.TEEnable = true;
3162 te.MaximumTessellationFactorOdd = 63.0;
3163 te.MaximumTessellationFactorNotOdd = 64.0;
3164 }
3165
3166 iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
3167 INIT_THREAD_DISPATCH_FIELDS(ds, Patch, MESA_SHADER_TESS_EVAL);
3168
3169 ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
3170 ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
3171 ds.ComputeWCoordinateEnable =
3172 tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;
3173
3174 ds.UserClipDistanceCullTestEnableBitmask =
3175 vue_prog_data->cull_distance_mask;
3176 }
3177
3178 }
3179
3180 /**
3181 * Encode most of 3DSTATE_GS based on the compiled shader.
3182 */
3183 static void
3184 iris_store_gs_state(struct iris_context *ice,
3185 const struct gen_device_info *devinfo,
3186 struct iris_compiled_shader *shader)
3187 {
3188 struct brw_stage_prog_data *prog_data = shader->prog_data;
3189 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
3190 struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;
3191
3192 iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
3193 INIT_THREAD_DISPATCH_FIELDS(gs, Vertex, MESA_SHADER_GEOMETRY);
3194
3195 gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
3196 gs.OutputTopology = gs_prog_data->output_topology;
3197 gs.ControlDataHeaderSize =
3198 gs_prog_data->control_data_header_size_hwords;
3199 gs.InstanceControl = gs_prog_data->invocations - 1;
3200 gs.DispatchMode = DISPATCH_MODE_SIMD8;
3201 gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
3202 gs.ControlDataFormat = gs_prog_data->control_data_format;
3203 gs.ReorderMode = TRAILING;
3204 gs.ExpectedVertexCount = gs_prog_data->vertices_in;
3205 gs.MaximumNumberofThreads =
3206 GEN_GEN == 8 ? (devinfo->max_gs_threads / 2 - 1)
3207 : (devinfo->max_gs_threads - 1);
3208
3209 if (gs_prog_data->static_vertex_count != -1) {
3210 gs.StaticOutput = true;
3211 gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
3212 }
3213 gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;
3214
3215 gs.UserClipDistanceCullTestEnableBitmask =
3216 vue_prog_data->cull_distance_mask;
3217
3218 const int urb_entry_write_offset = 1;
3219 const uint32_t urb_entry_output_length =
3220 DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
3221 urb_entry_write_offset;
3222
3223 gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
3224 gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
3225 }
3226 }
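/* A note on the output read offset/length above: both are counted in
 * 32-byte units, i.e. pairs of vec4 VUE slots. Write offset 1 skips the
 * first pair (the VUE header and position, which the fixed-function
 * pipeline consumes), and the length covers the remaining slot pairs;
 * e.g. a 7-slot VUE map gives DIV_ROUND_UP(7, 2) - 1 = 3. Zero isn't a
 * valid length, hence the MAX2.
 */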
3227
3228 /**
3229 * Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
3230 */
3231 static void
3232 iris_store_fs_state(struct iris_context *ice,
3233 const struct gen_device_info *devinfo,
3234 struct iris_compiled_shader *shader)
3235 {
3236 struct brw_stage_prog_data *prog_data = shader->prog_data;
3237 struct brw_wm_prog_data *wm_prog_data = (void *) shader->prog_data;
3238
3239 uint32_t *ps_state = (void *) shader->derived_data;
3240 uint32_t *psx_state = ps_state + GENX(3DSTATE_PS_length);
3241
3242 iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
3243 ps.VectorMaskEnable = true;
3244 //ps.SamplerCount = ...
3245 // XXX: WABTPPrefetchDisable, see above, drop at C0
3246 ps.BindingTableEntryCount = GEN_GEN == 11 ? 0 :
3247 prog_data->binding_table.size_bytes / 4;
3248 ps.FloatingPointMode = prog_data->use_alt_mode;
3249 ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);
3250
3251 ps.PushConstantEnable = shader->num_system_values > 0 ||
3252 prog_data->ubo_ranges[0].length > 0;
3253
3254 /* From the documentation for this packet:
3255 * "If the PS kernel does not need the Position XY Offsets to
3256 * compute a Position Value, then this field should be programmed
3257 * to POSOFFSET_NONE."
3258 *
3259 * "SW Recommendation: If the PS kernel needs the Position Offsets
3260 * to compute a Position XY value, this field should match Position
3261 * ZW Interpolation Mode to ensure a consistent position.xyzw
3262 * computation."
3263 *
3264 * We only require XY sample offsets, so this recommendation doesn't
3265 * seem useful at the moment; we may need it in the future.
3266 */
3267 ps.PositionXYOffsetSelect =
3268 wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;
3269 ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
3270 ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
3271 ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
3272
3273 // XXX: Disable SIMD32 with 16x MSAA
3274
3275 ps.DispatchGRFStartRegisterForConstantSetupData0 =
3276 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
3277 ps.DispatchGRFStartRegisterForConstantSetupData1 =
3278 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
3279 ps.DispatchGRFStartRegisterForConstantSetupData2 =
3280 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
3281
3282 ps.KernelStartPointer0 =
3283 KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
3284 ps.KernelStartPointer1 =
3285 KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
3286 ps.KernelStartPointer2 =
3287 KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
3288
3289 if (prog_data->total_scratch) {
3290 uint32_t scratch_addr =
3291 iris_get_scratch_space(ice, prog_data->total_scratch,
3292 MESA_SHADER_FRAGMENT);
3293 ps.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
3294 ps.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr);
3295 }
3296 }
3297
3298 iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
3299 psx.PixelShaderValid = true;
3300 psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
3301 psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
3302 psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
3303 psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
3304 psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
3305 psx.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;
3306
3307 if (wm_prog_data->uses_sample_mask) {
3308 /* TODO: conservative rasterization */
3309 if (wm_prog_data->post_depth_coverage)
3310 psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
3311 else
3312 psx.InputCoverageMaskState = ICMS_NORMAL;
3313 }
3314
3315 psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
3316 psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
3317 psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
3318
3319 // XXX: UAV bit
3320 }
3321 }
3322
3323 /**
3324 * Encode most of INTERFACE_DESCRIPTOR_DATA (the compute shader's
3325 * "derived data") based on the compiled shader, analogous to the
3326 * iris_store_xs_state() functions for the 3D pipeline stages.
3327 */
3328 static void
3329 iris_store_cs_state(struct iris_context *ice,
3330 const struct gen_device_info *devinfo,
3331 struct iris_compiled_shader *shader)
3332 {
3333 struct brw_stage_prog_data *prog_data = shader->prog_data;
3334 struct brw_cs_prog_data *cs_prog_data = (void *) shader->prog_data;
3335 void *map = shader->derived_data;
3336
3337 iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
3338 desc.KernelStartPointer = KSP(shader);
3339 desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
3340 desc.NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads;
3341 desc.SharedLocalMemorySize =
3342 encode_slm_size(GEN_GEN, prog_data->total_shared);
3343 desc.BarrierEnable = cs_prog_data->uses_barrier;
3344 desc.CrossThreadConstantDataReadLength =
3345 cs_prog_data->push.cross_thread.regs;
3346 }
3347 }
3348
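/**
 * Compute the size of the derived data (shader command packets).
 *
 * This must match the data written by the iris_store_xs_state() functions.
 */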
3349 static unsigned
3350 iris_derived_program_state_size(enum iris_program_cache_id cache_id)
3351 {
3352 assert(cache_id <= IRIS_CACHE_BLORP);
3353
3354 static const unsigned dwords[] = {
3355 [IRIS_CACHE_VS] = GENX(3DSTATE_VS_length),
3356 [IRIS_CACHE_TCS] = GENX(3DSTATE_HS_length),
3357 [IRIS_CACHE_TES] = GENX(3DSTATE_TE_length) + GENX(3DSTATE_DS_length),
3358 [IRIS_CACHE_GS] = GENX(3DSTATE_GS_length),
3359 [IRIS_CACHE_FS] =
3360 GENX(3DSTATE_PS_length) + GENX(3DSTATE_PS_EXTRA_length),
3361 [IRIS_CACHE_CS] = GENX(INTERFACE_DESCRIPTOR_DATA_length),
3362 [IRIS_CACHE_BLORP] = 0,
3363 };
3364
3365 return sizeof(uint32_t) * dwords[cache_id];
3366 }
3367
3368 /**
3369 * Create any state packets corresponding to the given shader stage
3370 * (i.e. 3DSTATE_VS) and save them as "derived data" in the shader variant.
3371 * This means that we can look up a program in the in-memory cache and
3372 * get most of the state packet without having to reconstruct it.
3373 */
3374 static void
3375 iris_store_derived_program_state(struct iris_context *ice,
3376 enum iris_program_cache_id cache_id,
3377 struct iris_compiled_shader *shader)
3378 {
3379 struct iris_screen *screen = (void *) ice->ctx.screen;
3380 const struct gen_device_info *devinfo = &screen->devinfo;
3381
3382 switch (cache_id) {
3383 case IRIS_CACHE_VS:
3384 iris_store_vs_state(ice, devinfo, shader);
3385 break;
3386 case IRIS_CACHE_TCS:
3387 iris_store_tcs_state(ice, devinfo, shader);
3388 break;
3389 case IRIS_CACHE_TES:
3390 iris_store_tes_state(ice, devinfo, shader);
3391 break;
3392 case IRIS_CACHE_GS:
3393 iris_store_gs_state(ice, devinfo, shader);
3394 break;
3395 case IRIS_CACHE_FS:
3396 iris_store_fs_state(ice, devinfo, shader);
3397 break;
3398 case IRIS_CACHE_CS:
3399 iris_store_cs_state(ice, devinfo, shader); /* fallthrough to break */
3400 case IRIS_CACHE_BLORP:
3401 break;
3402 default:
3403 break;
3404 }
3405 }
3406
3407 /* ------------------------------------------------------------------- */
3408
3409 /**
3410 * Configure the URB: partition the space the hardware provides for
3411 * URB entries among the active VS/HS/DS/GS stages, based on each
3412 * stage's compiled URB entry size.
3413 */
3414 static void
3415 iris_upload_urb_config(struct iris_context *ice, struct iris_batch *batch)
3416 {
3417 const struct gen_device_info *devinfo = &batch->screen->devinfo;
3418 const unsigned push_size_kB = 32;
3419 unsigned entries[4];
3420 unsigned start[4];
3421 unsigned size[4];
3422
3423 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
3424 if (!ice->shaders.prog[i]) {
3425 size[i] = 1;
3426 } else {
3427 struct brw_vue_prog_data *vue_prog_data =
3428 (void *) ice->shaders.prog[i]->prog_data;
3429 size[i] = vue_prog_data->urb_entry_size;
3430 }
3431 assert(size[i] != 0);
3432 }
3433
3434 gen_get_urb_config(devinfo, 1024 * push_size_kB,
3435 1024 * ice->shaders.urb_size,
3436 ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
3437 ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,
3438 size, entries, start);
3439
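/* The 3DSTATE_URB_{VS,HS,DS,GS} packets share a layout and have
 * consecutive sub-opcodes, in the same order as MESA_SHADER_VERTEX
 * through MESA_SHADER_GEOMETRY, so we emit the VS packet four times and
 * bump the sub-opcode to select the stage. The allocation size field is
 * in 64-byte units, biased by one (0 means one unit), hence "size[i] - 1".
 */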
3440 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
3441 iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
3442 urb._3DCommandSubOpcode += i;
3443 urb.VSURBStartingAddress = start[i];
3444 urb.VSURBEntryAllocationSize = size[i] - 1;
3445 urb.VSNumberofURBEntries = entries[i];
3446 }
3447 }
3448 }
3449
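/* The _3DCommandSubOpcode of 3DSTATE_CONSTANT_{VS,HS,DS,GS,PS}. The
 * packets share a layout, so we emit 3DSTATE_CONSTANT_VS and patch in
 * the right sub-opcode for the stage (see the constants loop below).
 */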
3450 static const uint32_t push_constant_opcodes[] = {
3451 [MESA_SHADER_VERTEX] = 21,
3452 [MESA_SHADER_TESS_CTRL] = 25, /* HS */
3453 [MESA_SHADER_TESS_EVAL] = 26, /* DS */
3454 [MESA_SHADER_GEOMETRY] = 22,
3455 [MESA_SHADER_FRAGMENT] = 23,
3456 [MESA_SHADER_COMPUTE] = 0,
3457 };
3458
3459 static uint32_t
3460 use_null_surface(struct iris_batch *batch, struct iris_context *ice)
3461 {
3462 struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);
3463
3464 iris_use_pinned_bo(batch, state_bo, false);
3465
3466 return ice->state.unbound_tex.offset;
3467 }
3468
3469 static uint32_t
3470 use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
3471 {
3472 /* If set_framebuffer_state() was never called, fall back to 1x1x1 */
3473 if (!ice->state.null_fb.res)
3474 return use_null_surface(batch, ice);
3475
3476 struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);
3477
3478 iris_use_pinned_bo(batch, state_bo, false);
3479
3480 return ice->state.null_fb.offset;
3481 }
3482
3483 /**
3484 * Add a surface to the validation list, as well as the buffer containing
3485 * the corresponding SURFACE_STATE.
3486 *
3487 * Returns the binding table entry (offset to SURFACE_STATE).
3488 */
3489 static uint32_t
3490 use_surface(struct iris_batch *batch,
3491 struct pipe_surface *p_surf,
3492 bool writeable)
3493 {
3494 struct iris_surface *surf = (void *) p_surf;
3495
3496 iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture), writeable);
3497 iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.res), false);
3498
3499 return surf->surface_state.offset;
3500 }
3501
3502 static uint32_t
3503 use_sampler_view(struct iris_batch *batch, struct iris_sampler_view *isv)
3504 {
3505 iris_use_pinned_bo(batch, isv->res->bo, false);
3506 iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.res), false);
3507
3508 return isv->surface_state.offset;
3509 }
3510
3511 static uint32_t
3512 use_const_buffer(struct iris_batch *batch,
3513 struct iris_context *ice,
3514 struct iris_const_buffer *cbuf)
3515 {
3516 if (!cbuf->surface_state.res)
3517 return use_null_surface(batch, ice);
3518
3519 iris_use_pinned_bo(batch, iris_resource_bo(cbuf->data.res), false);
3520 iris_use_pinned_bo(batch, iris_resource_bo(cbuf->surface_state.res), false);
3521
3522 return cbuf->surface_state.offset;
3523 }
3524
3525 static uint32_t
3526 use_ssbo(struct iris_batch *batch, struct iris_context *ice,
3527 struct iris_shader_state *shs, int i)
3528 {
3529 if (!shs->ssbo[i])
3530 return use_null_surface(batch, ice);
3531
3532 struct iris_state_ref *surf_state = &shs->ssbo_surface_state[i];
3533
3534 iris_use_pinned_bo(batch, iris_resource_bo(shs->ssbo[i]), true);
3535 iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false);
3536
3537 return surf_state->offset;
3538 }
3539
3540 static uint32_t
3541 use_image(struct iris_batch *batch, struct iris_context *ice,
3542 struct iris_shader_state *shs, int i)
3543 {
3544 if (!shs->image[i].res)
3545 return use_null_surface(batch, ice);
3546
3547 struct iris_state_ref *surf_state = &shs->image[i].surface_state;
3548
3549 iris_use_pinned_bo(batch, iris_resource_bo(shs->image[i].res),
3550 shs->image[i].access & PIPE_IMAGE_ACCESS_WRITE);
3551 iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false);
3552
3553 return surf_state->offset;
3554 }
3555
3556 #define push_bt_entry(addr) \
3557 assert(addr >= binder_addr); \
3558 if (!pin_only) bt_map[s++] = (addr) - binder_addr;
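/* A note on the macro above: binding table entries are offsets to
 * SURFACE_STATE relative to Surface State Base Address, which we point
 * at the binder BO (see iris_update_surface_base_address() below), so we
 * subtract the binder address from each surface's address. With pin_only
 * set, we skip the writes and merely re-pin the referenced BOs.
 */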
3559
3560 /**
3561 * Populate the binding table for a given shader stage.
3562 *
3563 * This fills out the table of pointers to surfaces required by the shader,
3564 * and also adds those buffers to the validation list, so the kernel can
3565 * make them resident before running our batch.
3566 */
3567 static void
3568 iris_populate_binding_table(struct iris_context *ice,
3569 struct iris_batch *batch,
3570 gl_shader_stage stage,
3571 bool pin_only)
3572 {
3573 const struct iris_binder *binder = &ice->state.binder;
3574 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
3575 if (!shader)
3576 return;
3577
3578 struct iris_shader_state *shs = &ice->state.shaders[stage];
3579 uint32_t binder_addr = binder->bo->gtt_offset;
3580
3581 //struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
3582 uint32_t *bt_map = binder->map + binder->bt_offset[stage];
3583 int s = 0;
3584
3585 const struct shader_info *info = iris_get_shader_info(ice, stage);
3586 if (!info) {
3587 /* TCS passthrough doesn't need a binding table. */
3588 assert(stage == MESA_SHADER_TESS_CTRL);
3589 return;
3590 }
3591
3592 if (stage == MESA_SHADER_COMPUTE) {
3593 /* surface for gl_NumWorkGroups */
3594 struct iris_state_ref *grid_data = &ice->state.grid_size;
3595 struct iris_state_ref *grid_state = &ice->state.grid_surf_state;
3596 iris_use_pinned_bo(batch, iris_resource_bo(grid_data->res), false);
3597 iris_use_pinned_bo(batch, iris_resource_bo(grid_state->res), false);
3598 push_bt_entry(grid_state->offset);
3599 }
3600
3601 if (stage == MESA_SHADER_FRAGMENT) {
3602 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
3603 /* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
3604 if (cso_fb->nr_cbufs) {
3605 for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
3606 uint32_t addr =
3607 cso_fb->cbufs[i] ? use_surface(batch, cso_fb->cbufs[i], true)
3608 : use_null_fb_surface(batch, ice);
3609 push_bt_entry(addr);
3610 }
3611 } else {
3612 uint32_t addr = use_null_fb_surface(batch, ice);
3613 push_bt_entry(addr);
3614 }
3615 }
3616
3617 //assert(prog_data->binding_table.texture_start ==
3618 //(ice->state.num_textures[stage] ? s : 0xd0d0d0d0));
3619
3620 for (int i = 0; i < shs->num_textures; i++) {
3621 struct iris_sampler_view *view = shs->textures[i];
3622 uint32_t addr = view ? use_sampler_view(batch, view)
3623 : use_null_surface(batch, ice);
3624 push_bt_entry(addr);
3625 }
3626
3627 for (int i = 0; i < info->num_images; i++) {
3628 uint32_t addr = use_image(batch, ice, shs, i);
3629 push_bt_entry(addr);
3630 }
3631
3632 const int num_ubos = iris_get_shader_num_ubos(ice, stage);
3633
3634 for (int i = 0; i < num_ubos; i++) {
3635 uint32_t addr = use_const_buffer(batch, ice, &shs->constbuf[i]);
3636 push_bt_entry(addr);
3637 }
3638
3639 /* XXX: st is wasting 16 binding table slots for ABOs. We should add a
3640 * cap for changing the nir_lower_atomics_to_ssbos setting and the
3641 * buffer_base offset in st_atom_storagebuf.c, so it'll compact them
3642 * into one range with SSBOs starting at info->num_abos. Ideally it'd
3643 * reset num_abos to 0 too. */
3644 if (info->num_abos + info->num_ssbos > 0) {
3645 for (int i = 0; i < IRIS_MAX_ABOS + info->num_ssbos; i++) {
3646 uint32_t addr = use_ssbo(batch, ice, shs, i);
3647 push_bt_entry(addr);
3648 }
3649 }
3650
3651 #if 0
3652 // XXX: not implemented yet
3653 assert(prog_data->binding_table.plane_start[1] == 0xd0d0d0d0);
3654 assert(prog_data->binding_table.plane_start[2] == 0xd0d0d0d0);
3655 #endif
3656 }
3657
3658 static void
3659 iris_use_optional_res(struct iris_batch *batch,
3660 struct pipe_resource *res,
3661 bool writeable)
3662 {
3663 if (res) {
3664 struct iris_bo *bo = iris_resource_bo(res);
3665 iris_use_pinned_bo(batch, bo, writeable);
3666 }
3667 }
3668
3669 /* ------------------------------------------------------------------- */
3670
3671 /**
3672 * Pin any BOs which were installed by a previous batch, and restored
3673 * via the hardware logical context mechanism.
3674 *
3675 * We don't need to re-emit all state every batch - the hardware context
3676 * mechanism will save and restore it for us. This includes pointers to
3677 * various BOs...which won't be resident unless we ask the kernel to pin
3678 * them by adding them to the validation list.
3679 *
3680 * We can skip pinning a buffer if we've re-emitted the packets referring
3681 * to it, as the new packets overwrite the stale pointers and no longer
3682 * refer to the old BOs.
3683 */
3684 static void
3685 iris_restore_render_saved_bos(struct iris_context *ice,
3686 struct iris_batch *batch,
3687 const struct pipe_draw_info *draw)
3688 {
3689 // XXX: whack IRIS_SHADER_DIRTY_BINDING_TABLE on new batch
3690
3691 const uint64_t clean = ~ice->state.dirty;
3692
3693 if (clean & IRIS_DIRTY_CC_VIEWPORT) {
3694 iris_use_optional_res(batch, ice->state.last_res.cc_vp, false);
3695 }
3696
3697 if (clean & IRIS_DIRTY_SF_CL_VIEWPORT) {
3698 iris_use_optional_res(batch, ice->state.last_res.sf_cl_vp, false);
3699 }
3700
3701 if (clean & IRIS_DIRTY_BLEND_STATE) {
3702 iris_use_optional_res(batch, ice->state.last_res.blend, false);
3703 }
3704
3705 if (clean & IRIS_DIRTY_COLOR_CALC_STATE) {
3706 iris_use_optional_res(batch, ice->state.last_res.color_calc, false);
3707 }
3708
3709 if (clean & IRIS_DIRTY_SCISSOR_RECT) {
3710 iris_use_optional_res(batch, ice->state.last_res.scissor, false);
3711 }
3712
3713 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
3714 if (!(clean & (IRIS_DIRTY_CONSTANTS_VS << stage)))
3715 continue;
3716
3717 struct iris_shader_state *shs = &ice->state.shaders[stage];
3718 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
3719
3720 if (!shader)
3721 continue;
3722
3723 struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
3724
3725 for (int i = 0; i < 4; i++) {
3726 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
3727
3728 if (range->length == 0)
3729 continue;
3730
3731 struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
3732 struct iris_resource *res = (void *) cbuf->data.res;
3733
3734 if (res)
3735 iris_use_pinned_bo(batch, res->bo, false);
3736 else
3737 iris_use_pinned_bo(batch, batch->screen->workaround_bo, false);
3738 }
3739 }
3740
3741 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
3742 if (clean & (IRIS_DIRTY_BINDINGS_VS << stage)) {
3743 /* Re-pin any buffers referred to by the binding table. */
3744 iris_populate_binding_table(ice, batch, stage, true);
3745 }
3746 }
3747
3748 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
3749 struct iris_shader_state *shs = &ice->state.shaders[stage];
3750 struct pipe_resource *res = shs->sampler_table.res;
3751 if (res)
3752 iris_use_pinned_bo(batch, iris_resource_bo(res), false);
3753 }
3754
3755 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
3756 if (clean & (IRIS_DIRTY_VS << stage)) {
3757 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
3758 if (shader) {
3759 struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
3760 iris_use_pinned_bo(batch, bo, false);
3761 }
3762
3763 // XXX: scratch buffer
3764 }
3765 }
3766
3767 if (clean & IRIS_DIRTY_DEPTH_BUFFER) {
3768 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
3769
3770 if (cso_fb->zsbuf) {
3771 struct iris_resource *zres, *sres;
3772 iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
3773 &zres, &sres);
3774 // XXX: might not be writable...
3775 if (zres)
3776 iris_use_pinned_bo(batch, zres->bo, true);
3777 if (sres)
3778 iris_use_pinned_bo(batch, sres->bo, true);
3779 }
3780 }
3781
3782 if (draw->index_size == 0 && ice->state.last_res.index_buffer) {
3783 /* This draw didn't emit a new index buffer, so we're inheriting the
3784 * older one. This draw didn't need it, but future draws may.
3785 */
3786 struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
3787 iris_use_pinned_bo(batch, bo, false);
3788 }
3789
3790 if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
3791 struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;
3792 for (unsigned i = 0; i < cso->num_buffers; i++) {
3793 struct iris_resource *res = (void *) cso->resources[i];
3794 iris_use_pinned_bo(batch, res->bo, false);
3795 }
3796 }
3797 }
3798
3799 static void
3800 iris_restore_compute_saved_bos(struct iris_context *ice,
3801 struct iris_batch *batch,
3802 const struct pipe_grid_info *grid)
3803 {
3804 const uint64_t clean = ~ice->state.dirty;
3805
3806 const int stage = MESA_SHADER_COMPUTE;
3807 struct iris_shader_state *shs = &ice->state.shaders[stage];
3808
3809 if (clean & IRIS_DIRTY_CONSTANTS_CS) {
3810 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
3811
3812 if (shader) {
3813 struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
3814 const struct brw_ubo_range *range = &prog_data->ubo_ranges[0];
3815
3816 if (range->length > 0) {
3817 struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
3818 struct iris_resource *res = (void *) cbuf->data.res;
3819
3820 if (res)
3821 iris_use_pinned_bo(batch, res->bo, false);
3822 else
3823 iris_use_pinned_bo(batch, batch->screen->workaround_bo, false);
3824 }
3825 }
3826 }
3827
3828 if (clean & IRIS_DIRTY_BINDINGS_CS) {
3829 /* Re-pin any buffers referred to by the binding table. */
3830 iris_populate_binding_table(ice, batch, stage, true);
3831 }
3832
3833 struct pipe_resource *sampler_res = shs->sampler_table.res;
3834 if (sampler_res)
3835 iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false);
3836
3837 if (clean & IRIS_DIRTY_CS) {
3838 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
3839 if (shader) {
3840 struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
3841 iris_use_pinned_bo(batch, bo, false);
3842 }
3843
3844 // XXX: scratch buffer
3845 }
3846 }
3847
3848 /**
3849 * Possibly emit STATE_BASE_ADDRESS to update Surface State Base Address.
3850 */
3851 static void
3852 iris_update_surface_base_address(struct iris_batch *batch,
3853 struct iris_binder *binder)
3854 {
3855 if (batch->last_surface_base_address == binder->bo->gtt_offset)
3856 return;
3857
3858 flush_for_state_base_change(batch);
3859
3860 iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
3861 // XXX: sba.SurfaceStateMemoryObjectControlState = MOCS_WB;
3862 sba.SurfaceStateBaseAddressModifyEnable = true;
3863 sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);
3864 }
3865
3866 batch->last_surface_base_address = binder->bo->gtt_offset;
3867 }
3868
3869 static void
3870 iris_upload_dirty_render_state(struct iris_context *ice,
3871 struct iris_batch *batch,
3872 const struct pipe_draw_info *draw)
3873 {
3874 const uint64_t dirty = ice->state.dirty;
3875
3876 if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER))
3877 return;
3878
3879 struct iris_genx_state *genx = ice->state.genx;
3880 struct iris_binder *binder = &ice->state.binder;
3881 struct brw_wm_prog_data *wm_prog_data = (void *)
3882 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
3883
3884 if (dirty & IRIS_DIRTY_CC_VIEWPORT) {
3885 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
3886 uint32_t cc_vp_address;
3887
3888 /* XXX: could avoid streaming for depth_clip [0,1] case. */
3889 uint32_t *cc_vp_map =
3890 stream_state(batch, ice->state.dynamic_uploader,
3891 &ice->state.last_res.cc_vp,
3892 4 * ice->state.num_viewports *
3893 GENX(CC_VIEWPORT_length), 32, &cc_vp_address);
3894 for (int i = 0; i < ice->state.num_viewports; i++) {
3895 float zmin, zmax;
3896 util_viewport_zmin_zmax(&ice->state.viewports[i],
3897 cso_rast->clip_halfz, &zmin, &zmax);
3898 if (cso_rast->depth_clip_near)
3899 zmin = 0.0;
3900 if (cso_rast->depth_clip_far)
3901 zmax = 1.0;
3902
3903 iris_pack_state(GENX(CC_VIEWPORT), cc_vp_map, ccv) {
3904 ccv.MinimumDepth = zmin;
3905 ccv.MaximumDepth = zmax;
3906 }
3907
3908 cc_vp_map += GENX(CC_VIEWPORT_length);
3909 }
3910
3911 iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
3912 ptr.CCViewportPointer = cc_vp_address;
3913 }
3914 }
3915
3916 if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
3917 iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
3918 ptr.SFClipViewportPointer =
3919 emit_state(batch, ice->state.dynamic_uploader,
3920 &ice->state.last_res.sf_cl_vp,
3921 genx->sf_cl_vp, 4 * GENX(SF_CLIP_VIEWPORT_length) *
3922 ice->state.num_viewports, 64);
3923 }
3924 }
3925
3926 /* XXX: L3 State */
3927
3928 // XXX: this is only flagged at setup; we assume a static URB configuration
3929 if (dirty & IRIS_DIRTY_URB) {
3930 iris_upload_urb_config(ice, batch);
3931 }
3932
3933 if (dirty & IRIS_DIRTY_BLEND_STATE) {
3934 struct iris_blend_state *cso_blend = ice->state.cso_blend;
3935 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
3936 struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
3937 const int header_dwords = GENX(BLEND_STATE_length);
3938 const int rt_dwords = cso_fb->nr_cbufs * GENX(BLEND_STATE_ENTRY_length);
3939 uint32_t blend_offset;
3940 uint32_t *blend_map =
3941 stream_state(batch, ice->state.dynamic_uploader,
3942 &ice->state.last_res.blend,
3943 4 * (header_dwords + rt_dwords), 64, &blend_offset);
3944
3945 uint32_t blend_state_header;
3946 iris_pack_state(GENX(BLEND_STATE), &blend_state_header, bs) {
3947 bs.AlphaTestEnable = cso_zsa->alpha.enabled;
3948 bs.AlphaTestFunction = translate_compare_func(cso_zsa->alpha.func);
3949 }
3950
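/* cso_blend->blend_state[0] holds the BLEND_STATE header fields packed
 * at create time; the alpha test fields packed above occupy disjoint
 * bits of the same DWord, so OR the two together, then append the
 * per-render-target entries verbatim.
 */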
3951 blend_map[0] = blend_state_header | cso_blend->blend_state[0];
3952 memcpy(&blend_map[1], &cso_blend->blend_state[1], 4 * rt_dwords);
3953
3954 iris_emit_cmd(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
3955 ptr.BlendStatePointer = blend_offset;
3956 ptr.BlendStatePointerValid = true;
3957 }
3958 }
3959
3960 if (dirty & IRIS_DIRTY_COLOR_CALC_STATE) {
3961 struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
3962 uint32_t cc_offset;
3963 void *cc_map =
3964 stream_state(batch, ice->state.dynamic_uploader,
3965 &ice->state.last_res.color_calc,
3966 sizeof(uint32_t) * GENX(COLOR_CALC_STATE_length),
3967 64, &cc_offset);
3968 iris_pack_state(GENX(COLOR_CALC_STATE), cc_map, cc) {
3969 cc.AlphaTestFormat = ALPHATEST_FLOAT32;
3970 cc.AlphaReferenceValueAsFLOAT32 = cso->alpha.ref_value;
3971 cc.BlendConstantColorRed = ice->state.blend_color.color[0];
3972 cc.BlendConstantColorGreen = ice->state.blend_color.color[1];
3973 cc.BlendConstantColorBlue = ice->state.blend_color.color[2];
3974 cc.BlendConstantColorAlpha = ice->state.blend_color.color[3];
3975 }
3976 iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
3977 ptr.ColorCalcStatePointer = cc_offset;
3978 ptr.ColorCalcStatePointerValid = true;
3979 }
3980 }
3981
3982 /* Upload constants for TCS passthrough. */
3983 if ((dirty & IRIS_DIRTY_CONSTANTS_TCS) &&
3984 ice->shaders.prog[MESA_SHADER_TESS_CTRL] &&
3985 !ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL]) {
3986 struct iris_compiled_shader *tes_shader = ice->shaders.prog[MESA_SHADER_TESS_EVAL];
3987 assert(tes_shader);
3988
3989 /* Passthrough always copies 2 vec4s, so when uploading data we ensure
3990 * it is in the right layout for TES.
3991 */
3992 float hdr[8] = {};
3993 struct brw_tes_prog_data *tes_prog_data = (void *) tes_shader->prog_data;
3994 switch (tes_prog_data->domain) {
3995 case BRW_TESS_DOMAIN_QUAD:
3996 for (int i = 0; i < 4; i++)
3997 hdr[7 - i] = ice->state.default_outer_level[i];
3998 hdr[3] = ice->state.default_inner_level[0];
3999 hdr[2] = ice->state.default_inner_level[1];
4000 break;
4001 case BRW_TESS_DOMAIN_TRI:
4002 for (int i = 0; i < 3; i++)
4003 hdr[7 - i] = ice->state.default_outer_level[i];
4004 hdr[4] = ice->state.default_inner_level[0];
4005 break;
4006 case BRW_TESS_DOMAIN_ISOLINE:
4007 hdr[7] = ice->state.default_outer_level[1];
4008 hdr[6] = ice->state.default_outer_level[0];
4009 break;
4010 }
4011
4012 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
4013 struct iris_const_buffer *cbuf = &shs->constbuf[0];
4014 u_upload_data(ice->ctx.const_uploader, 0, sizeof(hdr), 32,
4015 &hdr[0], &cbuf->data.offset,
4016 &cbuf->data.res);
4017 }
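/* A note on hdr[] above: TES reads the tessellation factors from the
 * tail of the two-vec4 patch header, so the defaults are stored in
 * "reverse" order - outer levels at hdr[7] counting downward, inner
 * levels in the DWords below them - mirroring what a real TCS would have
 * written for each domain.
 */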
4018
4019 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
4020 if (!(dirty & (IRIS_DIRTY_CONSTANTS_VS << stage)))
4021 continue;
4022
4023 struct iris_shader_state *shs = &ice->state.shaders[stage];
4024 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
4025
4026 if (!shader)
4027 continue;
4028
4029 if (shs->cbuf0_needs_upload)
4030 upload_uniforms(ice, stage);
4031
4032 struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
4033
4034 iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
4035 pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
4036 if (prog_data) {
4037 /* The Skylake PRM contains the following restriction:
4038 *
4039 * "The driver must ensure The following case does not occur
4040 * without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
4041 * buffer 3 read length equal to zero committed followed by a
4042 * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to
4043 * zero committed."
4044 *
4045 * To avoid this, we program the buffers in the highest slots.
4046 * This way, slot 0 is only used if slot 3 is also used.
4047 */
4048 int n = 3;
4049
4050 for (int i = 3; i >= 0; i--) {
4051 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
4052
4053 if (range->length == 0)
4054 continue;
4055
4056 struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
4057 struct iris_resource *res = (void *) cbuf->data.res;
4058
4059 assert(cbuf->data.offset % 32 == 0);
4060
4061 pkt.ConstantBody.ReadLength[n] = range->length;
4062 pkt.ConstantBody.Buffer[n] =
4063 res ? ro_bo(res->bo, range->start * 32 + cbuf->data.offset)
4064 : ro_bo(batch->screen->workaround_bo, 0);
4065 n--;
4066 }
4067 }
4068 }
4069 }
4070
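/* 3DSTATE_BINDING_TABLE_POINTERS_{VS,HS,DS,GS,PS} have consecutive
 * sub-opcodes (38..42) in gl_shader_stage order, so as with the
 * constants above, we emit the VS packet and patch the sub-opcode.
 * 3DSTATE_SAMPLER_STATE_POINTERS_* (43..47) below works the same way.
 */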
4071 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
4072 if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
4073 iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
4074 ptr._3DCommandSubOpcode = 38 + stage;
4075 ptr.PointertoVSBindingTable = binder->bt_offset[stage];
4076 }
4077 }
4078 }
4079
4080 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
4081 if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
4082 iris_populate_binding_table(ice, batch, stage, false);
4083 }
4084 }
4085
4086 if (ice->state.need_border_colors)
4087 iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false);
4088
4089 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
4090 if (!(dirty & (IRIS_DIRTY_SAMPLER_STATES_VS << stage)) ||
4091 !ice->shaders.prog[stage])
4092 continue;
4093
4094 struct iris_shader_state *shs = &ice->state.shaders[stage];
4095 struct pipe_resource *res = shs->sampler_table.res;
4096 if (res)
4097 iris_use_pinned_bo(batch, iris_resource_bo(res), false);
4098
4099 iris_emit_cmd(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
4100 ptr._3DCommandSubOpcode = 43 + stage;
4101 ptr.PointertoVSSamplerState = shs->sampler_table.offset;
4102 }
4103 }
4104
4105 if (dirty & IRIS_DIRTY_MULTISAMPLE) {
4106 iris_emit_cmd(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
4107 ms.PixelLocation =
4108 ice->state.cso_rast->half_pixel_center ? CENTER : UL_CORNER;
4109 if (ice->state.framebuffer.samples > 0)
4110 ms.NumberofMultisamples = ffs(ice->state.framebuffer.samples) - 1;
4111 }
4112 }
4113
4114 if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
4115 iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
4116 ms.SampleMask = MAX2(ice->state.sample_mask, 1);
4117 }
4118 }
4119
4120 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
4121 if (!(dirty & (IRIS_DIRTY_VS << stage)))
4122 continue;
4123
4124 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
4125
4126 if (shader) {
4127 struct iris_resource *cache = (void *) shader->assembly.res;
4128 iris_use_pinned_bo(batch, cache->bo, false);
4129 iris_batch_emit(batch, shader->derived_data,
4130 iris_derived_program_state_size(stage));
4131 } else {
4132 if (stage == MESA_SHADER_TESS_EVAL) {
4133 iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
4134 iris_emit_cmd(batch, GENX(3DSTATE_TE), te);
4135 iris_emit_cmd(batch, GENX(3DSTATE_DS), ds);
4136 } else if (stage == MESA_SHADER_GEOMETRY) {
4137 iris_emit_cmd(batch, GENX(3DSTATE_GS), gs);
4138 }
4139 }
4140 }
4141
4142 if (ice->state.streamout_active) {
4143 if (dirty & IRIS_DIRTY_SO_BUFFERS) {
4144 iris_batch_emit(batch, genx->so_buffers,
4145 4 * 4 * GENX(3DSTATE_SO_BUFFER_length));
4146 for (int i = 0; i < 4; i++) {
4147 struct iris_stream_output_target *tgt =
4148 (void *) ice->state.so_target[i];
4149 if (tgt) {
4150 iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
4151 true);
4152 iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
4153 true);
4154 }
4155 }
4156 }
4157
4158 if ((dirty & IRIS_DIRTY_SO_DECL_LIST) && ice->state.streamout) {
4159 uint32_t *decl_list =
4160 ice->state.streamout + GENX(3DSTATE_STREAMOUT_length);
4161 iris_batch_emit(batch, decl_list, 4 * ((decl_list[0] & 0xff) + 2));
4162 }
4163
4164 if (dirty & IRIS_DIRTY_STREAMOUT) {
4165 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4166
4167 uint32_t dynamic_sol[GENX(3DSTATE_STREAMOUT_length)];
4168 iris_pack_command(GENX(3DSTATE_STREAMOUT), dynamic_sol, sol) {
4169 sol.SOFunctionEnable = true;
4170 sol.SOStatisticsEnable = true;
4171
4172 sol.RenderingDisable = cso_rast->rasterizer_discard &&
4173 !ice->state.prims_generated_query_active;
4174 sol.ReorderMode = cso_rast->flatshade_first ? LEADING : TRAILING;
4175 }
4176
4177 assert(ice->state.streamout);
4178
4179 iris_emit_merge(batch, ice->state.streamout, dynamic_sol,
4180 GENX(3DSTATE_STREAMOUT_length));
4181 }
4182 } else {
4183 if (dirty & IRIS_DIRTY_STREAMOUT) {
4184 iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
4185 }
4186 }
4187
4188 if (dirty & IRIS_DIRTY_CLIP) {
4189 struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4190 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4191
4192 uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
4193 iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
4194 if (wm_prog_data->barycentric_interp_modes &
4195 BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
4196 cl.NonPerspectiveBarycentricEnable = true;
4197
4198 cl.ForceZeroRTAIndexEnable = cso_fb->layers == 0;
4199 cl.MaximumVPIndex = ice->state.num_viewports - 1;
4200 }
4201 iris_emit_merge(batch, cso_rast->clip, dynamic_clip,
4202 ARRAY_SIZE(cso_rast->clip));
4203 }
4204
4205 if (dirty & IRIS_DIRTY_RASTER) {
4206 struct iris_rasterizer_state *cso = ice->state.cso_rast;
4207 iris_batch_emit(batch, cso->raster, sizeof(cso->raster));
4208 iris_batch_emit(batch, cso->sf, sizeof(cso->sf));
4209
4210 }
4211
4212 /* XXX: FS program updates need to flag IRIS_DIRTY_WM. */
4213 if (dirty & IRIS_DIRTY_WM) {
4214 struct iris_rasterizer_state *cso = ice->state.cso_rast;
4215 uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];
4216
4217 iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
4218 wm.StatisticsEnable = ice->state.statistics_counters_enabled;
4219
4220 wm.BarycentricInterpolationMode =
4221 wm_prog_data->barycentric_interp_modes;
4222
4223 if (wm_prog_data->early_fragment_tests)
4224 wm.EarlyDepthStencilControl = EDSC_PREPS;
4225 else if (wm_prog_data->has_side_effects)
4226 wm.EarlyDepthStencilControl = EDSC_PSEXEC;
4227 }
4228 iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
4229 }
4230
4231 if (dirty & IRIS_DIRTY_SBE) {
4232 iris_emit_sbe(batch, ice);
4233 }
4234
4235 if (dirty & IRIS_DIRTY_PS_BLEND) {
4236 struct iris_blend_state *cso_blend = ice->state.cso_blend;
4237 struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
4238 uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
4239 iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
4240 pb.HasWriteableRT = true; // XXX: comes from somewhere :(
4241 pb.AlphaTestEnable = cso_zsa->alpha.enabled;
4242 }
4243
4244 iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
4245 ARRAY_SIZE(cso_blend->ps_blend));
4246 }
4247
4248 if (dirty & IRIS_DIRTY_WM_DEPTH_STENCIL) {
4249 struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
4250 struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
4251
4252 uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
4253 iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
4254 wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
4255 wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
4256 }
4257 iris_emit_merge(batch, cso->wmds, stencil_refs, ARRAY_SIZE(cso->wmds));
4258 }
4259
4260 if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
4261 uint32_t scissor_offset =
4262 emit_state(batch, ice->state.dynamic_uploader,
4263 &ice->state.last_res.scissor,
4264 ice->state.scissors,
4265 sizeof(struct pipe_scissor_state) *
4266 ice->state.num_viewports, 32);
4267
4268 iris_emit_cmd(batch, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
4269 ptr.ScissorRectPointer = scissor_offset;
4270 }
4271 }
4272
4273 if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
4274 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4275 struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
4276
4277 iris_batch_emit(batch, cso_z->packets, sizeof(cso_z->packets));
4278
4279 if (cso_fb->zsbuf) {
4280 struct iris_resource *zres, *sres;
4281 iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
4282 &zres, &sres);
4283 // XXX: might not be writable...
4284 if (zres)
4285 iris_use_pinned_bo(batch, zres->bo, true);
4286 if (sres)
4287 iris_use_pinned_bo(batch, sres->bo, true);
4288 }
4289 }
4290
4291 if (dirty & IRIS_DIRTY_POLYGON_STIPPLE) {
4292 iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
4293 for (int i = 0; i < 32; i++) {
4294 poly.PatternRow[i] = ice->state.poly_stipple.stipple[i];
4295 }
4296 }
4297 }
4298
4299 if (dirty & IRIS_DIRTY_LINE_STIPPLE) {
4300 struct iris_rasterizer_state *cso = ice->state.cso_rast;
4301 iris_batch_emit(batch, cso->line_stipple, sizeof(cso->line_stipple));
4302 }
4303
4304 if (dirty & IRIS_DIRTY_VF_TOPOLOGY) {
4305 iris_emit_cmd(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
4306 topo.PrimitiveTopologyType =
4307 translate_prim_type(draw->mode, draw->vertices_per_patch);
4308 }
4309 }
4310
4311 if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
4312 struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;
4313 const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
4314
4315 if (cso->num_buffers > 0) {
4316 iris_batch_emit(batch, cso->vertex_buffers, sizeof(uint32_t) *
4317 (1 + vb_dwords * cso->num_buffers));
4318
4319 for (unsigned i = 0; i < cso->num_buffers; i++) {
4320 struct iris_resource *res = (void *) cso->resources[i];
4321 if (res)
4322 iris_use_pinned_bo(batch, res->bo, false);
4323 }
4324 }
4325 }
4326
4327 if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) {
4328 struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
4329 const unsigned entries = MAX2(cso->count, 1);
4330 iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) *
4331 (1 + entries * GENX(VERTEX_ELEMENT_STATE_length)));
4332 iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) *
4333 entries * GENX(3DSTATE_VF_INSTANCING_length));
4334 }
4335
4336 if (dirty & IRIS_DIRTY_VF_SGVS) {
4337 const struct brw_vs_prog_data *vs_prog_data = (void *)
4338 ice->shaders.prog[MESA_SHADER_VERTEX]->prog_data;
4339 struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
4340
4341 iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgv) {
4342 if (vs_prog_data->uses_vertexid) {
4343 sgv.VertexIDEnable = true;
4344 sgv.VertexIDComponentNumber = 2;
4345 sgv.VertexIDElementOffset = cso->count;
4346 }
4347
4348 if (vs_prog_data->uses_instanceid) {
4349 sgv.InstanceIDEnable = true;
4350 sgv.InstanceIDComponentNumber = 3;
4351 sgv.InstanceIDElementOffset = cso->count;
4352 }
4353 }
4354 }
4355
4356 if (dirty & IRIS_DIRTY_VF) {
4357 iris_emit_cmd(batch, GENX(3DSTATE_VF), vf) {
4358 if (draw->primitive_restart) {
4359 vf.IndexedDrawCutIndexEnable = true;
4360 vf.CutIndex = draw->restart_index;
4361 }
4362 }
4363 }
4364
4365 // XXX: Gen8 - PMA fix
4366 }
4367
4368 static void
4369 iris_upload_render_state(struct iris_context *ice,
4370 struct iris_batch *batch,
4371 const struct pipe_draw_info *draw)
4372 {
4373 /* Always pin the binder. If we're emitting new binding table pointers,
4374 * we need it. If not, we're probably inheriting old tables via the
4375 * context, and need it anyway. Since true zero-bindings cases are
4376 * practically non-existent, just pin it and avoid last_res tracking.
4377 */
4378 iris_use_pinned_bo(batch, ice->state.binder.bo, false);
4379
4380 iris_upload_dirty_render_state(ice, batch, draw);
4381
4382 if (draw->index_size > 0) {
4383 unsigned offset;
4384
4385 if (draw->has_user_indices) {
4386 u_upload_data(ice->ctx.stream_uploader, 0,
4387 draw->count * draw->index_size, 4, draw->index.user,
4388 &offset, &ice->state.last_res.index_buffer);
4389 } else {
4390 pipe_resource_reference(&ice->state.last_res.index_buffer,
4391 draw->index.resource);
4392 offset = 0;
4393 }
4394
4395 struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
4396
4397 iris_emit_cmd(batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
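/* index_size is 1, 2, or 4 bytes, so shifting it right by one maps
 * those onto the INDEX_BYTE (0), INDEX_WORD (1), and INDEX_DWORD (2)
 * format encodings.
 */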
4398 ib.IndexFormat = draw->index_size >> 1;
4399 ib.MOCS = MOCS_WB;
4400 ib.BufferSize = bo->size;
4401 ib.BufferStartingAddress = ro_bo(bo, offset);
4402 }
4403 }
4404
4405 #define _3DPRIM_END_OFFSET 0x2420
4406 #define _3DPRIM_START_VERTEX 0x2430
4407 #define _3DPRIM_VERTEX_COUNT 0x2434
4408 #define _3DPRIM_INSTANCE_COUNT 0x2438
4409 #define _3DPRIM_START_INSTANCE 0x243C
4410 #define _3DPRIM_BASE_VERTEX 0x2440
4411
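/* The indirect path below loads the 3DPRIM registers straight from the
 * caller's buffer, which follows the standard GL/Gallium draw-indirect
 * layout of consecutive uint32_t words:
 *
 *    indexed:     { count, instance_count, first_index,
 *                   base_vertex, first_instance }
 *    non-indexed: { count, instance_count, first, first_instance }
 *
 * hence the +0/+4/+8/+12/+16 offsets.
 */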
4412 if (draw->indirect) {
4413 /* We don't support MultiDrawIndirect (indirect_draw_count) yet. */
4414 assert(!draw->indirect->indirect_draw_count);
4415
4416 struct iris_bo *bo = iris_resource_bo(draw->indirect->buffer);
4417 assert(bo);
4418
4419 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
4420 lrm.RegisterAddress = _3DPRIM_VERTEX_COUNT;
4421 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 0);
4422 }
4423 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
4424 lrm.RegisterAddress = _3DPRIM_INSTANCE_COUNT;
4425 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 4);
4426 }
4427 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
4428 lrm.RegisterAddress = _3DPRIM_START_VERTEX;
4429 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 8);
4430 }
4431 if (draw->index_size) {
4432 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
4433 lrm.RegisterAddress = _3DPRIM_BASE_VERTEX;
4434 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
4435 }
4436 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
4437 lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
4438 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 16);
4439 }
4440 } else {
4441 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
4442 lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
4443 lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
4444 }
4445 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
4446 lri.RegisterOffset = _3DPRIM_BASE_VERTEX;
4447 lri.DataDWord = 0;
4448 }
4449 }
4450 }
4451
4452 iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
4453 prim.StartInstanceLocation = draw->start_instance;
4454 prim.InstanceCount = draw->instance_count;
4455 prim.VertexCountPerInstance = draw->count;
4456 prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;
4457
4458 // XXX: this is probably bonkers.
4459 prim.StartVertexLocation = draw->start;
4460
4461 prim.IndirectParameterEnable = draw->indirect != NULL;
4462
4463 if (draw->index_size) {
4464 prim.BaseVertexLocation += draw->index_bias;
4465 } else {
4466 prim.StartVertexLocation += draw->index_bias;
4467 }
4468
4469 //prim.BaseVertexLocation = ...;
4470 }
4471
4472 if (!batch->contains_draw) {
4473 iris_restore_render_saved_bos(ice, batch, draw);
4474 batch->contains_draw = true;
4475 }
4476 }
4477
4478 static void
4479 iris_upload_compute_state(struct iris_context *ice,
4480 struct iris_batch *batch,
4481 const struct pipe_grid_info *grid)
4482 {
4483 const uint64_t dirty = ice->state.dirty;
4484 struct iris_screen *screen = batch->screen;
4485 const struct gen_device_info *devinfo = &screen->devinfo;
4486 struct iris_binder *binder = &ice->state.binder;
4487 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
4488 struct iris_compiled_shader *shader =
4489 ice->shaders.prog[MESA_SHADER_COMPUTE];
4490 struct brw_stage_prog_data *prog_data = shader->prog_data;
4491 struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
4492
4493 // XXX: L3 configuration not set up for SLM
4494 assert(prog_data->total_shared == 0);
4495
4496 if ((dirty & IRIS_DIRTY_CONSTANTS_CS) && shs->cbuf0_needs_upload)
4497 upload_uniforms(ice, MESA_SHADER_COMPUTE);
4498
4499 if (dirty & IRIS_DIRTY_BINDINGS_CS)
4500 iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);
4501
4502 iris_use_optional_res(batch, shs->sampler_table.res, false);
4503 iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false);
4504
4505 if (ice->state.need_border_colors)
4506 iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false);
4507
4508 if (dirty & IRIS_DIRTY_CS) {
4509 /* The MEDIA_VFE_STATE documentation for Gen8+ says:
4510 *
4511 * "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
4512 * the only bits that are changed are scoreboard related: Scoreboard
4513 * Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta. For
4514 * these scoreboard related states, a MEDIA_STATE_FLUSH is
4515 * sufficient."
4516 */
4517 iris_emit_pipe_control_flush(batch, PIPE_CONTROL_CS_STALL);
4518
4519 iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
4520 if (prog_data->total_scratch) {
4521 uint32_t scratch_addr =
4522 iris_get_scratch_space(ice, prog_data->total_scratch,
4523 MESA_SHADER_COMPUTE);
4524 vfe.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
4525 vfe.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr);
4526 }
4527
4528 vfe.MaximumNumberofThreads =
4529 devinfo->max_cs_threads * screen->subslice_total - 1;
4530 #if GEN_GEN < 11
4531 vfe.ResetGatewayTimer =
4532 Resettingrelativetimerandlatchingtheglobaltimestamp;
4533 #endif
4534
4535 vfe.NumberofURBEntries = 2;
4536 vfe.URBEntryAllocationSize = 2;
4537
4538 // XXX: Use Indirect Payload Storage?
4539 vfe.CURBEAllocationSize =
4540 ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
4541 cs_prog_data->push.cross_thread.regs, 2);
4542 }
4543 }
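/* A sketch of the CURBEAllocationSize math above: push constant sizes
 * are counted in 32-byte registers. Cross-thread data is stored once,
 * then each of the N threads gets its own per-thread block, so e.g.
 * 1 cross-thread reg and 1 per-thread reg with 8 threads needs
 * ALIGN(1 * 8 + 1, 2) = 10 registers.
 */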
4544
4545 // XXX: hack iris_set_constant_buffers to upload these thread counts
4546 // XXX: along with regular uniforms for compute shaders, somehow.
4547
4548 uint32_t curbe_data_offset = 0;
4549 // TODO: Move subgroup-id into uniforms ubo so we can push uniforms
4550 assert(cs_prog_data->push.cross_thread.dwords == 0 &&
4551 cs_prog_data->push.per_thread.dwords == 1 &&
4552 cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
4553 struct pipe_resource *curbe_data_res = NULL;
4554 uint32_t *curbe_data_map =
4555 stream_state(batch, ice->state.dynamic_uploader, &curbe_data_res,
4556 ALIGN(cs_prog_data->push.total.size, 64), 64,
4557 &curbe_data_offset);
4558 assert(curbe_data_map);
4559 memset(curbe_data_map, 0x5a, ALIGN(cs_prog_data->push.total.size, 64));
4560 iris_fill_cs_push_const_buffer(cs_prog_data, curbe_data_map);
4561
4562 if (dirty & IRIS_DIRTY_CONSTANTS_CS) {
4563 iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
4564 curbe.CURBETotalDataLength =
4565 ALIGN(cs_prog_data->push.total.size, 64);
4566 curbe.CURBEDataStartAddress = curbe_data_offset;
4567 }
4568 }
4569
4570 if (dirty & (IRIS_DIRTY_SAMPLER_STATES_CS |
4571 IRIS_DIRTY_BINDINGS_CS |
4572 IRIS_DIRTY_CONSTANTS_CS |
4573 IRIS_DIRTY_CS)) {
4574 struct pipe_resource *desc_res = NULL;
4575 uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
4576
4577 iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
4578 idd.SamplerStatePointer = shs->sampler_table.offset;
4579 idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
4580 idd.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
4581 idd.CrossThreadConstantDataReadLength =
4582 cs_prog_data->push.cross_thread.regs;
4583 }
4584
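/* Merge in the INTERFACE_DESCRIPTOR_DATA fields that
 * iris_store_cs_state() packed at compile time; the static and dynamic
 * copies set disjoint bits, so a DWord-wise OR combines them.
 */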
4585 for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
4586 desc[i] |= ((uint32_t *) shader->derived_data)[i];
4587
4588 iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
4589 load.InterfaceDescriptorTotalLength =
4590 GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
4591 load.InterfaceDescriptorDataStartAddress =
4592 emit_state(batch, ice->state.dynamic_uploader,
4593 &desc_res, desc, sizeof(desc), 32);
4594 }
4595
4596 pipe_resource_reference(&desc_res, NULL);
4597 }
4598
4599 uint32_t group_size = grid->block[0] * grid->block[1] * grid->block[2];
4600 uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);
4601 uint32_t right_mask;
4602
4603 if (remainder > 0)
4604 right_mask = ~0u >> (32 - remainder);
4605 else
4606 right_mask = ~0u >> (32 - cs_prog_data->simd_size);
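/* A worked example of the mask math above: each thread executes
 * simd_size channels, so a group whose size isn't a multiple of
 * simd_size needs a partially-masked final thread. With simd_size = 16
 * and group_size = 24, remainder = 8 and right_mask = 0x000000ff,
 * enabling only 8 channels in the rightmost thread; full threads use
 * the all-ones bottom mask.
 */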
4607
4608 #define GPGPU_DISPATCHDIMX 0x2500
4609 #define GPGPU_DISPATCHDIMY 0x2504
4610 #define GPGPU_DISPATCHDIMZ 0x2508
4611
4612 if (grid->indirect) {
4613 struct iris_state_ref *grid_size = &ice->state.grid_size;
4614 struct iris_bo *bo = iris_resource_bo(grid_size->res);
4615 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
4616 lrm.RegisterAddress = GPGPU_DISPATCHDIMX;
4617 lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 0);
4618 }
4619 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
4620 lrm.RegisterAddress = GPGPU_DISPATCHDIMY;
4621 lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 4);
4622 }
4623 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
4624 lrm.RegisterAddress = GPGPU_DISPATCHDIMZ;
4625 lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 8);
4626 }
4627 }
4628
4629 iris_emit_cmd(batch, GENX(GPGPU_WALKER), ggw) {
4630 ggw.IndirectParameterEnable = grid->indirect != NULL;
4631 ggw.SIMDSize = cs_prog_data->simd_size / 16;
4632 ggw.ThreadDepthCounterMaximum = 0;
4633 ggw.ThreadHeightCounterMaximum = 0;
4634 ggw.ThreadWidthCounterMaximum = cs_prog_data->threads - 1;
4635 ggw.ThreadGroupIDXDimension = grid->grid[0];
4636 ggw.ThreadGroupIDYDimension = grid->grid[1];
4637 ggw.ThreadGroupIDZDimension = grid->grid[2];
4638 ggw.RightExecutionMask = right_mask;
4639 ggw.BottomExecutionMask = 0xffffffff;
4640 }
4641
4642 iris_emit_cmd(batch, GENX(MEDIA_STATE_FLUSH), msf);
4643
4644 if (!batch->contains_draw) {
4645 iris_restore_compute_saved_bos(ice, batch, grid);
4646 batch->contains_draw = true;
4647 }
4648 }
4649
4650 /**
4651 * State module teardown.
4652 */
4653 static void
4654 iris_destroy_state(struct iris_context *ice)
4655 {
4656 iris_free_vertex_buffers(&ice->state.genx->vertex_buffers);
4657
4658 // XXX: unreference resources/surfaces.
4659 for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
4660 pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
4661 }
4662 pipe_surface_reference(&ice->state.framebuffer.zsbuf, NULL);
4663
4664 for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
4665 struct iris_shader_state *shs = &ice->state.shaders[stage];
4666 pipe_resource_reference(&shs->sampler_table.res, NULL);
4667 }
4668 free(ice->state.genx);
4669
4670 pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
4671 pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
4672 pipe_resource_reference(&ice->state.last_res.color_calc, NULL);
4673 pipe_resource_reference(&ice->state.last_res.scissor, NULL);
4674 pipe_resource_reference(&ice->state.last_res.blend, NULL);
4675 pipe_resource_reference(&ice->state.last_res.index_buffer, NULL);
4676 }
4677
4678 /* ------------------------------------------------------------------- */
4679
4680 static void
4681 iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
4682 uint32_t val)
4683 {
4684 _iris_emit_lri(batch, reg, val);
4685 }
4686
4687 static void
4688 iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
4689 uint64_t val)
4690 {
4691 _iris_emit_lri(batch, reg + 0, val & 0xffffffff);
4692 _iris_emit_lri(batch, reg + 4, val >> 32);
4693 }
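/* MMIO registers wider than 32 bits are exposed as two consecutive
 * DWords, so the 64-bit helpers here simply split the access: the low
 * DWord at reg + 0 and the high DWord at reg + 4.
 */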
4694
4695 /**
4696 * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
4697 */
4698 static void
4699 iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
4700 struct iris_bo *bo, uint32_t offset)
4701 {
4702 iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
4703 lrm.RegisterAddress = reg;
4704 lrm.MemoryAddress = ro_bo(bo, offset);
4705 }
4706 }
4707
4708 /**
4709 * Load a 64-bit value from a buffer into a MMIO register via
4710 * two MI_LOAD_REGISTER_MEM commands.
4711 */
4712 static void
4713 iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
4714 struct iris_bo *bo, uint32_t offset)
4715 {
4716 iris_load_register_mem32(batch, reg + 0, bo, offset + 0);
4717 iris_load_register_mem32(batch, reg + 4, bo, offset + 4);
4718 }
4719
4720 static void
4721 iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
4722 struct iris_bo *bo, uint32_t offset,
4723 bool predicated)
4724 {
4725 iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
4726 srm.RegisterAddress = reg;
4727 srm.MemoryAddress = rw_bo(bo, offset);
4728 srm.PredicateEnable = predicated;
4729 }
4730 }
4731
4732 static void
4733 iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
4734 struct iris_bo *bo, uint32_t offset,
4735 bool predicated)
4736 {
4737 iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated);
4738 iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated);
4739 }
4740
static void
iris_store_data_imm32(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint32_t imm)
{
   iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) {
      sdi.Address = rw_bo(bo, offset);
      sdi.ImmediateData = imm;
   }
}

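/**
 * Emit MI_STORE_DATA_IMM to write a 64-bit immediate into a buffer.
 */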
static void
iris_store_data_imm64(struct iris_batch *batch,
                      struct iris_bo *bo, uint32_t offset,
                      uint64_t imm)
{
   /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of
    * 2 in genxml but it's actually variable length and we need 5 DWords.
    */
   void *map = iris_get_command_space(batch, 4 * 5);
   _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) {
      sdi.DWordLength = 5 - 2;
      sdi.Address = rw_bo(bo, offset);
      sdi.ImmediateData = imm;
   }
}

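/**
 * Copy `bytes` of data from one buffer to another, a DWord at a time,
 * using a series of MI_COPY_MEM_MEM commands.
 */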
static void
iris_copy_mem_mem(struct iris_batch *batch,
                  struct iris_bo *dst_bo, uint32_t dst_offset,
                  struct iris_bo *src_bo, uint32_t src_offset,
                  unsigned bytes)
{
   /* MI_COPY_MEM_MEM operates on DWords. */
   assert(bytes % 4 == 0);
   assert(dst_offset % 4 == 0);
   assert(src_offset % 4 == 0);

   for (unsigned i = 0; i < bytes; i += 4) {
      iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
         cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i);
         cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
      }
   }
}
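
/* A minimal usage sketch (illustrative only; `dst` and `src` here are
 * hypothetical buffer objects): copying a 64-bit query result takes two
 * DWord-sized copies, which the helper emits for us:
 *
 *    iris_copy_mem_mem(batch, dst, 0, src, 0, 8);
 */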

/* ------------------------------------------------------------------- */

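/**
 * Translate PIPE_CONTROL_WRITE_* flags into the hardware's "Post Sync
 * Operation" field encoding (0 means no post-sync write).
 */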
static unsigned
flags_to_post_sync_op(uint32_t flags)
{
   if (flags & PIPE_CONTROL_WRITE_IMMEDIATE)
      return WriteImmediateData;

   if (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT)
      return WritePSDepthCount;

   if (flags & PIPE_CONTROL_WRITE_TIMESTAMP)
      return WriteTimestamp;

   return 0;
}

/**
 * Do the given flags have a Post Sync or LRI Post Sync operation?
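 *
 * Returns the input masked down to just the post-sync-related bits.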
 */
static enum pipe_control_flags
get_post_sync_flags(enum pipe_control_flags flags)
{
   flags &= PIPE_CONTROL_WRITE_IMMEDIATE |
            PIPE_CONTROL_WRITE_DEPTH_COUNT |
            PIPE_CONTROL_WRITE_TIMESTAMP |
            PIPE_CONTROL_LRI_POST_SYNC_OP;

   /* Only one "Post Sync Op" is allowed, and it's mutually exclusive with
    * "LRI Post Sync Operation".  So more than one bit set would be illegal.
    */
   assert(util_bitcount(flags) <= 1);

   return flags;
}

// XXX: compute support
#define IS_COMPUTE_PIPELINE(batch) ((batch)->engine != I915_EXEC_RENDER)

/**
 * Emit a series of PIPE_CONTROL commands, taking into account any
 * workarounds necessary to actually accomplish the caller's request.
 *
 * Unless otherwise noted, spec quotations in this function come from:
 *
 *    Synchronization of the 3D Pipeline > PIPE_CONTROL Command > Programming
 *    Restrictions for PIPE_CONTROL.
 *
 * You should not use this function directly.  Use the helpers in
 * iris_pipe_control.c instead, which may split the pipe control further.
 */
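/* As a hedged illustration (the flag combination is hypothetical): a
 * helper might request a render target flush plus a CS stall by passing
 * PIPE_CONTROL_RENDER_TARGET_FLUSH | PIPE_CONTROL_CS_STALL, and the
 * logic below folds in whatever generation-specific workaround bits
 * that combination requires before the packet is emitted.
 */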
static void
iris_emit_raw_pipe_control(struct iris_batch *batch, uint32_t flags,
                           struct iris_bo *bo, uint32_t offset, uint64_t imm)
{
   UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
   enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
   enum pipe_control_flags non_lri_post_sync_flags =
      post_sync_flags & ~PIPE_CONTROL_LRI_POST_SYNC_OP;

   /* Recursive PIPE_CONTROL workarounds --------------------------------
    * (http://knowyourmeme.com/memes/xzibit-yo-dawg)
    *
    * We do these first because we want to look at the original operation,
    * rather than any workarounds we set.
    */
   if (GEN_GEN == 9 && (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
      /* The PIPE_CONTROL "VF Cache Invalidation Enable" bit description
       * lists several workarounds:
       *
       *    "Project: SKL, KBL, BXT
       *
       *     If the VF Cache Invalidation Enable is set to a 1 in a
       *     PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields
       *     sets to 0, with the VF Cache Invalidation Enable set to 0
       *     needs to be sent prior to the PIPE_CONTROL with VF Cache
       *     Invalidation Enable set to a 1."
       */
      iris_emit_raw_pipe_control(batch, 0, NULL, 0, 0);
   }

   if (GEN_GEN == 9 && IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
      /* Project: SKL / Argument: LRI Post Sync Operation [23]
       *
       *    "PIPECONTROL command with “Command Streamer Stall Enable” must be
       *     programmed prior to programming a PIPECONTROL command with "LRI
       *     Post Sync Operation" in GPGPU mode of operation (i.e when
       *     PIPELINE_SELECT command is set to GPGPU mode of operation)."
       *
       * The same text exists a few rows below for Post Sync Op.
       */
      iris_emit_raw_pipe_control(batch, PIPE_CONTROL_CS_STALL, bo, offset, imm);
   }

   if (GEN_GEN == 10 && (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH)) {
      /* Cannonlake:
       *    "Before sending a PIPE_CONTROL command with bit 12 set, SW must issue
       *     another PIPE_CONTROL with Render Target Cache Flush Enable (bit 12)
       *     = 0 and Pipe Control Flush Enable (bit 7) = 1"
       */
      iris_emit_raw_pipe_control(batch, PIPE_CONTROL_FLUSH_ENABLE, bo,
                                 offset, imm);
   }

   /* "Flush Types" workarounds ---------------------------------------------
    * We do these now because they may add post-sync operations or CS stalls.
    */

   if (GEN_GEN < 11 && flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) {
      /* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
       *
       * "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
       *  'Write PS Depth Count' or 'Write Timestamp'."
       */
      if (!bo) {
         flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
         post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
         non_lri_post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
         bo = batch->screen->workaround_bo;
      }
   }

   /* #1130 from Gen10 workarounds page:
    *
    *    "Enable Depth Stall on every Post Sync Op if Render target Cache
    *     Flush is not enabled in same PIPE CONTROL and Enable Pixel score
    *     board stall if Render target cache flush is enabled."
    *
    * Applicable to CNL B0 and C0 steppings only.
    *
    * The wording here is unclear, and this workaround doesn't look anything
    * like the internal bug report recommendations, but leave it be for now...
    */
   if (GEN_GEN == 10) {
      if (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) {
         flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
      } else if (flags & non_lri_post_sync_flags) {
         flags |= PIPE_CONTROL_DEPTH_STALL;
      }
   }

   if (flags & PIPE_CONTROL_DEPTH_STALL) {
      /* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
       *
       *    "This bit must be DISABLED for operations other than writing
       *     PS_DEPTH_COUNT."
       *
       * This seems like nonsense.  An Ivybridge workaround requires us to
       * emit a PIPE_CONTROL with a depth stall and write immediate post-sync
       * operation.  Gen8+ requires us to emit depth stalls and depth cache
       * flushes together.  So, it's hard to imagine this means anything other
       * than "we originally intended this to be used for PS_DEPTH_COUNT".
       *
       * We ignore the supposed restriction and do nothing.
       */
   }

   if (flags & (PIPE_CONTROL_RENDER_TARGET_FLUSH |
                PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
      /* From the PIPE_CONTROL instruction table, bit 12 and bit 1:
       *
       *    "This bit must be DISABLED for End-of-pipe (Read) fences,
       *     PS_DEPTH_COUNT or TIMESTAMP queries."
       *
       * TODO: Implement end-of-pipe checking.
       */
      assert(!(post_sync_flags & (PIPE_CONTROL_WRITE_DEPTH_COUNT |
                                  PIPE_CONTROL_WRITE_TIMESTAMP)));
   }

   if (GEN_GEN < 11 && (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
      /* From the PIPE_CONTROL instruction table, bit 1:
       *
       *    "This bit is ignored if Depth Stall Enable is set.
       *     Further, the render cache is not flushed even if Write Cache
       *     Flush Enable bit is set."
       *
       * We assert that the caller doesn't do this combination, to try and
       * prevent mistakes.  It shouldn't hurt the GPU, though.
       *
       * We skip this check on Gen11+ as the "Stall at Pixel Scoreboard"
       * and "Render Target Flush" combo is explicitly required for BTI
       * update workarounds.
       */
      assert(!(flags & (PIPE_CONTROL_DEPTH_STALL |
                        PIPE_CONTROL_RENDER_TARGET_FLUSH)));
   }

   /* PIPE_CONTROL page workarounds ------------------------------------- */

   if (GEN_GEN <= 8 && (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE)) {
      /* From the PIPE_CONTROL page itself:
       *
       *    "IVB, HSW, BDW
       *     Restriction: Pipe_control with CS-stall bit set must be issued
       *     before a pipe-control command that has the State Cache
       *     Invalidate bit set."
       */
      flags |= PIPE_CONTROL_CS_STALL;
   }

   if (flags & PIPE_CONTROL_FLUSH_LLC) {
      /* From the PIPE_CONTROL instruction table, bit 26 (Flush LLC):
       *
       *    "Project: ALL
       *     SW must always program Post-Sync Operation to "Write Immediate
       *     Data" when Flush LLC is set."
       *
       * For now, we just require the caller to do it.
       */
      assert(flags & PIPE_CONTROL_WRITE_IMMEDIATE);
   }

   /* "Post-Sync Operation" workarounds -------------------------------- */

   /* Project: All / Argument: Global Snapshot Count Reset [19]
    *
    * "This bit must not be exercised on any product.
    *  Requires stall bit ([20] of DW1) set."
    *
    * We don't use this, so we just assert that it isn't used.  The
    * PIPE_CONTROL instruction page indicates that they intended this
    * as a debug feature and don't think it is useful in production,
    * but it may actually be usable, should we ever want to.
    */
   assert((flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) == 0);

   if (flags & (PIPE_CONTROL_MEDIA_STATE_CLEAR |
                PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE)) {
      /* Project: All / Arguments:
       *
       * - Generic Media State Clear [16]
       * - Indirect State Pointers Disable [16]
       *
       *    "Requires stall bit ([20] of DW1) set."
       *
       * Also, the PIPE_CONTROL instruction table, bit 16 (Generic Media
       * State Clear) says:
       *
       *    "PIPECONTROL command with “Command Streamer Stall Enable” must be
       *     programmed prior to programming a PIPECONTROL command with "Media
       *     State Clear" set in GPGPU mode of operation"
       *
       * This is a subset of the earlier rule, so there's nothing to do.
       */
      flags |= PIPE_CONTROL_CS_STALL;
   }

   if (flags & PIPE_CONTROL_STORE_DATA_INDEX) {
      /* Project: All / Argument: Store Data Index
       *
       *    "Post-Sync Operation ([15:14] of DW1) must be set to something other
       *     than '0'."
       *
       * For now, we just assert that the caller does this.  We might want to
       * automatically add a write to the workaround BO...
       */
      assert(non_lri_post_sync_flags != 0);
   }

   if (flags & PIPE_CONTROL_SYNC_GFDT) {
      /* Project: All / Argument: Sync GFDT
       *
       *    "Post-Sync Operation ([15:14] of DW1) must be set to something other
       *     than '0' or 0x2520[13] must be set."
       *
       * For now, we just assert that the caller does this.
       */
      assert(non_lri_post_sync_flags != 0);
   }

   if (flags & PIPE_CONTROL_TLB_INVALIDATE) {
      /* Project: IVB+ / Argument: TLB inv
       *
       *    "Requires stall bit ([20] of DW1) set."
       *
       * Also, from the PIPE_CONTROL instruction table:
       *
       *    "Project: SKL+
       *     Post Sync Operation or CS stall must be set to ensure a TLB
       *     invalidation occurs.  Otherwise no cycle will occur to the TLB
       *     cache to invalidate."
       *
       * This isn't a subset of the earlier rule, but setting the CS stall
       * bit satisfies both requirements at once.
       */
      flags |= PIPE_CONTROL_CS_STALL;
   }

   if (GEN_GEN == 9 && devinfo->gt == 4) {
      /* TODO: The big Skylake GT4 post sync op workaround */
   }

   /* "GPGPU specific workarounds" (both post-sync and flush) ------------ */

   if (IS_COMPUTE_PIPELINE(batch)) {
      if (GEN_GEN >= 9 && (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE)) {
         /* Project: SKL+ / Argument: Tex Invalidate
          *
          *    "Requires stall bit ([20] of DW) set for all GPGPU Workloads."
          */
         flags |= PIPE_CONTROL_CS_STALL;
      }

      if (GEN_GEN == 8 && (post_sync_flags ||
                           (flags & (PIPE_CONTROL_NOTIFY_ENABLE |
                                     PIPE_CONTROL_DEPTH_STALL |
                                     PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                     PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                     PIPE_CONTROL_DATA_CACHE_FLUSH)))) {
         /* Project: BDW / Arguments:
          *
          * - LRI Post Sync Operation [23]
          * - Post Sync Op [15:14]
          * - Notify En [8]
          * - Depth Stall [13]
          * - Render Target Cache Flush [12]
          * - Depth Cache Flush [0]
          * - DC Flush Enable [5]
          *
          *    "Requires stall bit ([20] of DW) set for all GPGPU and Media
          *     Workloads."
          */
         flags |= PIPE_CONTROL_CS_STALL;

         /* Also, from the PIPE_CONTROL instruction table, bit 20:
          *
          *    "Project: BDW
          *     This bit must be always set when PIPE_CONTROL command is
          *     programmed by GPGPU and MEDIA workloads, except for the cases
          *     when only Read Only Cache Invalidation bits are set (State
          *     Cache Invalidation Enable, Instruction cache Invalidation
          *     Enable, Texture Cache Invalidation Enable, Constant Cache
          *     Invalidation Enable).  This is to WA FFDOP CG issue, this WA
          *     need not implemented when FF_DOP_CG is disable via "Fixed
          *     Function DOP Clock Gate Disable" bit in RC_PSMI_CTRL register."
          *
          * It sounds like we could avoid CS stalls in some cases, but we
          * don't currently bother.  This list isn't exactly the list above,
          * either...
          */
      }
   }

   /* "Stall" workarounds ----------------------------------------------
    * These have to come after the earlier ones because we may have added
    * some additional CS stalls above.
    */

   if (GEN_GEN < 9 && (flags & PIPE_CONTROL_CS_STALL)) {
      /* Project: PRE-SKL, VLV, CHV
       *
       *    "[All Stepping][All SKUs]:
       *
       *     One of the following must also be set:
       *
       *     - Render Target Cache Flush Enable ([12] of DW1)
       *     - Depth Cache Flush Enable ([0] of DW1)
       *     - Stall at Pixel Scoreboard ([1] of DW1)
       *     - Depth Stall ([13] of DW1)
       *     - Post-Sync Operation ([13] of DW1)
       *     - DC Flush Enable ([5] of DW1)"
       *
       * If we don't already have one of those bits set, we choose to add
       * "Stall at Pixel Scoreboard".  Some of the other bits require a
       * CS stall as a workaround (see above), which would send us into
       * an infinite recursion of PIPE_CONTROLs.  "Stall at Pixel Scoreboard"
       * appears to be safe, so we choose that.
       */
      const uint32_t wa_bits = PIPE_CONTROL_RENDER_TARGET_FLUSH |
                               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                               PIPE_CONTROL_WRITE_IMMEDIATE |
                               PIPE_CONTROL_WRITE_DEPTH_COUNT |
                               PIPE_CONTROL_WRITE_TIMESTAMP |
                               PIPE_CONTROL_STALL_AT_SCOREBOARD |
                               PIPE_CONTROL_DEPTH_STALL |
                               PIPE_CONTROL_DATA_CACHE_FLUSH;
      if (!(flags & wa_bits))
         flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
   }

   /* Emit --------------------------------------------------------------- */

   iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
      pc.LRIPostSyncOperation = NoLRIOperation;
      pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
      pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
      pc.StoreDataIndex = 0;
      pc.CommandStreamerStallEnable = flags & PIPE_CONTROL_CS_STALL;
      pc.GlobalSnapshotCountReset =
         flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET;
      pc.TLBInvalidate = flags & PIPE_CONTROL_TLB_INVALIDATE;
      pc.GenericMediaStateClear = flags & PIPE_CONTROL_MEDIA_STATE_CLEAR;
      pc.StallAtPixelScoreboard = flags & PIPE_CONTROL_STALL_AT_SCOREBOARD;
      pc.RenderTargetCacheFlushEnable =
         flags & PIPE_CONTROL_RENDER_TARGET_FLUSH;
      pc.DepthCacheFlushEnable = flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH;
      pc.StateCacheInvalidationEnable =
         flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE;
      pc.VFCacheInvalidationEnable = flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
      pc.ConstantCacheInvalidationEnable =
         flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE;
      pc.PostSyncOperation = flags_to_post_sync_op(flags);
      pc.DepthStallEnable = flags & PIPE_CONTROL_DEPTH_STALL;
      pc.InstructionCacheInvalidateEnable =
         flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE;
      pc.NotifyEnable = flags & PIPE_CONTROL_NOTIFY_ENABLE;
      pc.IndirectStatePointersDisable =
         flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE;
      pc.TextureCacheInvalidationEnable =
         flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
      pc.Address = rw_bo(bo, offset);
      pc.ImmediateData = imm;
   }
}

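/**
 * Plug our state-related driver functions into the pipe_context hooks
 * and the gen-agnostic iris vtable, then set up initial default state.
 */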
void
genX(init_state)(struct iris_context *ice)
{
   struct pipe_context *ctx = &ice->ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;

   ctx->create_blend_state = iris_create_blend_state;
   ctx->create_depth_stencil_alpha_state = iris_create_zsa_state;
   ctx->create_rasterizer_state = iris_create_rasterizer_state;
   ctx->create_sampler_state = iris_create_sampler_state;
   ctx->create_sampler_view = iris_create_sampler_view;
   ctx->create_surface = iris_create_surface;
   ctx->create_vertex_elements_state = iris_create_vertex_elements;
   ctx->bind_blend_state = iris_bind_blend_state;
   ctx->bind_depth_stencil_alpha_state = iris_bind_zsa_state;
   ctx->bind_sampler_states = iris_bind_sampler_states;
   ctx->bind_rasterizer_state = iris_bind_rasterizer_state;
   ctx->bind_vertex_elements_state = iris_bind_vertex_elements_state;
   ctx->delete_blend_state = iris_delete_state;
   ctx->delete_depth_stencil_alpha_state = iris_delete_state;
   ctx->delete_fs_state = iris_delete_state;
   ctx->delete_rasterizer_state = iris_delete_state;
   ctx->delete_sampler_state = iris_delete_state;
   ctx->delete_vertex_elements_state = iris_delete_state;
   ctx->delete_tcs_state = iris_delete_state;
   ctx->delete_tes_state = iris_delete_state;
   ctx->delete_gs_state = iris_delete_state;
   ctx->delete_vs_state = iris_delete_state;
   ctx->set_blend_color = iris_set_blend_color;
   ctx->set_clip_state = iris_set_clip_state;
   ctx->set_constant_buffer = iris_set_constant_buffer;
   ctx->set_shader_buffers = iris_set_shader_buffers;
   ctx->set_shader_images = iris_set_shader_images;
   ctx->set_sampler_views = iris_set_sampler_views;
   ctx->set_tess_state = iris_set_tess_state;
   ctx->set_framebuffer_state = iris_set_framebuffer_state;
   ctx->set_polygon_stipple = iris_set_polygon_stipple;
   ctx->set_sample_mask = iris_set_sample_mask;
   ctx->set_scissor_states = iris_set_scissor_states;
   ctx->set_stencil_ref = iris_set_stencil_ref;
   ctx->set_vertex_buffers = iris_set_vertex_buffers;
   ctx->set_viewport_states = iris_set_viewport_states;
   ctx->sampler_view_destroy = iris_sampler_view_destroy;
   ctx->surface_destroy = iris_surface_destroy;
   ctx->draw_vbo = iris_draw_vbo;
   ctx->launch_grid = iris_launch_grid;
   ctx->create_stream_output_target = iris_create_stream_output_target;
   ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
   ctx->set_stream_output_targets = iris_set_stream_output_targets;

   ice->vtbl.destroy_state = iris_destroy_state;
   ice->vtbl.init_render_context = iris_init_render_context;
   ice->vtbl.init_compute_context = iris_init_compute_context;
   ice->vtbl.upload_render_state = iris_upload_render_state;
   ice->vtbl.update_surface_base_address = iris_update_surface_base_address;
   ice->vtbl.upload_compute_state = iris_upload_compute_state;
   ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
   ice->vtbl.load_register_imm32 = iris_load_register_imm32;
   ice->vtbl.load_register_imm64 = iris_load_register_imm64;
   ice->vtbl.load_register_mem32 = iris_load_register_mem32;
   ice->vtbl.load_register_mem64 = iris_load_register_mem64;
   ice->vtbl.store_register_mem32 = iris_store_register_mem32;
   ice->vtbl.store_register_mem64 = iris_store_register_mem64;
   ice->vtbl.store_data_imm32 = iris_store_data_imm32;
   ice->vtbl.store_data_imm64 = iris_store_data_imm64;
   ice->vtbl.copy_mem_mem = iris_copy_mem_mem;
   ice->vtbl.derived_program_state_size = iris_derived_program_state_size;
   ice->vtbl.store_derived_program_state = iris_store_derived_program_state;
   ice->vtbl.create_so_decl_list = iris_create_so_decl_list;
   ice->vtbl.populate_vs_key = iris_populate_vs_key;
   ice->vtbl.populate_tcs_key = iris_populate_tcs_key;
   ice->vtbl.populate_tes_key = iris_populate_tes_key;
   ice->vtbl.populate_gs_key = iris_populate_gs_key;
   ice->vtbl.populate_fs_key = iris_populate_fs_key;
   ice->vtbl.populate_cs_key = iris_populate_cs_key;

   ice->state.dirty = ~0ull;

   ice->state.statistics_counters_enabled = true;

   ice->state.sample_mask = 0xffff;
   ice->state.num_viewports = 1;
   ice->state.genx = calloc(1, sizeof(struct iris_genx_state));

   /* Make a 1x1x1 null surface for unbound textures */
   void *null_surf_map =
      upload_state(ice->state.surface_uploader, &ice->state.unbound_tex,
                   4 * GENX(RENDER_SURFACE_STATE_length), 64);
   isl_null_fill_state(&screen->isl_dev, null_surf_map, isl_extent3d(1, 1, 1));
   ice->state.unbound_tex.offset +=
      iris_bo_offset_from_base_address(iris_resource_bo(ice->state.unbound_tex.res));

   /* Default all scissor rectangles to be empty regions. */
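   /* An empty region (minx > maxx, miny > maxy) rejects all pixels, so
    * nothing is rasterized until real scissor state is set.
    */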
   for (int i = 0; i < IRIS_MAX_VIEWPORTS; i++) {
      ice->state.scissors[i] = (struct pipe_scissor_state) {
         .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
      };
   }
}