iris: streamout
[mesa.git] / src / gallium / drivers / iris / iris_state.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include <stdio.h>
24 #include <errno.h>
25
26 #if HAVE_VALGRIND
27 #include <valgrind.h>
28 #include <memcheck.h>
29 #define VG(x) x
30 #ifndef NDEBUG
31 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
32 #endif
33 #else
34 #define VG(x)
35 #endif
36
37 #include "pipe/p_defines.h"
38 #include "pipe/p_state.h"
39 #include "pipe/p_context.h"
40 #include "pipe/p_screen.h"
41 #include "util/u_inlines.h"
42 #include "util/u_format.h"
43 #include "util/u_framebuffer.h"
44 #include "util/u_transfer.h"
45 #include "util/u_upload_mgr.h"
46 #include "i915_drm.h"
47 #include "nir.h"
48 #include "intel/compiler/brw_compiler.h"
49 #include "intel/common/gen_l3_config.h"
50 #include "intel/common/gen_sample_positions.h"
51 #include "iris_batch.h"
52 #include "iris_context.h"
53 #include "iris_pipe.h"
54 #include "iris_resource.h"
55
56 #define __gen_address_type struct iris_address
57 #define __gen_user_data struct iris_batch
58
59 #define ARRAY_BYTES(x) (sizeof(uint32_t) * ARRAY_SIZE(x))
60
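/**
 * Resolve an iris_address for genxml packing: pin the referenced BO for the
 * batch and return its full graphics address plus the relocation delta.
 */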
61 static uint64_t
62 __gen_combine_address(struct iris_batch *batch, void *location,
63 struct iris_address addr, uint32_t delta)
64 {
65 uint64_t result = addr.offset + delta;
66
67 if (addr.bo) {
68 iris_use_pinned_bo(batch, addr.bo, addr.write);
69 /* Assume this is a general address, not relative to a base. */
70 result += addr.bo->gtt_offset;
71 }
72
73 return result;
74 }
75
76 #define __genxml_cmd_length(cmd) cmd ## _length
77 #define __genxml_cmd_length_bias(cmd) cmd ## _length_bias
78 #define __genxml_cmd_header(cmd) cmd ## _header
79 #define __genxml_cmd_pack(cmd) cmd ## _pack
80
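/**
 * Command/state packing helpers: iris_pack_command and iris_pack_state fill
 * in a genxml template struct and pack it into `dst`; iris_emit_cmd packs
 * directly into freshly allocated batch space; iris_emit_merge ORs two
 * pre-packed DWord streams together into the batch.
 */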
81 #define _iris_pack_command(batch, cmd, dst, name) \
82 for (struct cmd name = { __genxml_cmd_header(cmd) }, \
83 *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
84 ({ __genxml_cmd_pack(cmd)(batch, (void *)_dst, &name); \
85 _dst = NULL; \
86 }))
87
88 #define iris_pack_command(cmd, dst, name) \
89 _iris_pack_command(NULL, cmd, dst, name)
90
91 #define iris_pack_state(cmd, dst, name) \
92 for (struct cmd name = {}, \
93 *_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
94 __genxml_cmd_pack(cmd)(NULL, (void *)_dst, &name), \
95 _dst = NULL)
96
97 #define iris_emit_cmd(batch, cmd, name) \
98 _iris_pack_command(batch, cmd, iris_get_command_space(batch, 4 * __genxml_cmd_length(cmd)), name)
99
100 #define iris_emit_merge(batch, dwords0, dwords1, num_dwords) \
101 do { \
102 uint32_t *dw = iris_get_command_space(batch, 4 * num_dwords); \
103 for (uint32_t i = 0; i < num_dwords; i++) \
104 dw[i] = (dwords0)[i] | (dwords1)[i]; \
105 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, 4 * num_dwords)); \
106 } while (0)
107
108 #include "genxml/genX_pack.h"
109 #include "genxml/gen_macros.h"
110 #include "genxml/genX_bits.h"
111
112 #define MOCS_WB (2 << 1)
113
114 UNUSED static void pipe_asserts()
115 {
116 #define PIPE_ASSERT(x) STATIC_ASSERT((int)x)
117
118 /* pipe_logicop happens to match the hardware. */
119 PIPE_ASSERT(PIPE_LOGICOP_CLEAR == LOGICOP_CLEAR);
120 PIPE_ASSERT(PIPE_LOGICOP_NOR == LOGICOP_NOR);
121 PIPE_ASSERT(PIPE_LOGICOP_AND_INVERTED == LOGICOP_AND_INVERTED);
122 PIPE_ASSERT(PIPE_LOGICOP_COPY_INVERTED == LOGICOP_COPY_INVERTED);
123 PIPE_ASSERT(PIPE_LOGICOP_AND_REVERSE == LOGICOP_AND_REVERSE);
124 PIPE_ASSERT(PIPE_LOGICOP_INVERT == LOGICOP_INVERT);
125 PIPE_ASSERT(PIPE_LOGICOP_XOR == LOGICOP_XOR);
126 PIPE_ASSERT(PIPE_LOGICOP_NAND == LOGICOP_NAND);
127 PIPE_ASSERT(PIPE_LOGICOP_AND == LOGICOP_AND);
128 PIPE_ASSERT(PIPE_LOGICOP_EQUIV == LOGICOP_EQUIV);
129 PIPE_ASSERT(PIPE_LOGICOP_NOOP == LOGICOP_NOOP);
130 PIPE_ASSERT(PIPE_LOGICOP_OR_INVERTED == LOGICOP_OR_INVERTED);
131 PIPE_ASSERT(PIPE_LOGICOP_COPY == LOGICOP_COPY);
132 PIPE_ASSERT(PIPE_LOGICOP_OR_REVERSE == LOGICOP_OR_REVERSE);
133 PIPE_ASSERT(PIPE_LOGICOP_OR == LOGICOP_OR);
134 PIPE_ASSERT(PIPE_LOGICOP_SET == LOGICOP_SET);
135
136 /* The PIPE_BLENDFACTOR_* values happen to match the hardware. */
137 PIPE_ASSERT(PIPE_BLENDFACTOR_ONE == BLENDFACTOR_ONE);
138 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_COLOR == BLENDFACTOR_SRC_COLOR);
139 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA == BLENDFACTOR_SRC_ALPHA);
140 PIPE_ASSERT(PIPE_BLENDFACTOR_DST_ALPHA == BLENDFACTOR_DST_ALPHA);
141 PIPE_ASSERT(PIPE_BLENDFACTOR_DST_COLOR == BLENDFACTOR_DST_COLOR);
142 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE == BLENDFACTOR_SRC_ALPHA_SATURATE);
143 PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_COLOR == BLENDFACTOR_CONST_COLOR);
144 PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_ALPHA == BLENDFACTOR_CONST_ALPHA);
145 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_COLOR == BLENDFACTOR_SRC1_COLOR);
146 PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_ALPHA == BLENDFACTOR_SRC1_ALPHA);
147 PIPE_ASSERT(PIPE_BLENDFACTOR_ZERO == BLENDFACTOR_ZERO);
148 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_COLOR == BLENDFACTOR_INV_SRC_COLOR);
149 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_ALPHA == BLENDFACTOR_INV_SRC_ALPHA);
150 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_ALPHA == BLENDFACTOR_INV_DST_ALPHA);
151 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_COLOR == BLENDFACTOR_INV_DST_COLOR);
152 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_COLOR == BLENDFACTOR_INV_CONST_COLOR);
153 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_ALPHA == BLENDFACTOR_INV_CONST_ALPHA);
154 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_COLOR == BLENDFACTOR_INV_SRC1_COLOR);
155 PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_ALPHA == BLENDFACTOR_INV_SRC1_ALPHA);
156
157 /* pipe_blend_func happens to match the hardware. */
158 PIPE_ASSERT(PIPE_BLEND_ADD == BLENDFUNCTION_ADD);
159 PIPE_ASSERT(PIPE_BLEND_SUBTRACT == BLENDFUNCTION_SUBTRACT);
160 PIPE_ASSERT(PIPE_BLEND_REVERSE_SUBTRACT == BLENDFUNCTION_REVERSE_SUBTRACT);
161 PIPE_ASSERT(PIPE_BLEND_MIN == BLENDFUNCTION_MIN);
162 PIPE_ASSERT(PIPE_BLEND_MAX == BLENDFUNCTION_MAX);
163
164 /* pipe_stencil_op happens to match the hardware. */
165 PIPE_ASSERT(PIPE_STENCIL_OP_KEEP == STENCILOP_KEEP);
166 PIPE_ASSERT(PIPE_STENCIL_OP_ZERO == STENCILOP_ZERO);
167 PIPE_ASSERT(PIPE_STENCIL_OP_REPLACE == STENCILOP_REPLACE);
168 PIPE_ASSERT(PIPE_STENCIL_OP_INCR == STENCILOP_INCRSAT);
169 PIPE_ASSERT(PIPE_STENCIL_OP_DECR == STENCILOP_DECRSAT);
170 PIPE_ASSERT(PIPE_STENCIL_OP_INCR_WRAP == STENCILOP_INCR);
171 PIPE_ASSERT(PIPE_STENCIL_OP_DECR_WRAP == STENCILOP_DECR);
172 PIPE_ASSERT(PIPE_STENCIL_OP_INVERT == STENCILOP_INVERT);
173
174 /* pipe_sprite_coord_mode happens to match 3DSTATE_SBE */
175 PIPE_ASSERT(PIPE_SPRITE_COORD_UPPER_LEFT == UPPERLEFT);
176 PIPE_ASSERT(PIPE_SPRITE_COORD_LOWER_LEFT == LOWERLEFT);
177 #undef PIPE_ASSERT
178 }
179
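/**
 * Translate a pipe_prim_type to the hardware 3DPRIM topology. For
 * PIPE_PRIM_PATCHES, the patch vertex count selects _3DPRIM_PATCHLIST_n.
 */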
180 static unsigned
181 translate_prim_type(enum pipe_prim_type prim, uint8_t verts_per_patch)
182 {
183 static const unsigned map[] = {
184 [PIPE_PRIM_POINTS] = _3DPRIM_POINTLIST,
185 [PIPE_PRIM_LINES] = _3DPRIM_LINELIST,
186 [PIPE_PRIM_LINE_LOOP] = _3DPRIM_LINELOOP,
187 [PIPE_PRIM_LINE_STRIP] = _3DPRIM_LINESTRIP,
188 [PIPE_PRIM_TRIANGLES] = _3DPRIM_TRILIST,
189 [PIPE_PRIM_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
190 [PIPE_PRIM_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
191 [PIPE_PRIM_QUADS] = _3DPRIM_QUADLIST,
192 [PIPE_PRIM_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
193 [PIPE_PRIM_POLYGON] = _3DPRIM_POLYGON,
194 [PIPE_PRIM_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
195 [PIPE_PRIM_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
196 [PIPE_PRIM_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
197 [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
198 [PIPE_PRIM_PATCHES] = _3DPRIM_PATCHLIST_1 - 1,
199 };
200
201 return map[prim] + (prim == PIPE_PRIM_PATCHES ? verts_per_patch : 0);
202 }
203
204 static unsigned
205 translate_compare_func(enum pipe_compare_func pipe_func)
206 {
207 static const unsigned map[] = {
208 [PIPE_FUNC_NEVER] = COMPAREFUNCTION_NEVER,
209 [PIPE_FUNC_LESS] = COMPAREFUNCTION_LESS,
210 [PIPE_FUNC_EQUAL] = COMPAREFUNCTION_EQUAL,
211 [PIPE_FUNC_LEQUAL] = COMPAREFUNCTION_LEQUAL,
212 [PIPE_FUNC_GREATER] = COMPAREFUNCTION_GREATER,
213 [PIPE_FUNC_NOTEQUAL] = COMPAREFUNCTION_NOTEQUAL,
214 [PIPE_FUNC_GEQUAL] = COMPAREFUNCTION_GEQUAL,
215 [PIPE_FUNC_ALWAYS] = COMPAREFUNCTION_ALWAYS,
216 };
217 return map[pipe_func];
218 }
219
220 static unsigned
221 translate_shadow_func(enum pipe_compare_func pipe_func)
222 {
223 /* Gallium specifies the result of shadow comparisons as:
224 *
225 * 1 if ref <op> texel,
226 * 0 otherwise.
227 *
228 * The hardware does:
229 *
230 * 0 if texel <op> ref,
231 * 1 otherwise.
232 *
233 * So we need to flip the operator and also negate.
234 */
235 static const unsigned map[] = {
236 [PIPE_FUNC_NEVER] = PREFILTEROPALWAYS,
237 [PIPE_FUNC_LESS] = PREFILTEROPLEQUAL,
238 [PIPE_FUNC_EQUAL] = PREFILTEROPNOTEQUAL,
239 [PIPE_FUNC_LEQUAL] = PREFILTEROPLESS,
240 [PIPE_FUNC_GREATER] = PREFILTEROPGEQUAL,
241 [PIPE_FUNC_NOTEQUAL] = PREFILTEROPEQUAL,
242 [PIPE_FUNC_GEQUAL] = PREFILTEROPGREATER,
243 [PIPE_FUNC_ALWAYS] = PREFILTEROPNEVER,
244 };
245 return map[pipe_func];
246 }
247
248 static unsigned
249 translate_cull_mode(unsigned pipe_face)
250 {
251 static const unsigned map[4] = {
252 [PIPE_FACE_NONE] = CULLMODE_NONE,
253 [PIPE_FACE_FRONT] = CULLMODE_FRONT,
254 [PIPE_FACE_BACK] = CULLMODE_BACK,
255 [PIPE_FACE_FRONT_AND_BACK] = CULLMODE_BOTH,
256 };
257 return map[pipe_face];
258 }
259
260 static unsigned
261 translate_fill_mode(unsigned pipe_polymode)
262 {
263 static const unsigned map[4] = {
264 [PIPE_POLYGON_MODE_FILL] = FILL_MODE_SOLID,
265 [PIPE_POLYGON_MODE_LINE] = FILL_MODE_WIREFRAME,
266 [PIPE_POLYGON_MODE_POINT] = FILL_MODE_POINT,
267 [PIPE_POLYGON_MODE_FILL_RECTANGLE] = FILL_MODE_SOLID,
268 };
269 return map[pipe_polymode];
270 }
271
272 static struct iris_address
273 ro_bo(struct iris_bo *bo, uint64_t offset)
274 {
275 /* Not for CSOs! */
276 return (struct iris_address) { .bo = bo, .offset = offset };
277 }
278
279 static struct iris_address
280 rw_bo(struct iris_bo *bo, uint64_t offset)
281 {
282 /* Not for CSOs! */
283 return (struct iris_address) { .bo = bo, .offset = offset, .write = true };
284 }
285
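/**
 * Allocate space in an upload buffer, recording the backing resource and
 * offset in the given iris_state_ref, and return a CPU pointer to it.
 */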
286 static void *
287 upload_state(struct u_upload_mgr *uploader,
288 struct iris_state_ref *ref,
289 unsigned size,
290 unsigned alignment)
291 {
292 void *p = NULL;
293 u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
294 return p;
295 }
296
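/**
 * Stream out temporary state: like upload_state, but also pins the backing
 * BO in the batch and rewrites the offset to be relative to the memory
 * zone's base address.
 */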
297 static uint32_t *
298 stream_state(struct iris_batch *batch,
299 struct u_upload_mgr *uploader,
300 struct pipe_resource **out_res,
301 unsigned size,
302 unsigned alignment,
303 uint32_t *out_offset)
304 {
305 void *ptr = NULL;
306
307 u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);
308
309 struct iris_bo *bo = iris_resource_bo(*out_res);
310 iris_use_pinned_bo(batch, bo, false);
311
312 *out_offset += iris_bo_offset_from_base_address(bo);
313
314 return ptr;
315 }
316
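/**
 * Stream out a block of data and return its offset, relative to the base
 * address of its memory zone.
 */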
317 static uint32_t
318 emit_state(struct iris_batch *batch,
319 struct u_upload_mgr *uploader,
320 struct pipe_resource **out_res,
321 const void *data,
322 unsigned size,
323 unsigned alignment)
324 {
325 unsigned offset = 0;
326 uint32_t *map =
327 stream_state(batch, uploader, out_res, size, alignment, &offset);
328
329 if (map)
330 memcpy(map, data, size);
331
332 return offset;
333 }
334
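/* Helpers for comparing a field between old and new CSOs when deciding which
 * dirty bits to flag; a missing old CSO counts as changed.
 */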
335 #define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
336 #define cso_changed_memcmp(x) \
337 (!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
338
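/**
 * Upload the initial GPU state for a render context: STATE_BASE_ADDRESS,
 * a maximal drawing rectangle, default sample patterns, a static push
 * constant partitioning, and a few packets we never otherwise change.
 */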
339 static void
340 iris_init_render_context(struct iris_screen *screen,
341 struct iris_batch *batch,
342 struct iris_vtable *vtbl,
343 struct pipe_debug_callback *dbg)
344 {
345 iris_init_batch(batch, screen, vtbl, dbg, I915_EXEC_RENDER);
346
347 /* XXX: PIPE_CONTROLs */
348
349 iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
350 #if 0
351 // XXX: MOCS is stupid for this.
352 sba.GeneralStateMemoryObjectControlState = MOCS_WB;
353 sba.StatelessDataPortAccessMemoryObjectControlState = MOCS_WB;
354 sba.SurfaceStateMemoryObjectControlState = MOCS_WB;
355 sba.DynamicStateMemoryObjectControlState = MOCS_WB;
356 sba.IndirectObjectMemoryObjectControlState = MOCS_WB;
357 sba.InstructionMemoryObjectControlState = MOCS_WB;
358 sba.BindlessSurfaceStateMemoryObjectControlState = MOCS_WB;
359 #endif
360
361 sba.GeneralStateBaseAddressModifyEnable = true;
362 sba.SurfaceStateBaseAddressModifyEnable = true;
363 sba.DynamicStateBaseAddressModifyEnable = true;
364 sba.IndirectObjectBaseAddressModifyEnable = true;
365 sba.InstructionBaseAddressModifyEnable = true;
366 sba.GeneralStateBufferSizeModifyEnable = true;
367 sba.DynamicStateBufferSizeModifyEnable = true;
368 sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
369 sba.IndirectObjectBufferSizeModifyEnable = true;
370 sba.InstructionBuffersizeModifyEnable = true;
371
372 sba.InstructionBaseAddress = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
373 sba.SurfaceStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_SURFACE_START);
374 sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);
375
376 sba.GeneralStateBufferSize = 0xfffff;
377 sba.IndirectObjectBufferSize = 0xfffff;
378 sba.InstructionBufferSize = 0xfffff;
379 sba.DynamicStateBufferSize = 0xfffff;
380 }
381
382 iris_emit_cmd(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
383 rect.ClippedDrawingRectangleXMax = UINT16_MAX;
384 rect.ClippedDrawingRectangleYMax = UINT16_MAX;
385 }
386 iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_PATTERN), pat) {
387 GEN_SAMPLE_POS_1X(pat._1xSample);
388 GEN_SAMPLE_POS_2X(pat._2xSample);
389 GEN_SAMPLE_POS_4X(pat._4xSample);
390 GEN_SAMPLE_POS_8X(pat._8xSample);
391 GEN_SAMPLE_POS_16X(pat._16xSample);
392 }
393 iris_emit_cmd(batch, GENX(3DSTATE_AA_LINE_PARAMETERS), foo);
394 iris_emit_cmd(batch, GENX(3DSTATE_WM_CHROMAKEY), foo);
395 iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);
396 /* XXX: may need to set an offset for origin-UL framebuffers */
397 iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);
398
399 /* Just assign a static partitioning. */
400 for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
401 iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
402 alloc._3DCommandSubOpcode = 18 + i;
403 alloc.ConstantBufferOffset = 6 * i;
404 alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
405 }
406 }
407 }
408
409 struct iris_viewport_state {
410 uint32_t sf_cl_vp[GENX(SF_CLIP_VIEWPORT_length) * IRIS_MAX_VIEWPORTS];
411 };
412
413 struct iris_vertex_buffer_state {
414 uint32_t vertex_buffers[1 + 33 * GENX(VERTEX_BUFFER_STATE_length)];
415 struct pipe_resource *resources[33];
416 unsigned num_buffers;
417 };
418
419 struct iris_depth_buffer_state {
420 uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
421 GENX(3DSTATE_STENCIL_BUFFER_length) +
422 GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
423 GENX(3DSTATE_CLEAR_PARAMS_length)];
424 };
425
426 /**
427 * State that can't be stored directly in iris_context because the data
428 * layout varies per generation.
429 */
430 struct iris_genx_state {
431 struct iris_viewport_state viewport;
432 struct iris_vertex_buffer_state vertex_buffers;
433 struct iris_depth_buffer_state depth_buffer;
434
435 uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];
436 uint32_t streamout[4 * GENX(3DSTATE_STREAMOUT_length)];
437 };
438
439 static void
440 iris_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *info)
441 {
442 }
443
444 static void
445 iris_set_blend_color(struct pipe_context *ctx,
446 const struct pipe_blend_color *state)
447 {
448 struct iris_context *ice = (struct iris_context *) ctx;
449
450 memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
451 ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
452 }
453
454 struct iris_blend_state {
455 /** Partial 3DSTATE_PS_BLEND */
456 uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];
457
458 /** Partial BLEND_STATE */
459 uint32_t blend_state[GENX(BLEND_STATE_length) +
460 BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)];
461
462 bool alpha_to_coverage; /* for shader key */
463 };
464
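/**
 * Create a blend CSO: pre-pack most of 3DSTATE_PS_BLEND and BLEND_STATE
 * (with one entry per render target). A few alpha-test related fields are
 * deferred until draw time.
 */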
465 static void *
466 iris_create_blend_state(struct pipe_context *ctx,
467 const struct pipe_blend_state *state)
468 {
469 struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
470 uint32_t *blend_state = cso->blend_state;
471
472 cso->alpha_to_coverage = state->alpha_to_coverage;
473
474 iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
475 /* pb.HasWriteableRT is filled in at draw time. */
476 /* pb.AlphaTestEnable is filled in at draw time. */
477 pb.AlphaToCoverageEnable = state->alpha_to_coverage;
478 pb.IndependentAlphaBlendEnable = state->independent_blend_enable;
479
480 pb.ColorBufferBlendEnable = state->rt[0].blend_enable;
481
482 pb.SourceBlendFactor = state->rt[0].rgb_src_factor;
483 pb.SourceAlphaBlendFactor = state->rt[0].alpha_src_factor;
484 pb.DestinationBlendFactor = state->rt[0].rgb_dst_factor;
485 pb.DestinationAlphaBlendFactor = state->rt[0].alpha_dst_factor;
486 }
487
488 iris_pack_state(GENX(BLEND_STATE), blend_state, bs) {
489 bs.AlphaToCoverageEnable = state->alpha_to_coverage;
490 bs.IndependentAlphaBlendEnable = state->independent_blend_enable;
491 bs.AlphaToOneEnable = state->alpha_to_one;
492 bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
493 bs.ColorDitherEnable = state->dither;
494 /* bs.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
495 }
496
497 blend_state += GENX(BLEND_STATE_length);
498
499 for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
500 iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_state, be) {
501 be.LogicOpEnable = state->logicop_enable;
502 be.LogicOpFunction = state->logicop_func;
503
504 be.PreBlendSourceOnlyClampEnable = false;
505 be.ColorClampRange = COLORCLAMP_RTFORMAT;
506 be.PreBlendColorClampEnable = true;
507 be.PostBlendColorClampEnable = true;
508
509 be.ColorBufferBlendEnable = state->rt[i].blend_enable;
510
511 be.ColorBlendFunction = state->rt[i].rgb_func;
512 be.AlphaBlendFunction = state->rt[i].alpha_func;
513 be.SourceBlendFactor = state->rt[i].rgb_src_factor;
514 be.SourceAlphaBlendFactor = state->rt[i].alpha_src_factor;
515 be.DestinationBlendFactor = state->rt[i].rgb_dst_factor;
516 be.DestinationAlphaBlendFactor = state->rt[i].alpha_dst_factor;
517
518 be.WriteDisableRed = !(state->rt[i].colormask & PIPE_MASK_R);
519 be.WriteDisableGreen = !(state->rt[i].colormask & PIPE_MASK_G);
520 be.WriteDisableBlue = !(state->rt[i].colormask & PIPE_MASK_B);
521 be.WriteDisableAlpha = !(state->rt[i].colormask & PIPE_MASK_A);
522 }
523 blend_state += GENX(BLEND_STATE_ENTRY_length);
524 }
525
526 return cso;
527 }
528
529 static void
530 iris_bind_blend_state(struct pipe_context *ctx, void *state)
531 {
532 struct iris_context *ice = (struct iris_context *) ctx;
533 ice->state.cso_blend = state;
534 ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
535 ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
536 }
537
538 struct iris_depth_stencil_alpha_state {
539 /** Partial 3DSTATE_WM_DEPTH_STENCIL */
540 uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
541
542 /** Complete CC_VIEWPORT */
543 uint32_t cc_vp[GENX(CC_VIEWPORT_length)];
544
545 /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE */
546 struct pipe_alpha_state alpha;
547 };
548
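/**
 * Create a depth/stencil/alpha CSO: pre-pack 3DSTATE_WM_DEPTH_STENCIL and
 * CC_VIEWPORT (depth bounds), and stash the alpha state for use by the
 * blend and color calc atoms.
 */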
549 static void *
550 iris_create_zsa_state(struct pipe_context *ctx,
551 const struct pipe_depth_stencil_alpha_state *state)
552 {
553 struct iris_depth_stencil_alpha_state *cso =
554 malloc(sizeof(struct iris_depth_stencil_alpha_state));
555
556 cso->alpha = state->alpha;
557
558 bool two_sided_stencil = state->stencil[1].enabled;
559
560 /* The state tracker needs to optimize away EQUAL writes for us. */
561 assert(!(state->depth.func == PIPE_FUNC_EQUAL && state->depth.writemask));
562
563 iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), cso->wmds, wmds) {
564 wmds.StencilFailOp = state->stencil[0].fail_op;
565 wmds.StencilPassDepthFailOp = state->stencil[0].zfail_op;
566 wmds.StencilPassDepthPassOp = state->stencil[0].zpass_op;
567 wmds.StencilTestFunction =
568 translate_compare_func(state->stencil[0].func);
569 wmds.BackfaceStencilFailOp = state->stencil[1].fail_op;
570 wmds.BackfaceStencilPassDepthFailOp = state->stencil[1].zfail_op;
571 wmds.BackfaceStencilPassDepthPassOp = state->stencil[1].zpass_op;
572 wmds.BackfaceStencilTestFunction =
573 translate_compare_func(state->stencil[1].func);
574 wmds.DepthTestFunction = translate_compare_func(state->depth.func);
575 wmds.DoubleSidedStencilEnable = two_sided_stencil;
576 wmds.StencilTestEnable = state->stencil[0].enabled;
577 wmds.StencilBufferWriteEnable =
578 state->stencil[0].writemask != 0 ||
579 (two_sided_stencil && state->stencil[1].writemask != 0);
580 wmds.DepthTestEnable = state->depth.enabled;
581 wmds.DepthBufferWriteEnable = state->depth.writemask;
582 wmds.StencilTestMask = state->stencil[0].valuemask;
583 wmds.StencilWriteMask = state->stencil[0].writemask;
584 wmds.BackfaceStencilTestMask = state->stencil[1].valuemask;
585 wmds.BackfaceStencilWriteMask = state->stencil[1].writemask;
586 /* wmds.[Backface]StencilReferenceValue are merged later */
587 }
588
589 iris_pack_state(GENX(CC_VIEWPORT), cso->cc_vp, ccvp) {
590 if (state->depth.bounds_test) {
591 ccvp.MinimumDepth = state->depth.bounds_min;
592 ccvp.MaximumDepth = state->depth.bounds_max;
593 } else {
594 ccvp.MinimumDepth = 0.0;
595 ccvp.MaximumDepth = 1.0;
596 }
597 }
598
599 return cso;
600 }
601
602 static void
603 iris_bind_zsa_state(struct pipe_context *ctx, void *state)
604 {
605 struct iris_context *ice = (struct iris_context *) ctx;
606 struct iris_depth_stencil_alpha_state *old_cso = ice->state.cso_zsa;
607 struct iris_depth_stencil_alpha_state *new_cso = state;
608
609 if (new_cso) {
610 if (cso_changed(alpha.ref_value))
611 ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
612
613 if (cso_changed(alpha.enabled))
614 ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;
615 }
616
617 ice->state.cso_zsa = new_cso;
618 ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
619 ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
620 }
621
622 struct iris_rasterizer_state {
623 uint32_t sf[GENX(3DSTATE_SF_length)];
624 uint32_t clip[GENX(3DSTATE_CLIP_length)];
625 uint32_t raster[GENX(3DSTATE_RASTER_length)];
626 uint32_t wm[GENX(3DSTATE_WM_length)];
627 uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];
628
629 bool flatshade; /* for shader state */
630 bool flatshade_first; /* for stream output */
631 bool clamp_fragment_color; /* for shader state */
632 bool light_twoside; /* for shader state */
633 bool rasterizer_discard; /* for 3DSTATE_STREAMOUT */
634 bool half_pixel_center; /* for 3DSTATE_MULTISAMPLE */
635 bool line_stipple_enable;
636 bool poly_stipple_enable;
637 enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
638 uint16_t sprite_coord_enable;
639 };
640
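/**
 * Create a rasterizer CSO: pre-pack 3DSTATE_SF, RASTER, CLIP, WM, and
 * LINE_STIPPLE, and record a handful of flags needed by shader keys and
 * other atoms at draw time.
 */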
641 static void *
642 iris_create_rasterizer_state(struct pipe_context *ctx,
643 const struct pipe_rasterizer_state *state)
644 {
645 struct iris_rasterizer_state *cso =
646 malloc(sizeof(struct iris_rasterizer_state));
647
648 #if 0
649 point_quad_rasterization -> SBE?
650
651 not necessary?
652 {
653 poly_smooth
654 force_persample_interp - ?
655 bottom_edge_rule
656
657 offset_units_unscaled - cap not exposed
658 }
659 #endif
660
661 cso->flatshade = state->flatshade;
662 cso->flatshade_first = state->flatshade_first;
663 cso->clamp_fragment_color = state->clamp_fragment_color;
664 cso->light_twoside = state->light_twoside;
665 cso->rasterizer_discard = state->rasterizer_discard;
666 cso->half_pixel_center = state->half_pixel_center;
667 cso->sprite_coord_mode = state->sprite_coord_mode;
668 cso->sprite_coord_enable = state->sprite_coord_enable;
669 cso->line_stipple_enable = state->line_stipple_enable;
670 cso->poly_stipple_enable = state->poly_stipple_enable;
671
672 iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
673 sf.StatisticsEnable = true;
674 sf.ViewportTransformEnable = true;
675 sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
676 sf.LineEndCapAntialiasingRegionWidth =
677 state->line_smooth ? _10pixels : _05pixels;
678 sf.LastPixelEnable = state->line_last_pixel;
679 sf.LineWidth = state->line_width;
680 sf.SmoothPointEnable = state->point_smooth;
681 sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
682 sf.PointWidth = state->point_size;
683
684 if (state->flatshade_first) {
685 sf.TriangleFanProvokingVertexSelect = 1;
686 } else {
687 sf.TriangleStripListProvokingVertexSelect = 2;
688 sf.TriangleFanProvokingVertexSelect = 2;
689 sf.LineStripListProvokingVertexSelect = 1;
690 }
691 }
692
693 iris_pack_command(GENX(3DSTATE_RASTER), cso->raster, rr) {
694 rr.FrontWinding = state->front_ccw ? CounterClockwise : Clockwise;
695 rr.CullMode = translate_cull_mode(state->cull_face);
696 rr.FrontFaceFillMode = translate_fill_mode(state->fill_front);
697 rr.BackFaceFillMode = translate_fill_mode(state->fill_back);
698 rr.DXMultisampleRasterizationEnable = state->multisample;
699 rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
700 rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
701 rr.GlobalDepthOffsetEnablePoint = state->offset_point;
702 rr.GlobalDepthOffsetConstant = state->offset_units * 2;
703 rr.GlobalDepthOffsetScale = state->offset_scale;
704 rr.GlobalDepthOffsetClamp = state->offset_clamp;
705 rr.SmoothPointEnable = state->point_smooth;
706 rr.AntialiasingEnable = state->line_smooth;
707 rr.ScissorRectangleEnable = state->scissor;
708 rr.ViewportZNearClipTestEnable = state->depth_clip_near;
709 rr.ViewportZFarClipTestEnable = state->depth_clip_far;
710 //rr.ConservativeRasterizationEnable = not yet supported by Gallium...
711 }
712
713 iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
714 /* cl.NonPerspectiveBarycentricEnable is filled in at draw time from
715 * the FS program; cl.ForceZeroRTAIndexEnable is filled in from the FB.
716 */
717 cl.StatisticsEnable = true;
718 cl.EarlyCullEnable = true;
719 cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
720 cl.ForceUserClipDistanceClipTestEnableBitmask = true;
721 cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
722 cl.GuardbandClipTestEnable = true;
723 cl.ClipMode = CLIPMODE_NORMAL;
724 cl.ClipEnable = true;
725 cl.ViewportXYClipTestEnable = state->point_tri_clip;
726 cl.MinimumPointWidth = 0.125;
727 cl.MaximumPointWidth = 255.875;
728
729 if (state->flatshade_first) {
730 cl.TriangleFanProvokingVertexSelect = 1;
731 } else {
732 cl.TriangleStripListProvokingVertexSelect = 2;
733 cl.TriangleFanProvokingVertexSelect = 2;
734 cl.LineStripListProvokingVertexSelect = 1;
735 }
736 }
737
738 iris_pack_command(GENX(3DSTATE_WM), cso->wm, wm) {
739 /* wm.BarycentricInterpolationMode and wm.EarlyDepthStencilControl are
740 * filled in at draw time from the FS program.
741 */
742 wm.LineAntialiasingRegionWidth = _10pixels;
743 wm.LineEndCapAntialiasingRegionWidth = _05pixels;
744 wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
745 wm.StatisticsEnable = true;
746 wm.LineStippleEnable = state->line_stipple_enable;
747 wm.PolygonStippleEnable = state->poly_stipple_enable;
748 }
749
750 /* Remap from 0..255 back to 1..256 */
751 const unsigned line_stipple_factor = state->line_stipple_factor + 1;
752
753 iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
754 line.LineStipplePattern = state->line_stipple_pattern;
755 line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
756 line.LineStippleRepeatCount = line_stipple_factor;
757 }
758
759 return cso;
760 }
761
762 static void
763 iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
764 {
765 struct iris_context *ice = (struct iris_context *) ctx;
766 struct iris_rasterizer_state *old_cso = ice->state.cso_rast;
767 struct iris_rasterizer_state *new_cso = state;
768
769 if (new_cso) {
770 /* Try to avoid re-emitting 3DSTATE_LINE_STIPPLE, it's non-pipelined */
771 if (cso_changed_memcmp(line_stipple))
772 ice->state.dirty |= IRIS_DIRTY_LINE_STIPPLE;
773
774 if (cso_changed(half_pixel_center))
775 ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
776
777 if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
778 ice->state.dirty |= IRIS_DIRTY_WM;
779
780 if (cso_changed(rasterizer_discard) || cso_changed(flatshade_first))
781 ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
782 }
783
784 ice->state.cso_rast = new_cso;
785 ice->state.dirty |= IRIS_DIRTY_RASTER;
786 ice->state.dirty |= IRIS_DIRTY_CLIP;
787 }
788
789 static uint32_t
790 translate_wrap(unsigned pipe_wrap)
791 {
792 static const unsigned map[] = {
793 [PIPE_TEX_WRAP_REPEAT] = TCM_WRAP,
794 [PIPE_TEX_WRAP_CLAMP] = TCM_HALF_BORDER,
795 [PIPE_TEX_WRAP_CLAMP_TO_EDGE] = TCM_CLAMP,
796 [PIPE_TEX_WRAP_CLAMP_TO_BORDER] = TCM_CLAMP_BORDER,
797 [PIPE_TEX_WRAP_MIRROR_REPEAT] = TCM_MIRROR,
798 [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,
799
800 /* These are unsupported. */
801 [PIPE_TEX_WRAP_MIRROR_CLAMP] = -1,
802 [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
803 };
804 return map[pipe_wrap];
805 }
806
807 /**
808 * Return true if the given wrap mode requires the border color to exist.
809 */
810 static bool
811 wrap_mode_needs_border_color(unsigned wrap_mode)
812 {
813 return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
814 }
815
816 static unsigned
817 translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
818 {
819 static const unsigned map[] = {
820 [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
821 [PIPE_TEX_MIPFILTER_LINEAR] = MIPFILTER_LINEAR,
822 [PIPE_TEX_MIPFILTER_NONE] = MIPFILTER_NONE,
823 };
824 return map[pipe_mip];
825 }
826
827 struct iris_sampler_state {
828 struct pipe_sampler_state base;
829
830 bool needs_border_color;
831
832 uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
833 };
834
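/**
 * Create a sampler CSO: pre-pack SAMPLER_STATE, except for the border
 * color pointer, which is filled in when the sampler is bound.
 */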
835 static void *
836 iris_create_sampler_state(struct pipe_context *ctx,
837 const struct pipe_sampler_state *state)
838 {
839 struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);
840
841 if (!cso)
842 return NULL;
843
844 memcpy(&cso->base, state, sizeof(*state));
845
846 STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
847 STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);
848
849 unsigned wrap_s = translate_wrap(state->wrap_s);
850 unsigned wrap_t = translate_wrap(state->wrap_t);
851 unsigned wrap_r = translate_wrap(state->wrap_r);
852
853 cso->needs_border_color = wrap_mode_needs_border_color(wrap_s) ||
854 wrap_mode_needs_border_color(wrap_t) ||
855 wrap_mode_needs_border_color(wrap_r);
856
857 iris_pack_state(GENX(SAMPLER_STATE), cso->sampler_state, samp) {
858 samp.TCXAddressControlMode = wrap_s;
859 samp.TCYAddressControlMode = wrap_t;
860 samp.TCZAddressControlMode = wrap_r;
861 samp.CubeSurfaceControlMode = state->seamless_cube_map;
862 samp.NonnormalizedCoordinateEnable = !state->normalized_coords;
863 samp.MinModeFilter = state->min_img_filter;
864 samp.MagModeFilter = state->mag_img_filter;
865 samp.MipModeFilter = translate_mip_filter(state->min_mip_filter);
866 samp.MaximumAnisotropy = RATIO21;
867
868 if (state->max_anisotropy >= 2) {
869 if (state->min_img_filter == PIPE_TEX_FILTER_LINEAR) {
870 samp.MinModeFilter = MAPFILTER_ANISOTROPIC;
871 samp.AnisotropicAlgorithm = EWAApproximation;
872 }
873
874 if (state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)
875 samp.MagModeFilter = MAPFILTER_ANISOTROPIC;
876
877 samp.MaximumAnisotropy =
878 MIN2((state->max_anisotropy - 2) / 2, RATIO161);
879 }
880
881 /* Set address rounding bits if not using nearest filtering. */
882 if (state->min_img_filter != PIPE_TEX_FILTER_NEAREST) {
883 samp.UAddressMinFilterRoundingEnable = true;
884 samp.VAddressMinFilterRoundingEnable = true;
885 samp.RAddressMinFilterRoundingEnable = true;
886 }
887
888 if (state->mag_img_filter != PIPE_TEX_FILTER_NEAREST) {
889 samp.UAddressMagFilterRoundingEnable = true;
890 samp.VAddressMagFilterRoundingEnable = true;
891 samp.RAddressMagFilterRoundingEnable = true;
892 }
893
894 if (state->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
895 samp.ShadowFunction = translate_shadow_func(state->compare_func);
896
897 const float hw_max_lod = GEN_GEN >= 7 ? 14 : 13;
898
899 samp.LODPreClampMode = CLAMP_MODE_OGL;
900 samp.MinLOD = CLAMP(state->min_lod, 0, hw_max_lod);
901 samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
902 samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);
903
904 /* .BorderColorPointer is filled in by iris_bind_sampler_states. */
905 }
906
907 return cso;
908 }
909
910 static void
911 iris_bind_sampler_states(struct pipe_context *ctx,
912 enum pipe_shader_type p_stage,
913 unsigned start, unsigned count,
914 void **states)
915 {
916 struct iris_context *ice = (struct iris_context *) ctx;
917 gl_shader_stage stage = stage_from_pipe(p_stage);
918
919 assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
920 ice->state.num_samplers[stage] =
921 MAX2(ice->state.num_samplers[stage], start + count);
922
923 for (int i = 0; i < count; i++) {
924 ice->state.samplers[stage][start + i] = states[i];
925 }
926
927 /* Assemble the SAMPLER_STATEs into a contiguous table that lives
928 * in the dynamic state memory zone, so we can point to it via the
929 * 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
930 */
931 void *map = upload_state(ice->state.dynamic_uploader,
932 &ice->state.sampler_table[stage],
933 count * 4 * GENX(SAMPLER_STATE_length), 32);
934 if (unlikely(!map))
935 return;
936
937 struct pipe_resource *res = ice->state.sampler_table[stage].res;
938 ice->state.sampler_table[stage].offset +=
939 iris_bo_offset_from_base_address(iris_resource_bo(res));
940
941 /* Make sure all land in the same BO */
942 iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);
943
944 for (int i = 0; i < count; i++) {
945 /* The sampler pointers were saved above; a few of their fields are
946 * needed to inform draw-time decisions.
947 */
948 struct iris_sampler_state *state = ice->state.samplers[stage][start + i];
951
952 if (!state) {
953 memset(map, 0, 4 * GENX(SAMPLER_STATE_length));
954 } else if (!state->needs_border_color) {
955 memcpy(map, state->sampler_state, 4 * GENX(SAMPLER_STATE_length));
956 } else {
957 ice->state.need_border_colors = true;
958
959 /* Stream out the border color and merge the pointer. */
960 uint32_t offset =
961 iris_upload_border_color(ice, &state->base.border_color);
962
963 uint32_t dynamic[GENX(SAMPLER_STATE_length)];
964 iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) {
965 dyns.BorderColorPointer = offset;
966 }
967
968 for (uint32_t j = 0; j < GENX(SAMPLER_STATE_length); j++)
969 ((uint32_t *) map)[j] = state->sampler_state[j] | dynamic[j];
970 }
971
972 map += 4 * GENX(SAMPLER_STATE_length);
973 }
974
975 ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
976 }
977
978 struct iris_sampler_view {
979 struct pipe_sampler_view pipe;
980 struct isl_view view;
981
982 /** The resource (BO) holding our SURFACE_STATE. */
983 struct iris_state_ref surface_state;
984 };
985
986 /**
987 * Convert a swizzle enumeration (e.g. PIPE_SWIZZLE_X) to one of the Gen7.5+
988 * "Shader Channel Select" enumerations (e.g. HSW_SCS_RED). The mappings are
989 *
990 * SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_ZERO, SWIZZLE_ONE
991 * 0 1 2 3 4 5
992 * 4 5 6 7 0 1
993 * SCS_RED, SCS_GREEN, SCS_BLUE, SCS_ALPHA, SCS_ZERO, SCS_ONE
994 *
995 * which is simply adding 4 then modding by 8 (or anding with 7).
996 *
997 * We then may need to apply workarounds for textureGather hardware bugs.
998 */
999 static enum isl_channel_select
1000 pipe_swizzle_to_isl_channel(enum pipe_swizzle swizzle)
1001 {
1002 return (swizzle + 4) & 7;
1003 }
1004
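/**
 * Create a sampler view: build an isl_view describing the texture and
 * bake a SURFACE_STATE for it into the surface memory zone.
 */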
1005 static struct pipe_sampler_view *
1006 iris_create_sampler_view(struct pipe_context *ctx,
1007 struct pipe_resource *tex,
1008 const struct pipe_sampler_view *tmpl)
1009 {
1010 struct iris_context *ice = (struct iris_context *) ctx;
1011 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
1012 struct iris_resource *itex = (struct iris_resource *) tex;
1013 struct iris_sampler_view *isv = calloc(1, sizeof(struct iris_sampler_view));
1014
1015 if (!isv)
1016 return NULL;
1017
1018 /* initialize base object */
1019 isv->pipe = *tmpl;
1020 isv->pipe.context = ctx;
1021 isv->pipe.texture = NULL;
1022 pipe_reference_init(&isv->pipe.reference, 1);
1023 pipe_resource_reference(&isv->pipe.texture, tex);
1024
1025 /* XXX: do we need brw_get_texture_swizzle hacks here? */
1026
1027 isv->view = (struct isl_view) {
1028 .format = iris_isl_format_for_pipe_format(tmpl->format),
1029 .base_level = tmpl->u.tex.first_level,
1030 .levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1,
1031 .base_array_layer = tmpl->u.tex.first_layer,
1032 .array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1,
1033 .swizzle = (struct isl_swizzle) {
1034 .r = pipe_swizzle_to_isl_channel(tmpl->swizzle_r),
1035 .g = pipe_swizzle_to_isl_channel(tmpl->swizzle_g),
1036 .b = pipe_swizzle_to_isl_channel(tmpl->swizzle_b),
1037 .a = pipe_swizzle_to_isl_channel(tmpl->swizzle_a),
1038 },
1039 .usage = ISL_SURF_USAGE_TEXTURE_BIT |
1040 (itex->surf.usage & ISL_SURF_USAGE_CUBE_BIT),
1041 };
1042
1043 void *map = upload_state(ice->state.surface_uploader, &isv->surface_state,
1044 4 * GENX(RENDER_SURFACE_STATE_length), 64);
1045 if (unlikely(!map))
1046 return NULL;
1047
1048 struct iris_bo *state_bo = iris_resource_bo(isv->surface_state.res);
1049 isv->surface_state.offset += iris_bo_offset_from_base_address(state_bo);
1050
1051 isl_surf_fill_state(&screen->isl_dev, map,
1052 .surf = &itex->surf, .view = &isv->view,
1053 .mocs = MOCS_WB,
1054 .address = itex->bo->gtt_offset);
1055 // .aux_surf =
1056 // .clear_color = clear_color,
1057
1058 return &isv->pipe;
1059 }
1060
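/**
 * Create a render target or depth/stencil surface. Color surfaces get a
 * baked SURFACE_STATE; depth/stencil surfaces bail early, since their
 * state is packed in iris_set_framebuffer_state instead.
 */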
1061 static struct pipe_surface *
1062 iris_create_surface(struct pipe_context *ctx,
1063 struct pipe_resource *tex,
1064 const struct pipe_surface *tmpl)
1065 {
1066 struct iris_context *ice = (struct iris_context *) ctx;
1067 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
1068 struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
1069 struct pipe_surface *psurf = &surf->pipe;
1070 struct iris_resource *res = (struct iris_resource *) tex;
1071
1072 if (!surf)
1073 return NULL;
1074
1075 pipe_reference_init(&psurf->reference, 1);
1076 pipe_resource_reference(&psurf->texture, tex);
1077 psurf->context = ctx;
1078 psurf->format = tmpl->format;
1079 psurf->width = tex->width0;
1080 psurf->height = tex->height0;
1081 psurf->texture = tex;
1082 psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
1083 psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
1084 psurf->u.tex.level = tmpl->u.tex.level;
1085
1086 unsigned usage = 0;
1087 if (tmpl->writable)
1088 usage = ISL_SURF_USAGE_STORAGE_BIT;
1089 else if (util_format_is_depth_or_stencil(tmpl->format))
1090 usage = ISL_SURF_USAGE_DEPTH_BIT;
1091 else
1092 usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
1093
1094 surf->view = (struct isl_view) {
1095 .format = iris_isl_format_for_pipe_format(tmpl->format),
1096 .base_level = tmpl->u.tex.level,
1097 .levels = 1,
1098 .base_array_layer = tmpl->u.tex.first_layer,
1099 .array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1,
1100 .swizzle = ISL_SWIZZLE_IDENTITY,
1101 .usage = usage,
1102 };
1103
1104 /* Bail early for depth/stencil */
1105 if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
1106 ISL_SURF_USAGE_STENCIL_BIT))
1107 return psurf;
1108
1109
1110 void *map = upload_state(ice->state.surface_uploader, &surf->surface_state,
1111 4 * GENX(RENDER_SURFACE_STATE_length), 64);
1112 if (unlikely(!map))
1113 return NULL;
1114
1115 struct iris_bo *state_bo = iris_resource_bo(surf->surface_state.res);
1116 surf->surface_state.offset += iris_bo_offset_from_base_address(state_bo);
1117
1118 isl_surf_fill_state(&screen->isl_dev, map,
1119 .surf = &res->surf, .view = &surf->view,
1120 .mocs = MOCS_WB,
1121 .address = res->bo->gtt_offset);
1122 // .aux_surf =
1123 // .clear_color = clear_color,
1124
1125 return psurf;
1126 }
1127
1128 static void
1129 iris_set_sampler_views(struct pipe_context *ctx,
1130 enum pipe_shader_type p_stage,
1131 unsigned start, unsigned count,
1132 struct pipe_sampler_view **views)
1133 {
1134 struct iris_context *ice = (struct iris_context *) ctx;
1135 gl_shader_stage stage = stage_from_pipe(p_stage);
1136
1137 unsigned i;
1138 for (i = 0; i < count; i++) {
1139 pipe_sampler_view_reference((struct pipe_sampler_view **)
1140 &ice->state.textures[stage][i], views[i]);
1141 }
1142 for (; i < ice->state.num_textures[stage]; i++) {
1143 pipe_sampler_view_reference((struct pipe_sampler_view **)
1144 &ice->state.textures[stage][i], NULL);
1145 }
1146
1147 ice->state.num_textures[stage] = count;
1148
1149 ice->state.dirty |= (IRIS_DIRTY_BINDINGS_VS << stage);
1150 }
1151
1152 static void
1153 iris_set_clip_state(struct pipe_context *ctx,
1154 const struct pipe_clip_state *state)
1155 {
1156 }
1157
1158 static void
1159 iris_set_polygon_stipple(struct pipe_context *ctx,
1160 const struct pipe_poly_stipple *state)
1161 {
1162 struct iris_context *ice = (struct iris_context *) ctx;
1163 memcpy(&ice->state.poly_stipple, state, sizeof(*state));
1164 ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
1165 }
1166
1167 static void
1168 iris_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
1169 {
1170 struct iris_context *ice = (struct iris_context *) ctx;
1171
1172 ice->state.sample_mask = sample_mask;
1173 ice->state.dirty |= IRIS_DIRTY_SAMPLE_MASK;
1174 }
1175
1176 static void
1177 iris_set_scissor_states(struct pipe_context *ctx,
1178 unsigned start_slot,
1179 unsigned num_scissors,
1180 const struct pipe_scissor_state *states)
1181 {
1182 struct iris_context *ice = (struct iris_context *) ctx;
1183
1184 for (unsigned i = 0; i < num_scissors; i++) {
1185 ice->state.scissors[start_slot + i] = states[i];
1186 }
1187
1188 ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
1189 }
1190
1191 static void
1192 iris_set_stencil_ref(struct pipe_context *ctx,
1193 const struct pipe_stencil_ref *state)
1194 {
1195 struct iris_context *ice = (struct iris_context *) ctx;
1196 memcpy(&ice->state.stencil_ref, state, sizeof(*state));
1197 ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
1198 }
1199
1200 static float
1201 viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
1202 {
1203 return copysignf(state->scale[axis], sign) + state->translate[axis];
1204 }
1205
1206 #if 0
1207 static void
1208 calculate_guardband_size(uint32_t fb_width, uint32_t fb_height,
1209 float m00, float m11, float m30, float m31,
1210 float *xmin, float *xmax,
1211 float *ymin, float *ymax)
1212 {
1213 /* According to the "Vertex X,Y Clamping and Quantization" section of the
1214 * Strips and Fans documentation:
1215 *
1216 * "The vertex X and Y screen-space coordinates are also /clamped/ to the
1217 * fixed-point "guardband" range supported by the rasterization hardware"
1218 *
1219 * and
1220 *
1221 * "In almost all circumstances, if an object’s vertices are actually
1222 * modified by this clamping (i.e., had X or Y coordinates outside of
1223 * the guardband extent) the rendered object will not match the intended
1224 * result. Therefore software should take steps to ensure that this does
1225 * not happen - e.g., by clipping objects such that they do not exceed
1226 * these limits after the Drawing Rectangle is applied."
1227 *
1228 * I believe the fundamental restriction is that the rasterizer (in
1229 * the SF/WM stages) have a limit on the number of pixels that can be
1230 * rasterized. We need to ensure any coordinates beyond the rasterizer
1231 * limit are handled by the clipper. So effectively that limit becomes
1232 * the clipper's guardband size.
1233 *
1234 * It goes on to say:
1235 *
1236 * "In addition, in order to be correctly rendered, objects must have a
1237 * screenspace bounding box not exceeding 8K in the X or Y direction.
1238 * This additional restriction must also be comprehended by software,
1239 * i.e., enforced by use of clipping."
1240 *
1241 * This makes no sense. Gen7+ hardware supports 16K render targets,
1242 * and you definitely need to be able to draw polygons that fill the
1243 * surface. Our assumption is that the rasterizer was limited to 8K
1244 * on Sandybridge, which only supports 8K surfaces, and it was actually
1245 * increased to 16K on Ivybridge and later.
1246 *
1247 * So, limit the guardband to 16K on Gen7+ and 8K on Sandybridge.
1248 */
1249 const float gb_size = GEN_GEN >= 7 ? 16384.0f : 8192.0f;
1250
1251 if (m00 != 0 && m11 != 0) {
1252 /* First, we compute the screen-space render area */
1253 const float ss_ra_xmin = MIN3( 0, m30 + m00, m30 - m00);
1254 const float ss_ra_xmax = MAX3( fb_width, m30 + m00, m30 - m00);
1255 const float ss_ra_ymin = MIN3( 0, m31 + m11, m31 - m11);
1256 const float ss_ra_ymax = MAX3(fb_height, m31 + m11, m31 - m11);
1257
1258 /* We want the guardband to be centered on that */
1259 const float ss_gb_xmin = (ss_ra_xmin + ss_ra_xmax) / 2 - gb_size;
1260 const float ss_gb_xmax = (ss_ra_xmin + ss_ra_xmax) / 2 + gb_size;
1261 const float ss_gb_ymin = (ss_ra_ymin + ss_ra_ymax) / 2 - gb_size;
1262 const float ss_gb_ymax = (ss_ra_ymin + ss_ra_ymax) / 2 + gb_size;
1263
1264 /* Now we need it in native device coordinates */
1265 const float ndc_gb_xmin = (ss_gb_xmin - m30) / m00;
1266 const float ndc_gb_xmax = (ss_gb_xmax - m30) / m00;
1267 const float ndc_gb_ymin = (ss_gb_ymin - m31) / m11;
1268 const float ndc_gb_ymax = (ss_gb_ymax - m31) / m11;
1269
1270 /* Thanks to Y-flipping and ORIGIN_UPPER_LEFT, the Y coordinates may be
1271 * flipped upside-down. X should be fine though.
1272 */
1273 assert(ndc_gb_xmin <= ndc_gb_xmax);
1274 *xmin = ndc_gb_xmin;
1275 *xmax = ndc_gb_xmax;
1276 *ymin = MIN2(ndc_gb_ymin, ndc_gb_ymax);
1277 *ymax = MAX2(ndc_gb_ymin, ndc_gb_ymax);
1278 } else {
1279 /* The viewport scales to 0, so nothing will be rendered. */
1280 *xmin = 0.0f;
1281 *xmax = 0.0f;
1282 *ymin = 0.0f;
1283 *ymax = 0.0f;
1284 }
1285 }
1286 #endif
1287
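/**
 * Pack SF_CLIP_VIEWPORT entries for the given viewport slots. The guardband
 * is left at the trivial [-1, 1] range for now (see the XXX note below).
 */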
1288 static void
1289 iris_set_viewport_states(struct pipe_context *ctx,
1290 unsigned start_slot,
1291 unsigned count,
1292 const struct pipe_viewport_state *states)
1293 {
1294 struct iris_context *ice = (struct iris_context *) ctx;
1295 struct iris_viewport_state *cso = &ice->state.genx->viewport;
1296 uint32_t *vp_map = &cso->sf_cl_vp[start_slot * GENX(SF_CLIP_VIEWPORT_length)];
1297
1298 for (unsigned i = 0; i < count; i++) {
1299 const struct pipe_viewport_state *state = &states[i];
1301 iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
1302 vp.ViewportMatrixElementm00 = state->scale[0];
1303 vp.ViewportMatrixElementm11 = state->scale[1];
1304 vp.ViewportMatrixElementm22 = state->scale[2];
1305 vp.ViewportMatrixElementm30 = state->translate[0];
1306 vp.ViewportMatrixElementm31 = state->translate[1];
1307 vp.ViewportMatrixElementm32 = state->translate[2];
1308 /* XXX: in i965 this is computed based on the drawbuffer size,
1309 * but we don't have that here...
1310 */
1311 vp.XMinClipGuardband = -1.0;
1312 vp.XMaxClipGuardband = 1.0;
1313 vp.YMinClipGuardband = -1.0;
1314 vp.YMaxClipGuardband = 1.0;
1315 vp.XMinViewPort = viewport_extent(state, 0, -1.0f);
1316 vp.XMaxViewPort = viewport_extent(state, 0, 1.0f) - 1;
1317 vp.YMinViewPort = viewport_extent(state, 1, -1.0f);
1318 vp.YMaxViewPort = viewport_extent(state, 1, 1.0f) - 1;
1319 }
1320
1321 vp_map += GENX(SF_CLIP_VIEWPORT_length);
1322 }
1323
1324 ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
1325 }
1326
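/**
 * Set the framebuffer: flag the state atoms that depend on it and pre-pack
 * the 3DSTATE_DEPTH_BUFFER / STENCIL / HIER_DEPTH / CLEAR_PARAMS packets
 * via isl_emit_depth_stencil_hiz_s().
 */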
1327 static void
1328 iris_set_framebuffer_state(struct pipe_context *ctx,
1329 const struct pipe_framebuffer_state *state)
1330 {
1331 struct iris_context *ice = (struct iris_context *) ctx;
1332 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
1333 struct isl_device *isl_dev = &screen->isl_dev;
1334 struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
1335
1336 if (cso->samples != state->samples) {
1337 ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
1338 }
1339
1340 if (cso->nr_cbufs != state->nr_cbufs) {
1341 ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
1342 }
1343
1344 if ((cso->layers == 0) != (state->layers == 0)) {
1345 ice->state.dirty |= IRIS_DIRTY_CLIP;
1346 }
1347
1348 util_copy_framebuffer_state(cso, state);
1349
1350 struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
1351
1352 struct isl_view view = {
1353 .base_level = 0,
1354 .levels = 1,
1355 .base_array_layer = 0,
1356 .array_len = 1,
1357 .swizzle = ISL_SWIZZLE_IDENTITY,
1358 };
1359
1360 struct isl_depth_stencil_hiz_emit_info info = {
1361 .view = &view,
1362 .mocs = MOCS_WB,
1363 };
1364
1365 struct iris_resource *zres =
1366 (void *) (cso->zsbuf ? cso->zsbuf->texture : NULL);
1367
1368 if (zres) {
1369 view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
1370
1371 info.depth_surf = &zres->surf;
1372 info.depth_address = zres->bo->gtt_offset;
1373
1374 view.format = zres->surf.format;
1375
1376 view.base_level = cso->zsbuf->u.tex.level;
1377 view.base_array_layer = cso->zsbuf->u.tex.first_layer;
1378 view.array_len =
1379 cso->zsbuf->u.tex.last_layer - cso->zsbuf->u.tex.first_layer + 1;
1380
1381 info.hiz_usage = ISL_AUX_USAGE_NONE;
1382 }
1383
1384 #if 0
1385 if (stencil_mt) {
1386 view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
1387 info.stencil_surf = &stencil_mt->surf;
1388
1389 if (!depth_mt) {
1390 view.base_level = stencil_irb->mt_level - stencil_irb->mt->first_level;
1391 view.base_array_layer = stencil_irb->mt_layer;
1392 view.array_len = MAX2(stencil_irb->layer_count, 1);
1393 view.format = stencil_mt->surf.format;
1394 }
1395
1396 uint32_t stencil_offset = 0;
1397 info.stencil_address = stencil_mt->bo->gtt_offset + stencil_mt->offset;
1398 }
1399 #endif
1400
1401 isl_emit_depth_stencil_hiz_s(isl_dev, cso_z->packets, &info);
1402
1403 ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
1404
1405 /* Render target change */
1406 ice->state.dirty |= IRIS_DIRTY_BINDINGS_FS;
1407 }
1408
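/**
 * Bind a constant buffer: upload user buffers if necessary and build a
 * SURFACE_STATE describing the buffer for the binding table.
 */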
1409 static void
1410 iris_set_constant_buffer(struct pipe_context *ctx,
1411 enum pipe_shader_type p_stage, unsigned index,
1412 const struct pipe_constant_buffer *input)
1413 {
1414 struct iris_context *ice = (struct iris_context *) ctx;
1415 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
1416 gl_shader_stage stage = stage_from_pipe(p_stage);
1417 struct iris_shader_state *shs = &ice->shaders.state[stage];
1418 struct iris_const_buffer *cbuf = &shs->constbuf[index];
1419
1420 if (input && (input->buffer || input->user_buffer)) {
1421 if (input->user_buffer) {
1422 u_upload_data(ctx->const_uploader, 0, input->buffer_size, 32,
1423 input->user_buffer, &cbuf->data.offset,
1424 &cbuf->data.res);
1425 } else {
1426 pipe_resource_reference(&cbuf->data.res, input->buffer);
1427 }
1428
1429 // XXX: these are not retained forever, use a separate uploader?
1430 void *map =
1431 upload_state(ice->state.surface_uploader, &cbuf->surface_state,
1432 4 * GENX(RENDER_SURFACE_STATE_length), 64);
1433 if (unlikely(!map)) {
1434 pipe_resource_reference(&cbuf->data.res, NULL);
1435 return;
1436 }
1437
1438 struct iris_resource *res = (void *) cbuf->data.res;
1439 struct iris_bo *surf_bo = iris_resource_bo(cbuf->surface_state.res);
1440 cbuf->surface_state.offset += iris_bo_offset_from_base_address(surf_bo);
1441
1442 isl_buffer_fill_state(&screen->isl_dev, map,
1443 .address = res->bo->gtt_offset + cbuf->data.offset,
1444 .size_B = input->buffer_size,
1445 .format = ISL_FORMAT_R32G32B32A32_FLOAT,
1446 .stride_B = 1,
1447 .mocs = MOCS_WB);
1448 } else {
1449 pipe_resource_reference(&cbuf->data.res, NULL);
1450 pipe_resource_reference(&cbuf->surface_state.res, NULL);
1451 }
1452
1453 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
1454 // XXX: maybe not necessary all the time...?
1455 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
1456 }
1457
1458 static void
1459 iris_sampler_view_destroy(struct pipe_context *ctx,
1460 struct pipe_sampler_view *state)
1461 {
1462 struct iris_sampler_view *isv = (void *) state;
1463 pipe_resource_reference(&state->texture, NULL);
1464 pipe_resource_reference(&isv->surface_state.res, NULL);
1465 free(isv);
1466 }
1467
1468
1469 static void
1470 iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
1471 {
1472 struct iris_surface *surf = (void *) p_surf;
1473 pipe_resource_reference(&p_surf->texture, NULL);
1474 pipe_resource_reference(&surf->surface_state.res, NULL);
1475 free(surf);
1476 }
1477
1478 static void
1479 iris_delete_state(struct pipe_context *ctx, void *state)
1480 {
1481 free(state);
1482 }
1483
1484 static void
1485 iris_free_vertex_buffers(struct iris_vertex_buffer_state *cso)
1486 {
1487 for (unsigned i = 0; i < cso->num_buffers; i++)
1488 pipe_resource_reference(&cso->resources[i], NULL);
1489 }
1490
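/**
 * Set vertex buffers: pre-pack the entire 3DSTATE_VERTEX_BUFFERS command
 * (or a single null buffer when none are bound) so draw time only needs to
 * copy it into the batch.
 */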
1491 static void
1492 iris_set_vertex_buffers(struct pipe_context *ctx,
1493 unsigned start_slot, unsigned count,
1494 const struct pipe_vertex_buffer *buffers)
1495 {
1496 struct iris_context *ice = (struct iris_context *) ctx;
1497 struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;
1498
1499 iris_free_vertex_buffers(&ice->state.genx->vertex_buffers);
1500
1501 if (!buffers)
1502 count = 0;
1503
1504 cso->num_buffers = count;
1505
1506 iris_pack_command(GENX(3DSTATE_VERTEX_BUFFERS), cso->vertex_buffers, vb) {
1507 vb.DWordLength = 4 * MAX2(cso->num_buffers, 1) - 1;
1508 }
1509
1510 uint32_t *vb_pack_dest = &cso->vertex_buffers[1];
1511
1512 if (count == 0) {
1513 iris_pack_state(GENX(VERTEX_BUFFER_STATE), vb_pack_dest, vb) {
1514 vb.VertexBufferIndex = start_slot;
1515 vb.NullVertexBuffer = true;
1516 vb.AddressModifyEnable = true;
1517 }
1518 }
1519
1520 for (unsigned i = 0; i < count; i++) {
1521 assert(!buffers[i].is_user_buffer);
1522
1523 pipe_resource_reference(&cso->resources[i], buffers[i].buffer.resource);
1524 struct iris_resource *res = (void *) cso->resources[i];
1525
1526 iris_pack_state(GENX(VERTEX_BUFFER_STATE), vb_pack_dest, vb) {
1527 vb.VertexBufferIndex = start_slot + i;
1528 vb.MOCS = MOCS_WB;
1529 vb.AddressModifyEnable = true;
1530 vb.BufferPitch = buffers[i].stride;
1531 vb.BufferSize = res->bo->size;
1532 vb.BufferStartingAddress =
1533 ro_bo(NULL, res->bo->gtt_offset + buffers[i].buffer_offset);
1534 }
1535
1536 vb_pack_dest += GENX(VERTEX_BUFFER_STATE_length);
1537 }
1538
1539 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
1540 }
1541
1542 struct iris_vertex_element_state {
1543 uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
1544 uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
1545 unsigned count;
1546 };
1547
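/**
 * Create a vertex elements CSO: pre-pack 3DSTATE_VERTEX_ELEMENTS and a
 * 3DSTATE_VF_INSTANCING packet per element, substituting a single
 * "store <0,0,0,1>" element when count is zero.
 */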
1548 static void *
1549 iris_create_vertex_elements(struct pipe_context *ctx,
1550 unsigned count,
1551 const struct pipe_vertex_element *state)
1552 {
1553 struct iris_vertex_element_state *cso =
1554 malloc(sizeof(struct iris_vertex_element_state));
1555
1556 cso->count = MAX2(count, 1);
1557
1558 /* TODO:
1559 * - create edge flag one
1560 * - create SGV ones
1561 * - if those are necessary, use count + 1/2/3... OR in the length
1562 */
1563 iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) {
1564 ve.DWordLength = 1 + GENX(VERTEX_ELEMENT_STATE_length) * cso->count - 2;
1565 }
1566
1567 uint32_t *ve_pack_dest = &cso->vertex_elements[1];
1568 uint32_t *vfi_pack_dest = cso->vf_instancing;
1569
1570 if (count == 0) {
1571 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
1572 ve.Valid = true;
1573 ve.SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT;
1574 ve.Component0Control = VFCOMP_STORE_0;
1575 ve.Component1Control = VFCOMP_STORE_0;
1576 ve.Component2Control = VFCOMP_STORE_0;
1577 ve.Component3Control = VFCOMP_STORE_1_FP;
1578 }
1579
1580 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
1581 }
1582 }
1583
1584 for (int i = 0; i < count; i++) {
1585 enum isl_format isl_format =
1586 iris_isl_format_for_pipe_format(state[i].src_format);
1587 unsigned comp[4] = { VFCOMP_STORE_SRC, VFCOMP_STORE_SRC,
1588 VFCOMP_STORE_SRC, VFCOMP_STORE_SRC };
1589
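/* The switch below intentionally cascades: components beyond the format's
 * channel count are overridden so they read back as the GL default of
 * (0, 0, 0, 1), using an integer or float one as appropriate.
 */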
1590 switch (isl_format_get_num_channels(isl_format)) {
1591 case 0: comp[0] = VFCOMP_STORE_0; /* fallthrough */
1592 case 1: comp[1] = VFCOMP_STORE_0; /* fallthrough */
1593 case 2: comp[2] = VFCOMP_STORE_0; /* fallthrough */
1594 case 3:
1595 comp[3] = isl_format_has_int_channel(isl_format) ? VFCOMP_STORE_1_INT
1596 : VFCOMP_STORE_1_FP;
1597 break;
1598 }
1599 iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
1600 ve.VertexBufferIndex = state[i].vertex_buffer_index;
1601 ve.Valid = true;
1602 ve.SourceElementOffset = state[i].src_offset;
1603 ve.SourceElementFormat = isl_format;
1604 ve.Component0Control = comp[0];
1605 ve.Component1Control = comp[1];
1606 ve.Component2Control = comp[2];
1607 ve.Component3Control = comp[3];
1608 }
1609
1610 iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
1611 vi.VertexElementIndex = i;
1612 vi.InstancingEnable = state[i].instance_divisor > 0;
1613 vi.InstanceDataStepRate = state[i].instance_divisor;
1614 }
1615
1616 ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
1617 vfi_pack_dest += GENX(3DSTATE_VF_INSTANCING_length);
1618 }
1619
1620 return cso;
1621 }
1622
1623 static void
1624 iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
1625 {
1626 struct iris_context *ice = (struct iris_context *) ctx;
1627
1628 ice->state.cso_vertex_elements = state;
1629 ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
1630 }
1631
1632 static void *
1633 iris_create_compute_state(struct pipe_context *ctx,
1634 const struct pipe_compute_state *state)
1635 {
1636 return malloc(1); /* placeholder CSO; compute state is not implemented yet */
1637 }
1638
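/**
 * The pipe stream output target, plus iris-specific state: a pre-packed
 * 3DSTATE_SO_BUFFER template and a small GPU buffer ("offset") which the
 * hardware uses to save and restore the current write offset, so that
 * appending can continue across binds and batches.
 */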
1639 struct iris_stream_output_target {
1640 struct pipe_stream_output_target base;
1641
1642 uint32_t so_buffer[GENX(3DSTATE_SO_BUFFER_length)];
1643
1644 struct iris_state_ref offset;
1645 };
1646
1647 static struct pipe_stream_output_target *
1648 iris_create_stream_output_target(struct pipe_context *ctx,
1649 struct pipe_resource *res,
1650 unsigned buffer_offset,
1651 unsigned buffer_size)
1652 {
1653 struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
1654 if (!cso)
1655 return NULL;
1656
1657 pipe_reference_init(&cso->base.reference, 1);
1658 pipe_resource_reference(&cso->base.buffer, res);
1659 cso->base.buffer_offset = buffer_offset;
1660 cso->base.buffer_size = buffer_size;
1661 cso->base.context = ctx;
1662
1663 upload_state(ctx->stream_uploader, &cso->offset, 4, 4);
1664
1665 iris_pack_command(GENX(3DSTATE_SO_BUFFER), cso->so_buffer, sob) {
1666 sob.SurfaceBaseAddress =
1667 rw_bo(NULL, iris_resource_bo(res)->gtt_offset + buffer_offset);
1668 sob.SOBufferEnable = true;
1669 sob.StreamOffsetWriteEnable = true;
1670 sob.StreamOutputBufferOffsetAddressEnable = true;
1671 sob.MOCS = MOCS_WB; // XXX: MOCS
1672
1673 sob.SurfaceSize = MAX2(buffer_size / 4, 1) - 1;
1674 sob.StreamOutputBufferOffsetAddress =
1675 rw_bo(NULL, iris_resource_bo(cso->offset.res)->gtt_offset + cso->offset.offset);
1676
1677 /* .SOBufferIndex and .StreamOffset are filled in later */
1678 }
1679
1680 return &cso->base;
1681 }
1682
1683 static void
1684 iris_stream_output_target_destroy(struct pipe_context *ctx,
1685 struct pipe_stream_output_target *state)
1686 {
1687 struct iris_stream_output_target *cso = (void *) state;
1688
1689 pipe_resource_reference(&cso->base.buffer, NULL);
1690 pipe_resource_reference(&cso->offset.res, NULL);
1691
1692 free(cso);
1693 }
1694
1695 static void
1696 iris_set_stream_output_targets(struct pipe_context *ctx,
1697 unsigned num_targets,
1698 struct pipe_stream_output_target **targets,
1699 const unsigned *offsets)
1700 {
1701 struct iris_context *ice = (struct iris_context *) ctx;
1702 struct iris_genx_state *genx = ice->state.genx;
1703 uint32_t *so_buffers = genx->so_buffers;
1704
1705 const bool active = num_targets > 0;
1706 if (ice->state.streamout_active != active) {
1707 ice->state.streamout_active = active;
1708 ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
1709 }
1710
1711 /* No need to update 3DSTATE_SO_BUFFER unless SOL is active. */
1712 if (!active)
1713 return;
1714
1715 for (unsigned i = 0; i < 4; i++,
1716 so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
1717
1718 if (i >= num_targets || !targets[i]) {
1719 iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob)
1720 sob.SOBufferIndex = i;
1721 continue;
1722 }
1723
1724 /* Note that offsets[i] will either be 0, causing us to zero
1725 * the value in the buffer, or 0xFFFFFFFF, which happens to mean
1726 * "continue appending at the existing offset."
1727 */
1728 assert(offsets[i] == 0 || offsets[i] == 0xFFFFFFFF);
1729
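/* Merge the static SO_BUFFER template packed at create time with a
 * dynamically packed copy that only sets SOBufferIndex and StreamOffset.
 * Unset fields pack as zero, so OR-ing the two yields the full command.
 */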
1730 uint32_t dynamic[GENX(3DSTATE_SO_BUFFER_length)];
1731 iris_pack_state(GENX(3DSTATE_SO_BUFFER), dynamic, dyns) {
1732 dyns.SOBufferIndex = i;
1733 dyns.StreamOffset = offsets[i];
1734 }
1735
1736 struct iris_stream_output_target *tgt = (void *) targets[i];
1737 for (uint32_t j = 0; j < GENX(3DSTATE_SO_BUFFER_length); j++) {
1738 so_buffers[j] = tgt->so_buffer[j] | dynamic[j];
1739 }
1740 }
1741
1742 ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
1743 }
1744
1745 static uint32_t *
1746 iris_create_so_decl_list(const struct pipe_stream_output_info *info,
1747 const struct brw_vue_map *vue_map)
1748 {
1749 struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
1750 int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
1751 int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
1752 int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
1753 int max_decls = 0;
1754 STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);
1755
1756 if (info->num_outputs == 0)
1757 return NULL;
1758
1759 memset(so_decl, 0, sizeof(so_decl));
1760
1761 /* Construct the list of SO_DECLs to be emitted. The formatting of the
1762 * command feels strange -- each dword pair contains a SO_DECL per stream.
1763 */
1764 for (unsigned i = 0; i < info->num_outputs; i++) {
1765 const struct pipe_stream_output *output = &info->output[i];
1766 const int buffer = output->output_buffer;
1767 const int varying = output->register_index;
1768 const unsigned stream_id = output->stream;
1769 assert(stream_id < MAX_VERTEX_STREAMS);
1770
1771 buffer_mask[stream_id] |= 1 << buffer;
1772
1773 assert(vue_map->varying_to_slot[varying] >= 0);
1774
1775 /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
1776 * array. Instead, it simply increments DstOffset for the following
1777 * input by the number of components that should be skipped.
1778 *
1779 * Our hardware is unusual in that it requires us to program SO_DECLs
1780 * for fake "hole" components, rather than simply taking the offset
1781 * for each real varying. Each hole can have size 1, 2, 3, or 4; we
1782 * program as many size = 4 holes as we can, then a final hole to
1783 * accommodate the final 1, 2, or 3 remaining.
1784 */
1785 int skip_components = output->dst_offset - next_offset[buffer];
1786
1787 while (skip_components > 0) {
1788 so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
1789 .HoleFlag = 1,
1790 .OutputBufferSlot = output->output_buffer,
1791 .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
1792 };
1793 skip_components -= 4;
1794 }
1795
1796 next_offset[buffer] = output->dst_offset + output->num_components;
1797
1798 so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
1799 .OutputBufferSlot = output->output_buffer,
1800 .RegisterIndex = vue_map->varying_to_slot[varying],
1801 .ComponentMask =
1802 ((1 << output->num_components) - 1) << output->start_component,
1803 };
1804
1805 if (decls[stream_id] > max_decls)
1806 max_decls = decls[stream_id];
1807 }
1808
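/* Allocate room for a packed 3DSTATE_STREAMOUT template followed by
 * 3DSTATE_SO_DECL_LIST: 3 header DWords plus one SO_DECL_ENTRY (2 DWords,
 * holding one SO_DECL per stream) for each of the max_decls entries.
 */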
1809 unsigned dwords = GENX(3DSTATE_STREAMOUT_length) + (3 + 2 * max_decls);
1810 uint32_t *map = ralloc_size(NULL, sizeof(uint32_t) * dwords);
1811 uint32_t *so_decl_map = map + GENX(3DSTATE_STREAMOUT_length);
1812
1813 iris_pack_command(GENX(3DSTATE_STREAMOUT), map, sol) {
1814 int urb_entry_read_offset = 0;
1815 int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
1816 urb_entry_read_offset;
1817
1818 /* We always read the whole vertex. This could be reduced at some
1819 * point by reading less and offsetting the register index in the
1820 * SO_DECLs.
1821 */
1822 sol.Stream0VertexReadOffset = urb_entry_read_offset;
1823 sol.Stream0VertexReadLength = urb_entry_read_length - 1;
1824 sol.Stream1VertexReadOffset = urb_entry_read_offset;
1825 sol.Stream1VertexReadLength = urb_entry_read_length - 1;
1826 sol.Stream2VertexReadOffset = urb_entry_read_offset;
1827 sol.Stream2VertexReadLength = urb_entry_read_length - 1;
1828 sol.Stream3VertexReadOffset = urb_entry_read_offset;
1829 sol.Stream3VertexReadLength = urb_entry_read_length - 1;
1830
1831 /* Set buffer pitches; 0 means unbound. */
1832 sol.Buffer0SurfacePitch = 4 * info->stride[0];
1833 sol.Buffer1SurfacePitch = 4 * info->stride[1];
1834 sol.Buffer2SurfacePitch = 4 * info->stride[2];
1835 sol.Buffer3SurfacePitch = 4 * info->stride[3];
1836 }
1837
1838 iris_pack_command(GENX(3DSTATE_SO_DECL_LIST), so_decl_map, list) {
1839 list.DWordLength = 3 + 2 * max_decls - 2;
1840 list.StreamtoBufferSelects0 = buffer_mask[0];
1841 list.StreamtoBufferSelects1 = buffer_mask[1];
1842 list.StreamtoBufferSelects2 = buffer_mask[2];
1843 list.StreamtoBufferSelects3 = buffer_mask[3];
1844 list.NumEntries0 = decls[0];
1845 list.NumEntries1 = decls[1];
1846 list.NumEntries2 = decls[2];
1847 list.NumEntries3 = decls[3];
1848 }
1849
1850 for (int i = 0; i < max_decls; i++) {
1851 iris_pack_state(GENX(SO_DECL_ENTRY), so_decl_map + 3 + i * 2, entry) {
1852 entry.Stream0Decl = so_decl[0][i];
1853 entry.Stream1Decl = so_decl[1][i];
1854 entry.Stream2Decl = so_decl[2][i];
1855 entry.Stream3Decl = so_decl[3][i];
1856 }
1857 }
1858
1859 return map;
1860 }
1861
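/**
 * Compute the VUE slot range (as a read offset and length, both counted in
 * pairs of slots) that the SBE unit must fetch to satisfy the FS inputs.
 *
 * For example, if the first slot read is 4 and the last is 9, then
 * *out_offset = 2 and *out_length = DIV_ROUND_UP(9 - 4 + 1, 2) = 3.
 */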
1862 static void
1863 iris_compute_sbe_urb_read_interval(uint64_t fs_input_slots,
1864 const struct brw_vue_map *last_vue_map,
1865 bool two_sided_color,
1866 unsigned *out_offset,
1867 unsigned *out_length)
1868 {
1869 /* The compiler computes the first URB slot without considering COL/BFC
1870 * swizzling (because it doesn't know whether it's enabled), so we need
1871 * to do that here too. This may result in a smaller offset, which
1872 * should be safe.
1873 */
1874 const unsigned first_slot =
1875 brw_compute_first_urb_slot_required(fs_input_slots, last_vue_map);
1876
1877 /* This becomes the URB read offset (counted in pairs of slots). */
1878 assert(first_slot % 2 == 0);
1879 *out_offset = first_slot / 2;
1880
1881 /* We need to adjust the inputs read to account for front/back color
1882 * swizzling, as it can make the URB length longer.
1883 */
1884 for (int c = 0; c <= 1; c++) {
1885 if (fs_input_slots & (VARYING_BIT_COL0 << c)) {
1886 /* If two sided color is enabled, the fragment shader's gl_Color
1887 * (COL0) input comes from either the gl_FrontColor (COL0) or
1888 * gl_BackColor (BFC0) input varyings. Mark BFC as used, too.
1889 */
1890 if (two_sided_color)
1891 fs_input_slots |= (VARYING_BIT_BFC0 << c);
1892
1893 /* If the front color isn't written, use the back color rather than
1894 * an undefined value. Switch from COL to BFC.
1895 */
1896 if (last_vue_map->varying_to_slot[VARYING_SLOT_COL0 + c] == -1) {
1897 fs_input_slots &= ~(VARYING_BIT_COL0 << c);
1898 fs_input_slots |= (VARYING_BIT_BFC0 << c);
1899 }
1900 }
1901 }
1902
1903 /* Compute the minimum URB Read Length necessary for the FS inputs.
1904 *
1905 * From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
1906 * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
1907 *
1908 * "This field should be set to the minimum length required to read the
1909 * maximum source attribute. The maximum source attribute is indicated
1910 * by the maximum value of the enabled Attribute # Source Attribute if
1911 * Attribute Swizzle Enable is set, Number of Output Attributes-1 if
1912 * enable is not set.
1913 * read_length = ceiling((max_source_attr + 1) / 2)
1914 *
1915 * [errata] Corruption/Hang possible if length programmed larger than
1916 * recommended"
1917 *
1918 * Similar text exists for Ivy Bridge.
1919 *
1920 * We find the last URB slot that's actually read by the FS.
1921 */
1922 unsigned last_read_slot = last_vue_map->num_slots - 1;
1923 while (last_read_slot > first_slot && !(fs_input_slots &
1924 (1ull << last_vue_map->slot_to_varying[last_read_slot])))
1925 --last_read_slot;
1926
1927 /* The URB read length is the difference of the two, counted in pairs. */
1928 *out_length = DIV_ROUND_UP(last_read_slot - first_slot + 1, 2);
1929 }
1930
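/**
 * Emit 3DSTATE_SBE_SWIZ, building SF_OUTPUT_ATTRIBUTE_DETAIL overrides that
 * map each FS input to its VUE slot, supply constants for inputs the
 * previous stage didn't write, and select back-facing color swizzling when
 * two-sided lighting is enabled.
 */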
1931 static void
1932 iris_emit_sbe_swiz(struct iris_batch *batch,
1933 const struct iris_context *ice,
1934 unsigned urb_read_offset)
1935 {
1936 struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = {};
1937 const struct brw_wm_prog_data *wm_prog_data = (void *)
1938 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
1939 const struct brw_vue_map *vue_map = ice->shaders.last_vue_map;
1940 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
1941
1942 /* XXX: this should be generated when putting programs in place */
1943
1944 // XXX: raster->sprite_coord_enable
1945
1946 for (int fs_attr = 0; fs_attr < VARYING_SLOT_MAX; fs_attr++) {
1947 const int input_index = wm_prog_data->urb_setup[fs_attr];
1948 if (input_index < 0 || input_index >= 16)
1949 continue;
1950
1951 struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr =
1952 &attr_overrides[input_index];
1953
1954 /* Viewport and Layer are stored in the VUE header. We need to override
1955 * them to zero if earlier stages didn't write them, as GL requires that
1956 * they read back as zero when not explicitly set.
1957 */
1958 switch (fs_attr) {
1959 case VARYING_SLOT_VIEWPORT:
1960 case VARYING_SLOT_LAYER:
1961 attr->ComponentOverrideX = true;
1962 attr->ComponentOverrideW = true;
1963 attr->ConstantSource = CONST_0000;
1964
1965 if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
1966 attr->ComponentOverrideY = true;
1967 if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
1968 attr->ComponentOverrideZ = true;
1969 continue;
1970
1971 case VARYING_SLOT_PRIMITIVE_ID:
1972 attr->ComponentOverrideX = true;
1973 attr->ComponentOverrideY = true;
1974 attr->ComponentOverrideZ = true;
1975 attr->ComponentOverrideW = true;
1976 attr->ConstantSource = PRIM_ID;
1977 continue;
1978
1979 default:
1980 break;
1981 }
1982
1983 int slot = vue_map->varying_to_slot[fs_attr];
1984
1985 /* If there was only a back color written but not front, use back
1986 * as the color instead of undefined.
1987 */
1988 if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
1989 slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
1990 if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
1991 slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];
1992
1993 /* Not written by the previous stage - undefined. */
1994 if (slot == -1) {
1995 attr->ComponentOverrideX = true;
1996 attr->ComponentOverrideY = true;
1997 attr->ComponentOverrideZ = true;
1998 attr->ComponentOverrideW = true;
1999 attr->ConstantSource = CONST_0001_FLOAT;
2000 continue;
2001 }
2002
2003 /* Compute the location of the attribute relative to the read offset,
2004 * which is counted in 256-bit increments (two 128-bit VUE slots).
2005 */
2006 const int source_attr = slot - 2 * urb_read_offset;
2007 assert(source_attr >= 0 && source_attr <= 32);
2008 attr->SourceAttribute = source_attr;
2009
2010 /* If we are doing two-sided color, and the VUE slot following this one
2011 * represents a back-facing color, then we need to instruct the SF unit
2012 * to do back-facing swizzling.
2013 */
2014 if (cso_rast->light_twoside &&
2015 ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
2016 vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
2017 (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
2018 vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1)))
2019 attr->SwizzleSelect = INPUTATTR_FACING;
2020 }
2021
2022 iris_emit_cmd(batch, GENX(3DSTATE_SBE_SWIZ), sbes) {
2023 for (int i = 0; i < 16; i++)
2024 sbes.Attribute[i] = attr_overrides[i];
2025 }
2026 }
2027
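/**
 * Emit 3DSTATE_SBE (and 3DSTATE_SBE_SWIZ via iris_emit_sbe_swiz), describing
 * how the setup backend routes VUE attributes to the FS inputs.
 */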
2028 static void
2029 iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
2030 {
2031 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
2032 const struct brw_wm_prog_data *wm_prog_data = (void *)
2033 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
2034 struct pipe_shader_state *p_fs =
2035 (void *) ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
2036 assert(p_fs->type == PIPE_SHADER_IR_NIR);
2037 nir_shader *fs_nir = p_fs->ir.nir;
2038
2039 unsigned urb_read_offset, urb_read_length;
2040 iris_compute_sbe_urb_read_interval(fs_nir->info.inputs_read,
2041 ice->shaders.last_vue_map,
2042 cso_rast->light_twoside,
2043 &urb_read_offset, &urb_read_length);
2044
2045 iris_emit_cmd(batch, GENX(3DSTATE_SBE), sbe) {
2046 sbe.AttributeSwizzleEnable = true;
2047 sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
2048 sbe.PointSpriteTextureCoordinateOrigin = cso_rast->sprite_coord_mode;
2049 sbe.VertexURBEntryReadOffset = urb_read_offset;
2050 sbe.VertexURBEntryReadLength = urb_read_length;
2051 sbe.ForceVertexURBEntryReadOffset = true;
2052 sbe.ForceVertexURBEntryReadLength = true;
2053 sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
2054
2055 for (int i = 0; i < 32; i++) {
2056 sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
2057 }
2058 }
2059
2060 iris_emit_sbe_swiz(batch, ice, urb_read_offset);
2061 }
2062
2063 static void
2064 iris_bind_compute_state(struct pipe_context *ctx, void *state)
2065 {
2066 }
2067
2068 static void
2069 iris_populate_sampler_key(const struct iris_context *ice,
2070 struct brw_sampler_prog_key_data *key)
2071 {
2072 for (int i = 0; i < MAX_SAMPLERS; i++) {
2073 key->swizzles[i] = 0x688; /* XYZW */
2074 }
2075 }
2076
2077 static void
2078 iris_populate_vs_key(const struct iris_context *ice,
2079 struct brw_vs_prog_key *key)
2080 {
2081 memset(key, 0, sizeof(*key));
2082 iris_populate_sampler_key(ice, &key->tex);
2083 }
2084
2085 static void
2086 iris_populate_tcs_key(const struct iris_context *ice,
2087 struct brw_tcs_prog_key *key)
2088 {
2089 memset(key, 0, sizeof(*key));
2090 iris_populate_sampler_key(ice, &key->tex);
2091 }
2092
2093 static void
2094 iris_populate_tes_key(const struct iris_context *ice,
2095 struct brw_tes_prog_key *key)
2096 {
2097 memset(key, 0, sizeof(*key));
2098 iris_populate_sampler_key(ice, &key->tex);
2099 }
2100
2101 static void
2102 iris_populate_gs_key(const struct iris_context *ice,
2103 struct brw_gs_prog_key *key)
2104 {
2105 memset(key, 0, sizeof(*key));
2106 iris_populate_sampler_key(ice, &key->tex);
2107 }
2108
2109 static void
2110 iris_populate_fs_key(const struct iris_context *ice,
2111 struct brw_wm_prog_key *key)
2112 {
2113 memset(key, 0, sizeof(*key));
2114 iris_populate_sampler_key(ice, &key->tex);
2115
2116 /* XXX: dirty flags? */
2117 const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
2118 const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa;
2119 const struct iris_rasterizer_state *rast = ice->state.cso_rast;
2120 const struct iris_blend_state *blend = ice->state.cso_blend;
2121
2122 key->nr_color_regions = fb->nr_cbufs;
2123
2124 key->clamp_fragment_color = rast->clamp_fragment_color;
2125
2126 key->replicate_alpha = fb->nr_cbufs > 1 &&
2127 (zsa->alpha.enabled || blend->alpha_to_coverage);
2128
2129 /* XXX: only bother if COL0/1 are read */
2130 key->flat_shade = rast->flatshade;
2131
2132 // key->force_dual_color_blend for unigine
2133 #if 0
2134 if (cso_rast->multisample) {
2135 key->persample_interp =
2136 ctx->Multisample.SampleShading &&
2137 (ctx->Multisample.MinSampleShadingValue *
2138 _mesa_geometric_samples(ctx->DrawBuffer) > 1);
2139
2140 key->multisample_fbo = fb->samples > 1;
2141 }
2142 #endif
2143
2144 key->coherent_fb_fetch = true;
2145 }
2146
2147 #if 0
2148 // XXX: these need to go in INIT_THREAD_DISPATCH_FIELDS
2149 pkt.SamplerCount = \
2150 DIV_ROUND_UP(CLAMP(stage_state->sampler_count, 0, 16), 4); \
2151 pkt.PerThreadScratchSpace = prog_data->total_scratch == 0 ? 0 : \
2152 ffs(stage_state->per_thread_scratch) - 11; \
2153
2154 #endif
2155
2156 static uint64_t
2157 KSP(const struct iris_compiled_shader *shader)
2158 {
2159 struct iris_resource *res = (void *) shader->assembly.res;
2160 return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
2161 }
2162
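/* Thread-dispatch fields common to 3DSTATE_VS/HS/DS/GS: kernel pointer,
 * binding table size, URB read offset/length, and the enable bits.  Sampler
 * count and scratch space still need to be handled (see the #if 0 above).
 */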
2163 #define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix) \
2164 pkt.KernelStartPointer = KSP(shader); \
2165 pkt.BindingTableEntryCount = prog_data->binding_table.size_bytes / 4; \
2166 pkt.FloatingPointMode = prog_data->use_alt_mode; \
2167 \
2168 pkt.DispatchGRFStartRegisterForURBData = \
2169 prog_data->dispatch_grf_start_reg; \
2170 pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length; \
2171 pkt.prefix##URBEntryReadOffset = 0; \
2172 \
2173 pkt.StatisticsEnable = true; \
2174 pkt.Enable = true;
2175
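/**
 * The iris_store_*_state() functions below pack each stage's hardware
 * commands into shader->derived_data when the variant is stored in the
 * program cache, so iris_upload_render_state() can simply copy them into
 * the batch.
 */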
2176 static void
2177 iris_store_vs_state(const struct gen_device_info *devinfo,
2178 struct iris_compiled_shader *shader)
2179 {
2180 struct brw_stage_prog_data *prog_data = shader->prog_data;
2181 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
2182
2183 iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
2184 INIT_THREAD_DISPATCH_FIELDS(vs, Vertex);
2185 vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
2186 vs.SIMD8DispatchEnable = true;
2187 vs.UserClipDistanceCullTestEnableBitmask =
2188 vue_prog_data->cull_distance_mask;
2189 }
2190 }
2191
2192 static void
2193 iris_store_tcs_state(const struct gen_device_info *devinfo,
2194 struct iris_compiled_shader *shader)
2195 {
2196 struct brw_stage_prog_data *prog_data = shader->prog_data;
2197 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
2198 struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;
2199
2200 iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
2201 INIT_THREAD_DISPATCH_FIELDS(hs, Vertex);
2202
2203 hs.InstanceCount = tcs_prog_data->instances - 1;
2204 hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
2205 hs.IncludeVertexHandles = true;
2206 }
2207 }
2208
2209 static void
2210 iris_store_tes_state(const struct gen_device_info *devinfo,
2211 struct iris_compiled_shader *shader)
2212 {
2213 struct brw_stage_prog_data *prog_data = shader->prog_data;
2214 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
2215 struct brw_tes_prog_data *tes_prog_data = (void *) prog_data;
2216
2217 uint32_t *te_state = (void *) shader->derived_data;
2218 uint32_t *ds_state = te_state + GENX(3DSTATE_TE_length);
2219
2220 iris_pack_command(GENX(3DSTATE_TE), te_state, te) {
2221 te.Partitioning = tes_prog_data->partitioning;
2222 te.OutputTopology = tes_prog_data->output_topology;
2223 te.TEDomain = tes_prog_data->domain;
2224 te.TEEnable = true;
2225 te.MaximumTessellationFactorOdd = 63.0;
2226 te.MaximumTessellationFactorNotOdd = 64.0;
2227 }
2228
2229 iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
2230 INIT_THREAD_DISPATCH_FIELDS(ds, Patch);
2231
2232 ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
2233 ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
2234 ds.ComputeWCoordinateEnable =
2235 tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;
2236
2237 ds.UserClipDistanceCullTestEnableBitmask =
2238 vue_prog_data->cull_distance_mask;
2239 }
2240
2241 }
2242
2243 static void
2244 iris_store_gs_state(const struct gen_device_info *devinfo,
2245 struct iris_compiled_shader *shader)
2246 {
2247 struct brw_stage_prog_data *prog_data = shader->prog_data;
2248 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
2249 struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;
2250
2251 iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
2252 INIT_THREAD_DISPATCH_FIELDS(gs, Vertex);
2253
2254 gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
2255 gs.OutputTopology = gs_prog_data->output_topology;
2256 gs.ControlDataHeaderSize =
2257 gs_prog_data->control_data_header_size_hwords;
2258 gs.InstanceControl = gs_prog_data->invocations - 1;
2259 gs.DispatchMode = DISPATCH_MODE_SIMD8;
2260 gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
2261 gs.ControlDataFormat = gs_prog_data->control_data_format;
2262 gs.ReorderMode = TRAILING;
2263 gs.ExpectedVertexCount = gs_prog_data->vertices_in;
2264 gs.MaximumNumberofThreads =
2265 GEN_GEN == 8 ? (devinfo->max_gs_threads / 2 - 1)
2266 : (devinfo->max_gs_threads - 1);
2267
2268 if (gs_prog_data->static_vertex_count != -1) {
2269 gs.StaticOutput = true;
2270 gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
2271 }
2272 gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;
2273
2274 gs.UserClipDistanceCullTestEnableBitmask =
2275 vue_prog_data->cull_distance_mask;
2276
2277 const int urb_entry_write_offset = 1;
2278 const uint32_t urb_entry_output_length =
2279 DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
2280 urb_entry_write_offset;
2281
2282 gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
2283 gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
2284 }
2285 }
2286
2287 static void
2288 iris_store_fs_state(const struct gen_device_info *devinfo,
2289 struct iris_compiled_shader *shader)
2290 {
2291 struct brw_stage_prog_data *prog_data = shader->prog_data;
2292 struct brw_wm_prog_data *wm_prog_data = (void *) shader->prog_data;
2293
2294 uint32_t *ps_state = (void *) shader->derived_data;
2295 uint32_t *psx_state = ps_state + GENX(3DSTATE_PS_length);
2296
2297 iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
2298 ps.VectorMaskEnable = true;
2299 //ps.SamplerCount = ...
2300 ps.BindingTableEntryCount = prog_data->binding_table.size_bytes / 4;
2301 ps.FloatingPointMode = prog_data->use_alt_mode;
2302 ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);
2303
2304 ps.PushConstantEnable = prog_data->nr_params > 0 ||
2305 prog_data->ubo_ranges[0].length > 0;
2306
2307 /* From the documentation for this packet:
2308 * "If the PS kernel does not need the Position XY Offsets to
2309 * compute a Position Value, then this field should be programmed
2310 * to POSOFFSET_NONE."
2311 *
2312 * "SW Recommendation: If the PS kernel needs the Position Offsets
2313 * to compute a Position XY value, this field should match Position
2314 * ZW Interpolation Mode to ensure a consistent position.xyzw
2315 * computation."
2316 *
2317 * We only require XY sample offsets, so this recommendation doesn't
2318 * seem useful at the moment. We may need it in the future.
2319 */
2320 ps.PositionXYOffsetSelect =
2321 wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;
2322 ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
2323 ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
2324 ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
2325
2326 // XXX: Disable SIMD32 with 16x MSAA
2327
2328 ps.DispatchGRFStartRegisterForConstantSetupData0 =
2329 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
2330 ps.DispatchGRFStartRegisterForConstantSetupData1 =
2331 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
2332 ps.DispatchGRFStartRegisterForConstantSetupData2 =
2333 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
2334
2335 ps.KernelStartPointer0 =
2336 KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
2337 ps.KernelStartPointer1 =
2338 KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
2339 ps.KernelStartPointer2 =
2340 KSP(shader) + brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
2341 }
2342
2343 iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
2344 psx.PixelShaderValid = true;
2345 psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
2346 psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
2347 psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
2348 psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
2349 psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
2350 psx.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;
2351
2352 if (wm_prog_data->uses_sample_mask) {
2353 /* TODO: conservative rasterization */
2354 if (wm_prog_data->post_depth_coverage)
2355 psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
2356 else
2357 psx.InputCoverageMaskState = ICMS_NORMAL;
2358 }
2359
2360 psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
2361 psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
2362 psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
2363
2364 // XXX: UAV bit
2365 }
2366 }
2367
2368 static unsigned
2369 iris_derived_program_state_size(enum iris_program_cache_id cache_id)
2370 {
2371 assert(cache_id <= IRIS_CACHE_BLORP);
2372
2373 static const unsigned dwords[] = {
2374 [IRIS_CACHE_VS] = GENX(3DSTATE_VS_length),
2375 [IRIS_CACHE_TCS] = GENX(3DSTATE_HS_length),
2376 [IRIS_CACHE_TES] = GENX(3DSTATE_TE_length) + GENX(3DSTATE_DS_length),
2377 [IRIS_CACHE_GS] = GENX(3DSTATE_GS_length),
2378 [IRIS_CACHE_FS] =
2379 GENX(3DSTATE_PS_length) + GENX(3DSTATE_PS_EXTRA_length),
2380 [IRIS_CACHE_CS] = 0,
2381 [IRIS_CACHE_BLORP] = 0,
2382 };
2383
2384 return sizeof(uint32_t) * dwords[cache_id];
2385 }
2386
2387 static void
2388 iris_store_derived_program_state(const struct gen_device_info *devinfo,
2389 enum iris_program_cache_id cache_id,
2390 struct iris_compiled_shader *shader)
2391 {
2392 switch (cache_id) {
2393 case IRIS_CACHE_VS:
2394 iris_store_vs_state(devinfo, shader);
2395 break;
2396 case IRIS_CACHE_TCS:
2397 iris_store_tcs_state(devinfo, shader);
2398 break;
2399 case IRIS_CACHE_TES:
2400 iris_store_tes_state(devinfo, shader);
2401 break;
2402 case IRIS_CACHE_GS:
2403 iris_store_gs_state(devinfo, shader);
2404 break;
2405 case IRIS_CACHE_FS:
2406 iris_store_fs_state(devinfo, shader);
2407 break;
2408 case IRIS_CACHE_CS:
2409 case IRIS_CACHE_BLORP:
2410 break;
2411 default:
2412 break;
2413 }
2414 }
2415
2416 static void
2417 iris_upload_urb_config(struct iris_context *ice, struct iris_batch *batch)
2418 {
2419 const struct gen_device_info *devinfo = &batch->screen->devinfo;
2420 const unsigned push_size_kB = 32;
2421 unsigned entries[4];
2422 unsigned start[4];
2423 unsigned size[4];
2424
2425 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
2426 if (!ice->shaders.prog[i]) {
2427 size[i] = 1;
2428 } else {
2429 struct brw_vue_prog_data *vue_prog_data =
2430 (void *) ice->shaders.prog[i]->prog_data;
2431 size[i] = vue_prog_data->urb_entry_size;
2432 }
2433 assert(size[i] != 0);
2434 }
2435
2436 gen_get_urb_config(devinfo, 1024 * push_size_kB,
2437 1024 * ice->shaders.urb_size,
2438 ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
2439 ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,
2440 size, entries, start);
2441
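/* 3DSTATE_URB_{VS,HS,DS,GS} differ only in their subopcode, so pack the VS
 * version and bump _3DCommandSubOpcode for each successive stage.
 */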
2442 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
2443 iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
2444 urb._3DCommandSubOpcode += i;
2445 urb.VSURBStartingAddress = start[i];
2446 urb.VSURBEntryAllocationSize = size[i] - 1;
2447 urb.VSNumberofURBEntries = entries[i];
2448 }
2449 }
2450 }
2451
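/* _3DCommandSubOpcode values for the per-stage 3DSTATE_CONSTANT_* commands,
 * substituted into a packed 3DSTATE_CONSTANT_VS template at emit time.
 */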
2452 static const uint32_t push_constant_opcodes[] = {
2453 [MESA_SHADER_VERTEX] = 21,
2454 [MESA_SHADER_TESS_CTRL] = 25, /* HS */
2455 [MESA_SHADER_TESS_EVAL] = 26, /* DS */
2456 [MESA_SHADER_GEOMETRY] = 22,
2457 [MESA_SHADER_FRAGMENT] = 23,
2458 [MESA_SHADER_COMPUTE] = 0,
2459 };
2460
2461 /**
2462 * Add a surface to the validation list, as well as the buffer containing
2463 * the corresponding SURFACE_STATE.
2464 *
2465 * Returns the binding table entry (offset to SURFACE_STATE).
2466 */
2467 static uint32_t
2468 use_surface(struct iris_batch *batch,
2469 struct pipe_surface *p_surf,
2470 bool writeable)
2471 {
2472 struct iris_surface *surf = (void *) p_surf;
2473
2474 iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture), writeable);
2475 iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.res), false);
2476
2477 return surf->surface_state.offset;
2478 }
2479
2480 static uint32_t
2481 use_sampler_view(struct iris_batch *batch, struct iris_sampler_view *isv)
2482 {
2483 iris_use_pinned_bo(batch, iris_resource_bo(isv->pipe.texture), false);
2484 iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.res), false);
2485
2486 return isv->surface_state.offset;
2487 }
2488
2489 static uint32_t
2490 use_const_buffer(struct iris_batch *batch, struct iris_const_buffer *cbuf)
2491 {
2492 iris_use_pinned_bo(batch, iris_resource_bo(cbuf->data.res), false);
2493 iris_use_pinned_bo(batch, iris_resource_bo(cbuf->surface_state.res), false);
2494
2495 return cbuf->surface_state.offset;
2496 }
2497
2498 static uint32_t
2499 use_null_surface(struct iris_batch *batch, struct iris_context *ice)
2500 {
2501 struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);
2502
2503 iris_use_pinned_bo(batch, state_bo, false);
2504
2505 return ice->state.unbound_tex.offset;
2506 }
2507
2508 static void
2509 iris_populate_binding_table(struct iris_context *ice,
2510 struct iris_batch *batch,
2511 gl_shader_stage stage)
2512 {
2513 const struct iris_binder *binder = &batch->binder;
2514 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
2515 if (!shader)
2516 return;
2517
2518 // Surfaces:
2519 // - pull constants
2520 // - ubos/ssbos/abos
2521 // - images
2522 // - textures
2523 // - render targets - write and read
2524
2525 //struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
2526 uint32_t *bt_map = binder->map + binder->bt_offset[stage];
2527 int s = 0;
2528
2529 if (stage == MESA_SHADER_FRAGMENT) {
2530 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
2531 for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
2532 bt_map[s++] = use_surface(batch, cso_fb->cbufs[i], true);
2533 }
2534 }
2535
2536 //assert(prog_data->binding_table.texture_start ==
2537 //(ice->state.num_textures[stage] ? s : 0xd0d0d0d0));
2538
2539 for (int i = 0; i < ice->state.num_textures[stage]; i++) {
2540 struct iris_sampler_view *view = ice->state.textures[stage][i];
2541 bt_map[s++] = view ? use_sampler_view(batch, view)
2542 : use_null_surface(batch, ice);
2543 }
2544
2545 // XXX: want the number of BTEs (binding table entries) to shorten this loop
2546 struct iris_shader_state *shs = &ice->shaders.state[stage];
2547 for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
2548 struct iris_const_buffer *cbuf = &shs->constbuf[i];
2549 if (!cbuf->surface_state.res)
2550 break;
2551
2552 bt_map[s++] = use_const_buffer(batch, cbuf);
2553 }
2554 #if 0
2555 // XXX: not implemented yet
2556 assert(prog_data->binding_table.pull_constants_start == 0xd0d0d0d0);
2557 assert(prog_data->binding_table.ubo_start == 0xd0d0d0d0);
2558 assert(prog_data->binding_table.ssbo_start == 0xd0d0d0d0);
2559 assert(prog_data->binding_table.image_start == 0xd0d0d0d0);
2560 assert(prog_data->binding_table.shader_time_start == 0xd0d0d0d0);
2561 //assert(prog_data->binding_table.plane_start[1] == 0xd0d0d0d0);
2562 //assert(prog_data->binding_table.plane_start[2] == 0xd0d0d0d0);
2563 #endif
2564 }
2565
2566 static void
2567 iris_use_optional_res(struct iris_batch *batch,
2568 struct pipe_resource *res,
2569 bool writeable)
2570 {
2571 if (res) {
2572 struct iris_bo *bo = iris_resource_bo(res);
2573 iris_use_pinned_bo(batch, bo, writeable);
2574 }
2575 }
2576
2577
2578 /**
2579 * Pin any BOs which were installed by a previous batch, and restored
2580 * via the hardware logical context mechanism.
2581 *
2582 * We don't need to re-emit all state every batch - the hardware context
2583 * mechanism will save and restore it for us. This includes pointers to
2584 * various BOs...which won't exist unless we ask the kernel to pin them
2585 * by adding them to the validation list.
2586 *
2587 * We can skip buffers if we've re-emitted those packets, as we're
2588 * overwriting those stale pointers with new ones, and don't actually
2589 * refer to the old BOs.
2590 */
2591 static void
2592 iris_restore_context_saved_bos(struct iris_context *ice,
2593 struct iris_batch *batch,
2594 const struct pipe_draw_info *draw)
2595 {
2596 // XXX: whack IRIS_SHADER_DIRTY_BINDING_TABLE on new batch
2597
2598 const uint64_t clean = ~ice->state.dirty;
2599
2600 if (clean & IRIS_DIRTY_CC_VIEWPORT) {
2601 iris_use_optional_res(batch, ice->state.last_res.cc_vp, false);
2602 }
2603
2604 if (clean & IRIS_DIRTY_SF_CL_VIEWPORT) {
2605 iris_use_optional_res(batch, ice->state.last_res.sf_cl_vp, false);
2606 }
2607
2608 if (clean & IRIS_DIRTY_BLEND_STATE) {
2609 iris_use_optional_res(batch, ice->state.last_res.blend, false);
2610 }
2611
2612 if (clean & IRIS_DIRTY_COLOR_CALC_STATE) {
2613 iris_use_optional_res(batch, ice->state.last_res.color_calc, false);
2614 }
2615
2616 if (clean & IRIS_DIRTY_SCISSOR_RECT) {
2617 iris_use_optional_res(batch, ice->state.last_res.scissor, false);
2618 }
2619
2620 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
2621 if (clean & (IRIS_DIRTY_CONSTANTS_VS << stage))
2622 continue;
2623
2624 struct iris_shader_state *shs = &ice->shaders.state[stage];
2625 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
2626
2627 if (!shader)
2628 continue;
2629
2630 struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
2631
2632 for (int i = 0; i < 4; i++) {
2633 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
2634
2635 if (range->length == 0)
2636 continue;
2637
2638 struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
2639 struct iris_resource *res = (void *) cbuf->data.res;
2640
2641 if (res)
2642 iris_use_pinned_bo(batch, res->bo, false);
2643 else
2644 iris_use_pinned_bo(batch, batch->screen->workaround_bo, false);
2645 }
2646 }
2647
2648 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
2649 struct pipe_resource *res = ice->state.sampler_table[stage].res;
2650 if (res)
2651 iris_use_pinned_bo(batch, iris_resource_bo(res), false);
2652 }
2653
2654 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
2655 if (clean & (IRIS_DIRTY_VS << stage)) {
2656 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
2657 if (shader) {
2658 struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
2659 iris_use_pinned_bo(batch, bo, false);
2660 }
2661
2662 // XXX: scratch buffer
2663 }
2664 }
2665
2666 if (clean & IRIS_DIRTY_DEPTH_BUFFER) {
2667 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
2668
2669 if (cso_fb->zsbuf) {
2670 struct iris_resource *zres = (void *) cso_fb->zsbuf->texture;
2671 // XXX: depth might not be writable...
2672 iris_use_pinned_bo(batch, zres->bo, true);
2673 }
2674 }
2675
2676 if (draw->index_size > 0) {
2677 // XXX: index buffer
2678 }
2679
2680 if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
2681 struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;
2682 for (unsigned i = 0; i < cso->num_buffers; i++) {
2683 struct iris_resource *res = (void *) cso->resources[i];
2684 iris_use_pinned_bo(batch, res->bo, false);
2685 }
2686 }
2687 }
2688
2689 static void
2690 iris_upload_render_state(struct iris_context *ice,
2691 struct iris_batch *batch,
2692 const struct pipe_draw_info *draw)
2693 {
2694 const uint64_t dirty = ice->state.dirty;
2695
2696 struct iris_genx_state *genx = ice->state.genx;
2697 struct brw_wm_prog_data *wm_prog_data = (void *)
2698 ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
2699
2700 if (dirty & IRIS_DIRTY_CC_VIEWPORT) {
2701 struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
2702 iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
2703 ptr.CCViewportPointer =
2704 emit_state(batch, ice->state.dynamic_uploader,
2705 &ice->state.last_res.cc_vp,
2706 cso->cc_vp, sizeof(cso->cc_vp), 32);
2707 }
2708 }
2709
2710 if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
2711 struct iris_viewport_state *cso = &ice->state.genx->viewport;
2712 iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
2713 ptr.SFClipViewportPointer =
2714 emit_state(batch, ice->state.dynamic_uploader,
2715 &ice->state.last_res.sf_cl_vp,
2716 cso->sf_cl_vp, 4 * GENX(SF_CLIP_VIEWPORT_length) *
2717 ice->state.num_viewports, 64);
2718 }
2719 }
2720
2721 /* XXX: L3 State */
2722
2723 // XXX: this is only flagged at setup; we assume a static configuration
2724 if (dirty & IRIS_DIRTY_URB) {
2725 iris_upload_urb_config(ice, batch);
2726 }
2727
2728 if (dirty & IRIS_DIRTY_BLEND_STATE) {
2729 struct iris_blend_state *cso_blend = ice->state.cso_blend;
2730 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
2731 struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
2732 const int num_dwords = 4 * (GENX(BLEND_STATE_length) +
2733 cso_fb->nr_cbufs * GENX(BLEND_STATE_ENTRY_length));
2734 uint32_t blend_offset;
2735 uint32_t *blend_map =
2736 stream_state(batch, ice->state.dynamic_uploader,
2737 &ice->state.last_res.blend,
2738 4 * num_dwords, 64, &blend_offset);
2739
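/* The blend CSO packs BLEND_STATE without the alpha test fields, which live
 * in the depth/stencil/alpha CSO; pack just the header DWord here and OR it
 * into the first DWord of the streamed copy.
 */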
2740 uint32_t blend_state_header;
2741 iris_pack_state(GENX(BLEND_STATE), &blend_state_header, bs) {
2742 bs.AlphaTestEnable = cso_zsa->alpha.enabled;
2743 bs.AlphaTestFunction = translate_compare_func(cso_zsa->alpha.func);
2744 }
2745
2746 blend_map[0] = blend_state_header | cso_blend->blend_state[0];
2747 memcpy(&blend_map[1], &cso_blend->blend_state[1],
2748 sizeof(cso_blend->blend_state) - sizeof(uint32_t));
2749
2750 iris_emit_cmd(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
2751 ptr.BlendStatePointer = blend_offset;
2752 ptr.BlendStatePointerValid = true;
2753 }
2754 }
2755
2756 if (dirty & IRIS_DIRTY_COLOR_CALC_STATE) {
2757 struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
2758 uint32_t cc_offset;
2759 void *cc_map =
2760 stream_state(batch, ice->state.dynamic_uploader,
2761 &ice->state.last_res.color_calc,
2762 sizeof(uint32_t) * GENX(COLOR_CALC_STATE_length),
2763 64, &cc_offset);
2764 iris_pack_state(GENX(COLOR_CALC_STATE), cc_map, cc) {
2765 cc.AlphaTestFormat = ALPHATEST_FLOAT32;
2766 cc.AlphaReferenceValueAsFLOAT32 = cso->alpha.ref_value;
2767 cc.BlendConstantColorRed = ice->state.blend_color.color[0];
2768 cc.BlendConstantColorGreen = ice->state.blend_color.color[1];
2769 cc.BlendConstantColorBlue = ice->state.blend_color.color[2];
2770 cc.BlendConstantColorAlpha = ice->state.blend_color.color[3];
2771 }
2772 iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
2773 ptr.ColorCalcStatePointer = cc_offset;
2774 ptr.ColorCalcStatePointerValid = true;
2775 }
2776 }
2777
2778 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
2779 // XXX: wrong dirty tracking...
2780 if (!(dirty & (IRIS_DIRTY_CONSTANTS_VS << stage)))
2781 continue;
2782
2783 struct iris_shader_state *shs = &ice->shaders.state[stage];
2784 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
2785
2786 if (!shader)
2787 continue;
2788
2789 struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
2790
2791 iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
2792 pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
2793 if (prog_data) {
2794 /* The Skylake PRM contains the following restriction:
2795 *
2796 * "The driver must ensure The following case does not occur
2797 * without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
2798 * buffer 3 read length equal to zero committed followed by a
2799 * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to
2800 * zero committed."
2801 *
2802 * To avoid this, we program the buffers in the highest slots.
2803 * This way, slot 0 is only used if slot 3 is also used.
2804 */
2805 int n = 3;
2806
2807 for (int i = 3; i >= 0; i--) {
2808 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
2809
2810 if (range->length == 0)
2811 continue;
2812
2813 // XXX: is range->block a constbuf index? it would be nice
2814 struct iris_const_buffer *cbuf = &shs->constbuf[range->block];
2815 struct iris_resource *res = (void *) cbuf->data.res;
2816
2817 assert(cbuf->data.offset % 32 == 0);
2818
2819 pkt.ConstantBody.ReadLength[n] = range->length;
2820 pkt.ConstantBody.Buffer[n] =
2821 res ? ro_bo(res->bo, range->start * 32 + cbuf->data.offset)
2822 : ro_bo(batch->screen->workaround_bo, 0);
2823 n--;
2824 }
2825 }
2826 }
2827 }
2828
2829 struct iris_binder *binder = &batch->binder;
2830
2831 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
2832 if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
2833 iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
2834 ptr._3DCommandSubOpcode = 38 + stage;
2835 ptr.PointertoVSBindingTable = binder->bt_offset[stage];
2836 }
2837 }
2838 }
2839
2840 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
2841 if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
2842 iris_populate_binding_table(ice, batch, stage);
2843 }
2844 }
2845
2846 if (ice->state.need_border_colors)
2847 iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false);
2848
2849 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
2850 if (!(dirty & (IRIS_DIRTY_SAMPLER_STATES_VS << stage)) ||
2851 !ice->shaders.prog[stage])
2852 continue;
2853
2854 struct pipe_resource *res = ice->state.sampler_table[stage].res;
2855 if (res)
2856 iris_use_pinned_bo(batch, iris_resource_bo(res), false);
2857
2858 iris_emit_cmd(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
2859 ptr._3DCommandSubOpcode = 43 + stage;
2860 ptr.PointertoVSSamplerState = ice->state.sampler_table[stage].offset;
2861 }
2862 }
2863
2864 if (dirty & IRIS_DIRTY_MULTISAMPLE) {
2865 iris_emit_cmd(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
2866 ms.PixelLocation =
2867 ice->state.cso_rast->half_pixel_center ? CENTER : UL_CORNER;
2868 if (ice->state.framebuffer.samples > 0)
2869 ms.NumberofMultisamples = ffs(ice->state.framebuffer.samples) - 1;
2870 }
2871 }
2872
2873 if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
2874 iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
2875 ms.SampleMask = MAX2(ice->state.sample_mask, 1);
2876 }
2877 }
2878
2879 for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
2880 if (!(dirty & (IRIS_DIRTY_VS << stage)))
2881 continue;
2882
2883 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
2884
2885 if (shader) {
2886 struct iris_resource *cache = (void *) shader->assembly.res;
2887 iris_use_pinned_bo(batch, cache->bo, false);
2888 iris_batch_emit(batch, shader->derived_data,
2889 iris_derived_program_state_size(stage));
2890 } else {
2891 if (stage == MESA_SHADER_TESS_EVAL) {
2892 iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
2893 iris_emit_cmd(batch, GENX(3DSTATE_TE), te);
2894 iris_emit_cmd(batch, GENX(3DSTATE_DS), ds);
2895 } else if (stage == MESA_SHADER_GEOMETRY) {
2896 iris_emit_cmd(batch, GENX(3DSTATE_GS), gs);
2897 }
2898 }
2899 }
2900
2901 if (dirty & IRIS_DIRTY_SO_BUFFERS) {
2902 iris_batch_emit(batch, genx->so_buffers,
2903 4 * 4 * GENX(3DSTATE_SO_BUFFER_length));
2904 }
2905
2906 if ((dirty & IRIS_DIRTY_SO_DECL_LIST) && ice->state.streamout) {
2907 uint32_t *decl_list =
2908 ice->state.streamout + GENX(3DSTATE_STREAMOUT_length);
2909 iris_batch_emit(batch, decl_list, 4 * ((decl_list[0] & 0xff) + 2));
2910 }
2911
2912 if (dirty & IRIS_DIRTY_STREAMOUT) {
2913 const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
2914
2915 if (!ice->state.streamout_active) {
2916 iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
2917 } else {
2918 uint32_t dynamic_sol[GENX(3DSTATE_STREAMOUT_length)];
2919 iris_pack_command(GENX(3DSTATE_STREAMOUT), dynamic_sol, sol) {
2920 sol.SOFunctionEnable = true;
2921 sol.SOStatisticsEnable = true;
2922
2923 // XXX: GL_PRIMITIVES_GENERATED query
2924 sol.RenderingDisable = cso_rast->rasterizer_discard;
2925 sol.ReorderMode = cso_rast->flatshade_first ? LEADING : TRAILING;
2926 }
2927
2928 assert(ice->state.streamout);
2929
2930 iris_emit_merge(batch, ice->state.streamout, dynamic_sol,
2931 GENX(3DSTATE_STREAMOUT_length));
2932 }
2933 }
2934
2935 if (dirty & IRIS_DIRTY_CLIP) {
2936 struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
2937 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
2938
2939 uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
2940 iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
2941 if (wm_prog_data->barycentric_interp_modes &
2942 BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
2943 cl.NonPerspectiveBarycentricEnable = true;
2944
2945 cl.ForceZeroRTAIndexEnable = cso_fb->layers == 0;
2946 cl.MaximumVPIndex = ice->state.num_viewports - 1;
2947 }
2948 iris_emit_merge(batch, cso_rast->clip, dynamic_clip,
2949 ARRAY_SIZE(cso_rast->clip));
2950 }
2951
2952 if (dirty & IRIS_DIRTY_RASTER) {
2953 struct iris_rasterizer_state *cso = ice->state.cso_rast;
2954 iris_batch_emit(batch, cso->raster, sizeof(cso->raster));
2955 iris_batch_emit(batch, cso->sf, sizeof(cso->sf));
2956
2957 }
2958
2959 /* XXX: FS program updates need to flag IRIS_DIRTY_WM */
2960 if (dirty & IRIS_DIRTY_WM) {
2961 struct iris_rasterizer_state *cso = ice->state.cso_rast;
2962 uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];
2963
2964 iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
2965 wm.BarycentricInterpolationMode =
2966 wm_prog_data->barycentric_interp_modes;
2967
2968 if (wm_prog_data->early_fragment_tests)
2969 wm.EarlyDepthStencilControl = EDSC_PREPS;
2970 else if (wm_prog_data->has_side_effects)
2971 wm.EarlyDepthStencilControl = EDSC_PSEXEC;
2972 }
2973 iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
2974 }
2975
2976 if (1) {
2977 // XXX: 3DSTATE_SBE, 3DSTATE_SBE_SWIZ
2978 // -> iris_raster_state (point sprite texture coordinate origin)
2979 // -> bunch of shader state...
2980 iris_emit_sbe(batch, ice);
2981 }
2982
2983 if (dirty & IRIS_DIRTY_PS_BLEND) {
2984 struct iris_blend_state *cso_blend = ice->state.cso_blend;
2985 struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
2986 uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
2987 iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
2988 pb.HasWriteableRT = true; // XXX: comes from somewhere :(
2989 pb.AlphaTestEnable = cso_zsa->alpha.enabled;
2990 }
2991
2992 iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
2993 ARRAY_SIZE(cso_blend->ps_blend));
2994 }
2995
2996 if (dirty & IRIS_DIRTY_WM_DEPTH_STENCIL) {
2997 struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
2998 struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
2999
3000 uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
3001 iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
3002 wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
3003 wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
3004 }
3005 iris_emit_merge(batch, cso->wmds, stencil_refs, ARRAY_SIZE(cso->wmds));
3006 }
3007
3008 if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
3009 uint32_t scissor_offset =
3010 emit_state(batch, ice->state.dynamic_uploader,
3011 &ice->state.last_res.scissor,
3012 ice->state.scissors,
3013 sizeof(struct pipe_scissor_state) *
3014 ice->state.num_viewports, 32);
3015
3016 iris_emit_cmd(batch, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
3017 ptr.ScissorRectPointer = scissor_offset;
3018 }
3019 }
3020
3021 if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
3022 struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
3023 struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
3024
3025 iris_batch_emit(batch, cso_z->packets, sizeof(cso_z->packets));
3026
3027 if (cso_fb->zsbuf) {
3028 struct iris_resource *zres = (void *) cso_fb->zsbuf->texture;
3029 // XXX: depth might not be writable...
3030 iris_use_pinned_bo(batch, zres->bo, true);
3031 }
3032 }
3033
3034 if (dirty & IRIS_DIRTY_POLYGON_STIPPLE) {
3035 iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
3036 for (int i = 0; i < 32; i++) {
3037 poly.PatternRow[i] = ice->state.poly_stipple.stipple[i];
3038 }
3039 }
3040 }
3041
3042 if (dirty & IRIS_DIRTY_LINE_STIPPLE) {
3043 struct iris_rasterizer_state *cso = ice->state.cso_rast;
3044 iris_batch_emit(batch, cso->line_stipple, sizeof(cso->line_stipple));
3045 }
3046
3047 if (1) {
3048 iris_emit_cmd(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
3049 topo.PrimitiveTopologyType =
3050 translate_prim_type(draw->mode, draw->vertices_per_patch);
3051 }
3052 }
3053
3054 if (draw->index_size > 0) {
3055 struct iris_resource *res = NULL;
3056 unsigned offset;
3057
3058 if (draw->has_user_indices) {
3059 u_upload_data(ice->ctx.stream_uploader, 0,
3060 draw->count * draw->index_size, 4, draw->index.user,
3061 &offset, (struct pipe_resource **) &res);
3062 } else {
3063 res = (struct iris_resource *) draw->index.resource;
3064 offset = 0;
3065 }
3066
3067 iris_emit_cmd(batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
3068 ib.IndexFormat = draw->index_size >> 1;
3069 ib.MOCS = MOCS_WB;
3070 ib.BufferSize = res->bo->size;
3071 ib.BufferStartingAddress = ro_bo(res->bo, offset);
3072 }
3073 }
3074
3075 if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
3076 struct iris_vertex_buffer_state *cso = &ice->state.genx->vertex_buffers;
3077 const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
3078
3079 if (cso->num_buffers > 0) {
3080 iris_batch_emit(batch, cso->vertex_buffers, sizeof(uint32_t) *
3081 (1 + vb_dwords * cso->num_buffers));
3082
3083 for (unsigned i = 0; i < cso->num_buffers; i++) {
3084 struct iris_resource *res = (void *) cso->resources[i];
3085 iris_use_pinned_bo(batch, res->bo, false);
3086 }
3087 }
3088 }
3089
3090 if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) {
3091 struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
3092 iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) *
3093 (1 + cso->count * GENX(VERTEX_ELEMENT_STATE_length)));
3094 iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) *
3095 cso->count * GENX(3DSTATE_VF_INSTANCING_length));
3096 for (int i = 0; i < cso->count; i++) {
3097 /* TODO: vertexid, instanceid support */
3098 iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgvs);
3099 }
3100 }
3101
3102 if (1) {
3103 iris_emit_cmd(batch, GENX(3DSTATE_VF), vf) {
3104 if (draw->primitive_restart) {
3105 vf.IndexedDrawCutIndexEnable = true;
3106 vf.CutIndex = draw->restart_index;
3107 }
3108 }
3109 }
3110
3111 // XXX: Gen8 - PMA fix
3112
3113 assert(!draw->indirect); // XXX: indirect support
3114
3115 iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
3116 prim.StartInstanceLocation = draw->start_instance;
3117 prim.InstanceCount = draw->instance_count;
3118 prim.VertexCountPerInstance = draw->count;
3119 prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;
3120
3121 // XXX: this is probably bonkers.
3122 prim.StartVertexLocation = draw->start;
3123
3124 if (draw->index_size) {
3125 prim.BaseVertexLocation += draw->index_bias;
3126 } else {
3127 prim.StartVertexLocation += draw->index_bias;
3128 }
3129
3130 //prim.BaseVertexLocation = ...;
3131 }
3132
3133 if (!batch->contains_draw) {
3134 iris_restore_context_saved_bos(ice, batch, draw);
3135 batch->contains_draw = true;
3136 }
3137 }
3138
3139 /**
3140 * State module teardown.
3141 */
3142 static void
3143 iris_destroy_state(struct iris_context *ice)
3144 {
3145 iris_free_vertex_buffers(&ice->state.genx->vertex_buffers);
3146
3147 // XXX: unreference resources/surfaces.
3148 for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
3149 pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
3150 }
3151 pipe_surface_reference(&ice->state.framebuffer.zsbuf, NULL);
3152
3153 for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
3154 pipe_resource_reference(&ice->state.sampler_table[stage].res, NULL);
3155 }
3156 free(ice->state.genx);
3157
3158 pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
3159 pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
3160 pipe_resource_reference(&ice->state.last_res.color_calc, NULL);
3161 pipe_resource_reference(&ice->state.last_res.scissor, NULL);
3162 pipe_resource_reference(&ice->state.last_res.blend, NULL);
3163 }
3164
3165 static unsigned
3166 flags_to_post_sync_op(uint32_t flags)
3167 {
3168 if (flags & PIPE_CONTROL_WRITE_IMMEDIATE)
3169 return WriteImmediateData;
3170
3171 if (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT)
3172 return WritePSDepthCount;
3173
3174 if (flags & PIPE_CONTROL_WRITE_TIMESTAMP)
3175 return WriteTimestamp;
3176
3177 return 0;
3178 }
3179
3180 /**
3181 * Do the given flags have a Post Sync or LRI Post Sync operation?
3182 */
3183 static enum pipe_control_flags
3184 get_post_sync_flags(enum pipe_control_flags flags)
3185 {
3186 flags &= PIPE_CONTROL_WRITE_IMMEDIATE |
3187 PIPE_CONTROL_WRITE_DEPTH_COUNT |
3188 PIPE_CONTROL_WRITE_TIMESTAMP |
3189 PIPE_CONTROL_LRI_POST_SYNC_OP;
3190
3191 /* Only one "Post Sync Op" is allowed, and it's mutually exclusive with
3192 * "LRI Post Sync Operation". So more than one bit set would be illegal.
3193 */
3194 assert(util_bitcount(flags) <= 1);
3195
3196 return flags;
3197 }
3198
3199 // XXX: compute support
3200 #define IS_COMPUTE_PIPELINE(batch) (batch->ring != I915_EXEC_RENDER)
3201
3202 /**
3203 * Emit a series of PIPE_CONTROL commands, taking into account any
3204 * workarounds necessary to actually accomplish the caller's request.
3205 *
3206 * Unless otherwise noted, spec quotations in this function come from:
3207 *
3208 * Synchronization of the 3D Pipeline > PIPE_CONTROL Command > Programming
3209 * Restrictions for PIPE_CONTROL.
3210 */
3211 static void
3212 iris_emit_raw_pipe_control(struct iris_batch *batch, uint32_t flags,
3213 struct iris_bo *bo, uint32_t offset, uint64_t imm)
3214 {
3215 UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
3216 enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
3217 enum pipe_control_flags non_lri_post_sync_flags =
3218 post_sync_flags & ~PIPE_CONTROL_LRI_POST_SYNC_OP;
3219
3220 /* Recursive PIPE_CONTROL workarounds --------------------------------
3221 * (http://knowyourmeme.com/memes/xzibit-yo-dawg)
3222 *
3223 * We do these first because we want to look at the original operation,
3224 * rather than any workarounds we set.
3225 */
3226 if (GEN_GEN == 9 && (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
3227 /* The PIPE_CONTROL "VF Cache Invalidation Enable" bit description
3228 * lists several workarounds:
3229 *
3230 * "Project: SKL, KBL, BXT
3231 *
3232 * If the VF Cache Invalidation Enable is set to a 1 in a
3233 * PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields
3234 * sets to 0, with the VF Cache Invalidation Enable set to 0
3235 * needs to be sent prior to the PIPE_CONTROL with VF Cache
3236 * Invalidation Enable set to a 1."
3237 */
3238 iris_emit_raw_pipe_control(batch, 0, NULL, 0, 0);
3239 }
3240
3241 if (GEN_GEN == 9 && IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
3242 /* Project: SKL / Argument: LRI Post Sync Operation [23]
3243 *
3244 * "PIPECONTROL command with “Command Streamer Stall Enable” must be
3245 * programmed prior to programming a PIPECONTROL command with "LRI
3246 * Post Sync Operation" in GPGPU mode of operation (i.e when
3247 * PIPELINE_SELECT command is set to GPGPU mode of operation)."
3248 *
3249 * The same text exists a few rows below for Post Sync Op.
3250 */
3251 iris_emit_raw_pipe_control(batch, PIPE_CONTROL_CS_STALL, bo, offset, imm);
3252 }
3253
3254 if (GEN_GEN == 10 && (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH)) {
3255 /* Cannonlake:
3256 * "Before sending a PIPE_CONTROL command with bit 12 set, SW must issue
3257 * another PIPE_CONTROL with Render Target Cache Flush Enable (bit 12)
3258 * = 0 and Pipe Control Flush Enable (bit 7) = 1"
3259 */
3260 iris_emit_raw_pipe_control(batch, PIPE_CONTROL_FLUSH_ENABLE, bo,
3261 offset, imm);
3262 }
3263
3264 /* "Flush Types" workarounds ---------------------------------------------
3265 * We do these now because they may add post-sync operations or CS stalls.
3266 */
3267
3268 if (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) {
3269 /* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
3270 *
3271 * "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
3272 * 'Write PS Depth Count' or 'Write Timestamp'."
3273 */
3274 if (!bo) {
3275 flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
3276 post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
3277 non_lri_post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
3278 bo = batch->screen->workaround_bo;
3279 }
3280 }
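/* (workaround_bo is essentially a scratch buffer the screen keeps around
 * as a dump target for forced post-sync writes like the one above.)
 */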
3281
3282 /* #1130 from Gen10 workarounds page:
3283 *
3284 * "Enable Depth Stall on every Post Sync Op if Render target Cache
3285 * Flush is not enabled in same PIPE CONTROL and Enable Pixel score
3286 * board stall if Render target cache flush is enabled."
3287 *
3288 * Applicable to CNL B0 and C0 steppings only.
3289 *
3290 * The wording here is unclear, and this workaround doesn't look anything
3291 * like the internal bug report recommendations, but we leave it as-is for now...
3292 */
3293 if (GEN_GEN == 10) {
3294 if (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) {
3295 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
3296 } else if (flags & non_lri_post_sync_flags) {
3297 flags |= PIPE_CONTROL_DEPTH_STALL;
3298 }
3299 }
3300
3301 if (flags & PIPE_CONTROL_DEPTH_STALL) {
3302 /* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
3303 *
3304 * "This bit must be DISABLED for operations other than writing
3305 * PS_DEPTH_COUNT."
3306 *
3307 * This seems like nonsense. An Ivybridge workaround requires us to
3308 * emit a PIPE_CONTROL with a depth stall and write immediate post-sync
3309 * operation. Gen8+ requires us to emit depth stalls and depth cache
3310 * flushes together. So, it's hard to imagine this means anything other
3311 * than "we originally intended this to be used for PS_DEPTH_COUNT".
3312 *
3313 * We ignore the supposed restriction and do nothing.
3314 */
3315 }
3316
3317 if (flags & (PIPE_CONTROL_RENDER_TARGET_FLUSH |
3318 PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
3319 /* From the PIPE_CONTROL instruction table, bit 12 and bit 1:
3320 *
3321 * "This bit must be DISABLED for End-of-pipe (Read) fences,
3322 * PS_DEPTH_COUNT or TIMESTAMP queries."
3323 *
3324 * TODO: Implement end-of-pipe checking.
3325 */
3326 assert(!(post_sync_flags & (PIPE_CONTROL_WRITE_DEPTH_COUNT |
3327 PIPE_CONTROL_WRITE_TIMESTAMP)));
3328 }
3329
3330 if (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD) {
3331 /* From the PIPE_CONTROL instruction table, bit 1:
3332 *
3333 * "This bit is ignored if Depth Stall Enable is set.
3334 * Further, the render cache is not flushed even if Write Cache
3335 * Flush Enable bit is set."
3336 *
3337 * We assert that the caller doesn't do this combination, to try and
3338 * prevent mistakes. It shouldn't hurt the GPU, though.
3339 */
3340 assert(!(flags & (PIPE_CONTROL_DEPTH_STALL |
3341 PIPE_CONTROL_RENDER_TARGET_FLUSH)));
3342 }
3343
3344 /* PIPE_CONTROL page workarounds ------------------------------------- */
3345
3346 if (GEN_GEN <= 8 && (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE)) {
3347 /* From the PIPE_CONTROL page itself:
3348 *
3349 * "IVB, HSW, BDW
3350 * Restriction: Pipe_control with CS-stall bit set must be issued
3351 * before a pipe-control command that has the State Cache
3352 * Invalidate bit set."
3353 */
3354 flags |= PIPE_CONTROL_CS_STALL;
3355 }
3356
3357 if (flags & PIPE_CONTROL_FLUSH_LLC) {
3358 /* From the PIPE_CONTROL instruction table, bit 26 (Flush LLC):
3359 *
3360 * "Project: ALL
3361 * SW must always program Post-Sync Operation to "Write Immediate
3362 * Data" when Flush LLC is set."
3363 *
3364 * For now, we just require the caller to do it.
3365 */
3366 assert(flags & PIPE_CONTROL_WRITE_IMMEDIATE);
3367 }
3368
3369 /* "Post-Sync Operation" workarounds -------------------------------- */
3370
3371 /* Project: All / Argument: Global Snapshot Count Reset [19]
3372 *
3373 * "This bit must not be exercised on any product.
3374 * Requires stall bit ([20] of DW1) set."
3375 *
3376 * We don't use this, so we just assert that it isn't used.  The
3377 * PIPE_CONTROL instruction page indicates that this was intended as
3378 * a debug feature that isn't considered useful in production, but it
3379 * may actually be usable, should we ever want to.
3380 */
3381 assert((flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) == 0);
3382
3383 if (flags & (PIPE_CONTROL_MEDIA_STATE_CLEAR |
3384 PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE)) {
3385 /* Project: All / Arguments:
3386 *
3387 * - Generic Media State Clear [16]
3388 * - Indirect State Pointers Disable [16]
3389 *
3390 * "Requires stall bit ([20] of DW1) set."
3391 *
3392 * Also, the PIPE_CONTROL instruction table, bit 16 (Generic Media
3393 * State Clear) says:
3394 *
3395 * "PIPECONTROL command with “Command Streamer Stall Enable” must be
3396 * programmed prior to programming a PIPECONTROL command with "Media
3397 * State Clear" set in GPGPU mode of operation"
3398 *
3399 * This is a subset of the earlier rule, so there's nothing to do.
3400 */
3401 flags |= PIPE_CONTROL_CS_STALL;
3402 }
3403
3404 if (flags & PIPE_CONTROL_STORE_DATA_INDEX) {
3405 /* Project: All / Argument: Store Data Index
3406 *
3407 * "Post-Sync Operation ([15:14] of DW1) must be set to something other
3408 * than '0'."
3409 *
3410 * For now, we just assert that the caller does this. We might want to
3411 * automatically add a write to the workaround BO...
3412 */
3413 assert(non_lri_post_sync_flags != 0);
3414 }
3415
3416 if (flags & PIPE_CONTROL_SYNC_GFDT) {
3417 /* Project: All / Argument: Sync GFDT
3418 *
3419 * "Post-Sync Operation ([15:14] of DW1) must be set to something other
3420 * than '0' or 0x2520[13] must be set."
3421 *
3422 * For now, we just assert that the caller does this.
3423 */
3424 assert(non_lri_post_sync_flags != 0);
3425 }
3426
3427 if (flags & PIPE_CONTROL_TLB_INVALIDATE) {
3428 /* Project: IVB+ / Argument: TLB inv
3429 *
3430 * "Requires stall bit ([20] of DW1) set."
3431 *
3432 * Also, from the PIPE_CONTROL instruction table:
3433 *
3434 * "Project: SKL+
3435 * Post Sync Operation or CS stall must be set to ensure a TLB
3436 * invalidation occurs. Otherwise no cycle will occur to the TLB
3437 * cache to invalidate."
3438 *
3439 * This is not a subset of the earlier rule; the CS stall we set below satisfies both.
3440 */
3441 flags |= PIPE_CONTROL_CS_STALL;
3442 }
3443
3444 if (GEN_GEN == 9 && devinfo->gt == 4) {
3445 /* TODO: The big Skylake GT4 post sync op workaround */
3446 }
3447
3448 /* "GPGPU specific workarounds" (both post-sync and flush) ------------ */
3449
3450 if (IS_COMPUTE_PIPELINE(batch)) {
3451 if (GEN_GEN >= 9 && (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE)) {
3452 /* Project: SKL+ / Argument: Tex Invalidate
3453 * "Requires stall bit ([20] of DW) set for all GPGPU Workloads."
3454 */
3455 flags |= PIPE_CONTROL_CS_STALL;
3456 }
3457
3458 if (GEN_GEN == 8 && (post_sync_flags ||
3459 (flags & (PIPE_CONTROL_NOTIFY_ENABLE |
3460 PIPE_CONTROL_DEPTH_STALL |
3461 PIPE_CONTROL_RENDER_TARGET_FLUSH |
3462 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
3463 PIPE_CONTROL_DATA_CACHE_FLUSH)))) {
3464 /* Project: BDW / Arguments:
3465 *
3466 * - LRI Post Sync Operation [23]
3467 * - Post Sync Op [15:14]
3468 * - Notify En [8]
3469 * - Depth Stall [13]
3470 * - Render Target Cache Flush [12]
3471 * - Depth Cache Flush [0]
3472 * - DC Flush Enable [5]
3473 *
3474 * "Requires stall bit ([20] of DW) set for all GPGPU and Media
3475 * Workloads."
3476 */
3477 flags |= PIPE_CONTROL_CS_STALL;
3478
3479 /* Also, from the PIPE_CONTROL instruction table, bit 20:
3480 *
3481 * "Project: BDW
3482 * This bit must be always set when PIPE_CONTROL command is
3483 * programmed by GPGPU and MEDIA workloads, except for the cases
3484 * when only Read Only Cache Invalidation bits are set (State
3485 * Cache Invalidation Enable, Instruction cache Invalidation
3486 * Enable, Texture Cache Invalidation Enable, Constant Cache
3487 * Invalidation Enable). This is to WA FFDOP CG issue, this WA
3488 * need not implemented when FF_DOP_CG is disable via "Fixed
3489 * Function DOP Clock Gate Disable" bit in RC_PSMI_CTRL register."
3490 *
3491 * It sounds like we could avoid CS stalls in some cases, but we
3492 * don't currently bother. This list isn't exactly the list above,
3493 * either...
3494 */
3495 }
3496 }
3497
3498 /* "Stall" workarounds ----------------------------------------------
3499 * These have to come after the earlier ones because we may have added
3500 * some additional CS stalls above.
3501 */
3502
3503 if (GEN_GEN < 9 && (flags & PIPE_CONTROL_CS_STALL)) {
3504 /* Project: PRE-SKL, VLV, CHV
3505 *
3506 * "[All Stepping][All SKUs]:
3507 *
3508 * One of the following must also be set:
3509 *
3510 * - Render Target Cache Flush Enable ([12] of DW1)
3511 * - Depth Cache Flush Enable ([0] of DW1)
3512 * - Stall at Pixel Scoreboard ([1] of DW1)
3513 * - Depth Stall ([13] of DW1)
3514 * - Post-Sync Operation ([13] of DW1)
3515 * - DC Flush Enable ([5] of DW1)"
3516 *
3517 * If we don't already have one of those bits set, we choose to add
3518 * "Stall at Pixel Scoreboard". Some of the other bits require a
3519 * CS stall as a workaround (see above), which would send us into
3520 * an infinite recursion of PIPE_CONTROLs. "Stall at Pixel Scoreboard"
3521 * appears to be safe, so we choose that.
3522 */
3523 const uint32_t wa_bits = PIPE_CONTROL_RENDER_TARGET_FLUSH |
3524 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
3525 PIPE_CONTROL_WRITE_IMMEDIATE |
3526 PIPE_CONTROL_WRITE_DEPTH_COUNT |
3527 PIPE_CONTROL_WRITE_TIMESTAMP |
3528 PIPE_CONTROL_STALL_AT_SCOREBOARD |
3529 PIPE_CONTROL_DEPTH_STALL |
3530 PIPE_CONTROL_DATA_CACHE_FLUSH;
3531 if (!(flags & wa_bits))
3532 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
3533 }
3534
3535 /* Emit --------------------------------------------------------------- */
3536
3537 iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
3538 pc.LRIPostSyncOperation = NoLRIOperation;
3539 pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
3540 pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
3541 pc.StoreDataIndex = 0;
3542 pc.CommandStreamerStallEnable = flags & PIPE_CONTROL_CS_STALL;
3543 pc.GlobalSnapshotCountReset =
3544 flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET;
3545 pc.TLBInvalidate = flags & PIPE_CONTROL_TLB_INVALIDATE;
3546 pc.GenericMediaStateClear = flags & PIPE_CONTROL_MEDIA_STATE_CLEAR;
3547 pc.StallAtPixelScoreboard = flags & PIPE_CONTROL_STALL_AT_SCOREBOARD;
3548 pc.RenderTargetCacheFlushEnable =
3549 flags & PIPE_CONTROL_RENDER_TARGET_FLUSH;
3550 pc.DepthCacheFlushEnable = flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH;
3551 pc.StateCacheInvalidationEnable =
3552 flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE;
3553 pc.VFCacheInvalidationEnable = flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
3554 pc.ConstantCacheInvalidationEnable =
3555 flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE;
3556 pc.PostSyncOperation = flags_to_post_sync_op(flags);
3557 pc.DepthStallEnable = flags & PIPE_CONTROL_DEPTH_STALL;
3558 pc.InstructionCacheInvalidateEnable =
3559 flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE;
3560 pc.NotifyEnable = flags & PIPE_CONTROL_NOTIFY_ENABLE;
3561 pc.IndirectStatePointersDisable =
3562 flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE;
3563 pc.TextureCacheInvalidationEnable =
3564 flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
3565 pc.Address = ro_bo(bo, offset);
3566 pc.ImmediateData = imm;
3567 }
3568 }
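/* Illustrative usage (a sketch, not taken from any particular caller): a
 * plain render target flush with a CS stall would look like
 *
 *    iris_emit_raw_pipe_control(batch,
 *                               PIPE_CONTROL_RENDER_TARGET_FLUSH |
 *                               PIPE_CONTROL_CS_STALL,
 *                               NULL, 0, 0);
 *
 * Most code goes through the small helpers in iris_pipe_control.c
 * (e.g. iris_emit_pipe_control_flush()), which reach this function via
 * the ice->vtbl.emit_raw_pipe_control hook set below.
 */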
3569
3570 void
3571 genX(init_state)(struct iris_context *ice)
3572 {
3573 struct pipe_context *ctx = &ice->ctx;
3574 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3575
3576 ctx->create_blend_state = iris_create_blend_state;
3577 ctx->create_depth_stencil_alpha_state = iris_create_zsa_state;
3578 ctx->create_rasterizer_state = iris_create_rasterizer_state;
3579 ctx->create_sampler_state = iris_create_sampler_state;
3580 ctx->create_sampler_view = iris_create_sampler_view;
3581 ctx->create_surface = iris_create_surface;
3582 ctx->create_vertex_elements_state = iris_create_vertex_elements;
3583 ctx->create_compute_state = iris_create_compute_state;
3584 ctx->bind_blend_state = iris_bind_blend_state;
3585 ctx->bind_depth_stencil_alpha_state = iris_bind_zsa_state;
3586 ctx->bind_sampler_states = iris_bind_sampler_states;
3587 ctx->bind_rasterizer_state = iris_bind_rasterizer_state;
3588 ctx->bind_vertex_elements_state = iris_bind_vertex_elements_state;
3589 ctx->bind_compute_state = iris_bind_compute_state;
3590 ctx->delete_blend_state = iris_delete_state;
3591 ctx->delete_depth_stencil_alpha_state = iris_delete_state;
3592 ctx->delete_fs_state = iris_delete_state;
3593 ctx->delete_rasterizer_state = iris_delete_state;
3594 ctx->delete_sampler_state = iris_delete_state;
3595 ctx->delete_vertex_elements_state = iris_delete_state;
3596 ctx->delete_compute_state = iris_delete_state;
3597 ctx->delete_tcs_state = iris_delete_state;
3598 ctx->delete_tes_state = iris_delete_state;
3599 ctx->delete_gs_state = iris_delete_state;
3600 ctx->delete_vs_state = iris_delete_state;
3601 ctx->set_blend_color = iris_set_blend_color;
3602 ctx->set_clip_state = iris_set_clip_state;
3603 ctx->set_constant_buffer = iris_set_constant_buffer;
3604 ctx->set_sampler_views = iris_set_sampler_views;
3605 ctx->set_framebuffer_state = iris_set_framebuffer_state;
3606 ctx->set_polygon_stipple = iris_set_polygon_stipple;
3607 ctx->set_sample_mask = iris_set_sample_mask;
3608 ctx->set_scissor_states = iris_set_scissor_states;
3609 ctx->set_stencil_ref = iris_set_stencil_ref;
3610 ctx->set_vertex_buffers = iris_set_vertex_buffers;
3611 ctx->set_viewport_states = iris_set_viewport_states;
3612 ctx->sampler_view_destroy = iris_sampler_view_destroy;
3613 ctx->surface_destroy = iris_surface_destroy;
3614 ctx->draw_vbo = iris_draw_vbo;
3615 ctx->launch_grid = iris_launch_grid;
3616 ctx->create_stream_output_target = iris_create_stream_output_target;
3617 ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
3618 ctx->set_stream_output_targets = iris_set_stream_output_targets;
3619
3620 ice->vtbl.destroy_state = iris_destroy_state;
3621 ice->vtbl.init_render_context = iris_init_render_context;
3622 ice->vtbl.upload_render_state = iris_upload_render_state;
3623 ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
3624 ice->vtbl.derived_program_state_size = iris_derived_program_state_size;
3625 ice->vtbl.store_derived_program_state = iris_store_derived_program_state;
3626 ice->vtbl.create_so_decl_list = iris_create_so_decl_list;
3627 ice->vtbl.populate_vs_key = iris_populate_vs_key;
3628 ice->vtbl.populate_tcs_key = iris_populate_tcs_key;
3629 ice->vtbl.populate_tes_key = iris_populate_tes_key;
3630 ice->vtbl.populate_gs_key = iris_populate_gs_key;
3631 ice->vtbl.populate_fs_key = iris_populate_fs_key;
3632
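/* Flag everything dirty so the first draw re-emits all state. */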
3633 ice->state.dirty = ~0ull;
3634
3635 ice->state.num_viewports = 1;
3636 ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
3637
3638 /* Make a 1x1x1 null surface for unbound textures */
3639 void *null_surf_map =
3640 upload_state(ice->state.surface_uploader, &ice->state.unbound_tex,
3641 4 * GENX(RENDER_SURFACE_STATE_length), 64);
3642 isl_null_fill_state(&screen->isl_dev, null_surf_map, isl_extent3d(1, 1, 1));
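/* Binding table entries for unbound textures can then point at this
 * well-defined surface rather than at stale or garbage state.
 */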
3643 }