iris: Set XY Clipping correctly.
[mesa.git] / src / gallium / drivers / iris / iris_program.c
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_program.c
 *
 * This file contains the driver interface for compiling shaders.
 *
 * See iris_program_cache.c for the in-memory program cache where the
 * compiled shaders are stored.
 */

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_atomic.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "intel/compiler/brw_compiler.h"
#include "intel/compiler/brw_nir.h"
#include "iris_context.h"
#include "nir/tgsi_to_nir.h"

#define KEY_INIT_NO_ID(gen)                              \
   .tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688,        \
   .tex.compressed_multisample_layout_mask = ~0,         \
   .tex.msaa_16 = (gen >= 9 ? ~0 : 0)
#define KEY_INIT(gen) .program_string_id = ish->program_id, KEY_INIT_NO_ID(gen)
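
/* Example usage (illustrative): these macros are meant to appear inside a
 * designated initializer when filling out a shader key, e.g.
 *
 *    struct brw_vs_prog_key key = { KEY_INIT(devinfo->gen) };
 *
 * which sets program_string_id plus the stateless texture defaults: the
 * identity XYZW swizzle for every sampler (0x688 packs X/Y/Z/W at 3 bits
 * per component), and the MCS/16x-MSAA masks appropriate for the gen.
 */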

static unsigned
get_new_program_id(struct iris_screen *screen)
{
   return p_atomic_inc_return(&screen->program_id);
}

/**
 * An uncompiled, API-facing shader. This is the Gallium CSO for shaders.
 * It primarily contains the NIR for the shader.
 *
 * Each API-facing shader can be compiled into multiple shader variants,
 * based on non-orthogonal state dependencies, recorded in the shader key.
 *
 * See iris_compiled_shader, which represents a compiled shader variant.
 */
struct iris_uncompiled_shader {
   nir_shader *nir;

   struct pipe_stream_output_info stream_output;

   unsigned program_id;

   /** Bitfield of (1 << IRIS_NOS_*) flags. */
   unsigned nos;

   /** Have any shader variants been compiled yet? */
   bool compiled_once;
};

static nir_ssa_def *
get_aoa_deref_offset(nir_builder *b,
                     nir_deref_instr *deref,
                     unsigned elem_size)
{
   unsigned array_size = elem_size;
   nir_ssa_def *offset = nir_imm_int(b, 0);

   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      /* This level's element size is the previous level's array size */
      assert(deref->arr.index.ssa);
      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      offset = nir_iadd(b, offset,
                        nir_imul(b, index, nir_imm_int(b, array_size)));

      deref = nir_deref_instr_parent(deref);
      assert(glsl_type_is_array(deref->type));
      array_size *= glsl_get_length(deref->type);
   }

   /* Accessing an invalid surface index with the dataport can result in a
    * hang. According to the spec "if the index used to select an individual
    * element is negative or greater than or equal to the size of the array,
    * the results of the operation are undefined but may not lead to
    * termination" -- which is one of the possible outcomes of the hang.
    * Clamp the index to prevent access outside of the array bounds.
    */
   return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
}
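
/* Worked example (illustrative): for a variable declared as
 *
 *    layout(rgba8) uniform image2D img[4][5];
 *
 * an access to img[2][3] with elem_size == 1 walks the deref chain
 * innermost-first: offset = 3 * 1, then array_size becomes 5 and
 * offset += 2 * 5, giving 13. The final array_size is 20, so the result
 * is umin(13, 19) -- the clamp keeps a bogus index inside the 20 surfaces.
 */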

static void
iris_lower_storage_image_derefs(nir_shader *nir)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel: {
            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            nir_variable *var = nir_deref_instr_get_variable(deref);

            b.cursor = nir_before_instr(&intrin->instr);
            nir_ssa_def *index =
               nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
                        get_aoa_deref_offset(&b, deref, 1));
            nir_rewrite_image_intrinsic(intrin, index, false);
            break;
         }

         default:
            break;
         }
      }
   }
}

// XXX: need unify_interfaces() at link time...

/**
 * Fix an uncompiled shader's stream output info.
 *
 * Core Gallium stores output->register_index as a "slot" number, where
 * slots are assigned consecutively to all outputs in info->outputs_written.
 * This naive packing of outputs doesn't work for us - we too have slots,
 * but the layout is defined by the VUE map, which we won't have until we
 * compile a specific shader variant. So, we remap these and simply store
 * VARYING_SLOT_* in our copy's output->register_index fields.
 *
 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
 * components of our VUE header. See brw_vue_map.c for the layout.
 */
static void
update_so_info(struct pipe_stream_output_info *so_info,
               uint64_t outputs_written)
{
   uint8_t reverse_map[64] = {};
   unsigned slot = 0;
   while (outputs_written) {
      reverse_map[slot++] = u_bit_scan64(&outputs_written);
   }

   for (unsigned i = 0; i < so_info->num_outputs; i++) {
      struct pipe_stream_output *output = &so_info->output[i];

      /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
      output->register_index = reverse_map[output->register_index];

      /* The VUE header contains three scalar fields packed together:
       * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
       * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
       * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
       */
      switch (output->register_index) {
      case VARYING_SLOT_LAYER:
         assert(output->num_components == 1);
         output->register_index = VARYING_SLOT_PSIZ;
         output->start_component = 1;
         break;
      case VARYING_SLOT_VIEWPORT:
         assert(output->num_components == 1);
         output->register_index = VARYING_SLOT_PSIZ;
         output->start_component = 2;
         break;
      case VARYING_SLOT_PSIZ:
         assert(output->num_components == 1);
         output->start_component = 3;
         break;
      }

      //info->outputs_written |= 1ull << output->register_index;
   }
}
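
/* Worked example (illustrative): if outputs_written contains only
 * VARYING_SLOT_POS, VARYING_SLOT_PSIZ, and VARYING_SLOT_COL0, Gallium's
 * condensed slots 0/1/2 map back to those three enums via reverse_map.
 * A gl_Layer output Gallium recorded as some slot N is first remapped to
 * VARYING_SLOT_LAYER, then redirected to VARYING_SLOT_PSIZ with
 * start_component = 1, matching the VUE header packing described above.
 */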

/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused, while also ensuring that adding small offsets to them will still
 * trip our asserts that surface indices are < BRW_MAX_SURFACES.
 */
static uint32_t
assign_common_binding_table_offsets(const struct gen_device_info *devinfo,
                                    const struct nir_shader *nir,
                                    struct brw_stage_prog_data *prog_data,
                                    uint32_t next_binding_table_offset,
                                    unsigned num_system_values,
                                    unsigned num_cbufs)
{
   const struct shader_info *info = &nir->info;

   unsigned num_textures = util_last_bit(info->textures_used);

   if (num_textures) {
      prog_data->binding_table.texture_start = next_binding_table_offset;
      prog_data->binding_table.gather_texture_start = next_binding_table_offset;
      next_binding_table_offset += num_textures;
   } else {
      prog_data->binding_table.texture_start = 0xd0d0d0d0;
      prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (info->num_images) {
      prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += info->num_images;
   } else {
      prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   if (num_cbufs) {
      //assert(info->num_ubos <= BRW_MAX_UBO);
      prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += num_cbufs;
   } else {
      prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (info->num_ssbos || info->num_abos) {
      prog_data->binding_table.ssbo_start = next_binding_table_offset;
      // XXX: see iris_state "wasting 16 binding table slots for ABOs" comment
      next_binding_table_offset += IRIS_MAX_ABOS + info->num_ssbos;
   } else {
      prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
   }

   prog_data->binding_table.shader_time_start = 0xd0d0d0d0;

   /* Plane 0 is just the regular texture section */
   prog_data->binding_table.plane_start[0] = prog_data->binding_table.texture_start;

   prog_data->binding_table.plane_start[1] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   prog_data->binding_table.plane_start[2] = next_binding_table_offset;
   next_binding_table_offset += num_textures;

   /* Set the binding table size */
   prog_data->binding_table.size_bytes = next_binding_table_offset * 4;

   return next_binding_table_offset;
}
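
/* Worked example (illustrative): a fragment shader with one render target
 * (so next_binding_table_offset starts at 1), two textures, one image, and
 * one constant buffer ends up with:
 *
 *    texture_start = 1   (entries 1-2; gather_texture_start is also 1)
 *    image_start   = 3
 *    ubo_start     = 4
 *    ssbo_start    = 0xd0d0d0d0   (unused)
 *    plane_start   = { 1, 5, 7 }  (planes 1/2 reserve two entries each)
 *
 * for 9 entries total, so size_bytes = 36.
 */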

static void
setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
                        unsigned offset, unsigned n)
{
   assert(offset % sizeof(uint32_t) == 0);

   for (unsigned i = 0; i < n; ++i)
      sysvals[i] = BRW_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);

   for (unsigned i = n; i < 4; ++i)
      sysvals[i] = BRW_PARAM_BUILTIN_ZERO;
}
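
/* For example (illustrative): setup_vec4_image_sysval(sv, 0,
 * offsetof(struct brw_image_param, size), 3) fills sv[0..2] with
 * BRW_PARAM_IMAGE params for the three size dwords of image 0 and pads
 * sv[3] with BRW_PARAM_BUILTIN_ZERO, so each group occupies a full vec4.
 */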

/**
 * Associate NIR uniform variables with the prog_data->param[] mechanism
 * used by the backend. Also, decide which UBOs we'd like to push in an
 * ideal situation (though the backend can reduce this).
 */
static void
iris_setup_uniforms(const struct brw_compiler *compiler,
                    void *mem_ctx,
                    nir_shader *nir,
                    struct brw_stage_prog_data *prog_data,
                    enum brw_param_builtin **out_system_values,
                    unsigned *out_num_system_values,
                    unsigned *out_num_cbufs)
{
   UNUSED const struct gen_device_info *devinfo = compiler->devinfo;

   /* The intel compiler assumes that num_uniforms is in bytes. For
    * scalar that means 4 bytes per uniform slot.
    *
    * Ref: brw_nir_lower_uniforms, type_size_scalar_bytes.
    */
   nir->num_uniforms *= 4;

   const unsigned IRIS_MAX_SYSTEM_VALUES =
      PIPE_MAX_SHADER_IMAGES * BRW_IMAGE_PARAM_SIZE;
   enum brw_param_builtin *system_values =
      rzalloc_array(mem_ctx, enum brw_param_builtin, IRIS_MAX_SYSTEM_VALUES);
   unsigned num_system_values = 0;

   unsigned patch_vert_idx = -1;
   unsigned ucp_idx[IRIS_MAX_CLIP_PLANES];
   unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
   memset(ucp_idx, -1, sizeof(ucp_idx));
   memset(img_idx, -1, sizeof(img_idx));

   nir_function_impl *impl = nir_shader_get_entrypoint(nir);

   nir_builder b;
   nir_builder_init(&b, impl);

   b.cursor = nir_before_block(nir_start_block(impl));
   nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);

   /* Turn system value intrinsics into uniforms */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         nir_ssa_def *offset;

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_user_clip_plane: {
            unsigned ucp = nir_intrinsic_ucp_id(intrin);

            if (ucp_idx[ucp] == -1) {
               ucp_idx[ucp] = num_system_values;
               num_system_values += 4;
            }

            for (int i = 0; i < 4; i++) {
               system_values[ucp_idx[ucp] + i] =
                  BRW_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
            }

            b.cursor = nir_before_instr(instr);
            offset = nir_imm_int(&b, ucp_idx[ucp] * sizeof(uint32_t));
            break;
         }
         case nir_intrinsic_load_patch_vertices_in:
            if (patch_vert_idx == -1)
               patch_vert_idx = num_system_values++;

            system_values[patch_vert_idx] =
               BRW_PARAM_BUILTIN_PATCH_VERTICES_IN;

            b.cursor = nir_before_instr(instr);
            offset = nir_imm_int(&b, patch_vert_idx * sizeof(uint32_t));
            break;
         case nir_intrinsic_image_deref_load_param_intel: {
            assert(devinfo->gen < 9);
            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            nir_variable *var = nir_deref_instr_get_variable(deref);

            /* XXX: var->data.binding is not set properly. We need to run
             * some form of gl_nir_lower_samplers_as_deref() to get it.
             * This breaks tests which use more than one image.
             */
            if (img_idx[var->data.binding] == -1) {
               /* GL only allows arrays of arrays of images. */
               assert(glsl_type_is_image(glsl_without_array(var->type)));
               unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));

               for (int i = 0; i < num_images; i++) {
                  const unsigned img = var->data.binding + i;

                  img_idx[img] = num_system_values;
                  num_system_values += BRW_IMAGE_PARAM_SIZE;

                  uint32_t *img_sv = &system_values[img_idx[img]];

                  setup_vec4_image_sysval(
                     img_sv + BRW_IMAGE_PARAM_OFFSET_OFFSET, img,
                     offsetof(struct brw_image_param, offset), 2);
                  setup_vec4_image_sysval(
                     img_sv + BRW_IMAGE_PARAM_SIZE_OFFSET, img,
                     offsetof(struct brw_image_param, size), 3);
                  setup_vec4_image_sysval(
                     img_sv + BRW_IMAGE_PARAM_STRIDE_OFFSET, img,
                     offsetof(struct brw_image_param, stride), 4);
                  setup_vec4_image_sysval(
                     img_sv + BRW_IMAGE_PARAM_TILING_OFFSET, img,
                     offsetof(struct brw_image_param, tiling), 3);
                  setup_vec4_image_sysval(
                     img_sv + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, img,
                     offsetof(struct brw_image_param, swizzling), 2);
               }
            }

            b.cursor = nir_before_instr(instr);
            offset = nir_iadd(&b,
               get_aoa_deref_offset(&b, deref, BRW_IMAGE_PARAM_SIZE * 4),
               nir_imm_int(&b, img_idx[var->data.binding] * 4 +
                               nir_intrinsic_base(intrin) * 16));
            break;
         }
         default:
            continue;
         }

         unsigned comps = nir_intrinsic_dest_components(intrin);

         nir_intrinsic_instr *load =
            nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
         load->num_components = comps;
         load->src[0] = nir_src_for_ssa(temp_ubo_name);
         load->src[1] = nir_src_for_ssa(offset);
         nir_ssa_dest_init(&load->instr, &load->dest, comps, 32, NULL);
         nir_builder_instr_insert(&b, &load->instr);
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_src_for_ssa(&load->dest.ssa));
         nir_instr_remove(instr);
      }
   }

   nir_validate_shader(nir, "before remapping");

   /* Place the new params at the front of constant buffer 0. */
   if (num_system_values > 0) {
      nir->num_uniforms += num_system_values * sizeof(uint32_t);

      system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
                               num_system_values);

      nir_foreach_block(block, impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);

            if (load->intrinsic != nir_intrinsic_load_ubo)
               continue;

            b.cursor = nir_before_instr(instr);

            assert(load->src[0].is_ssa);

            if (load->src[0].ssa == temp_ubo_name) {
               nir_instr_rewrite_src(instr, &load->src[0],
                                     nir_src_for_ssa(nir_imm_int(&b, 0)));
            } else if (nir_src_as_uint(load->src[0]) == 0) {
               nir_ssa_def *offset =
                  nir_iadd(&b, load->src[1].ssa,
                           nir_imm_int(&b, 4 * num_system_values));
               nir_instr_rewrite_src(instr, &load->src[1],
                                     nir_src_for_ssa(offset));
            }
         }
      }

      /* We need to fold the new iadds for brw_nir_analyze_ubo_ranges */
      nir_opt_constant_folding(nir);
   } else {
      ralloc_free(system_values);
      system_values = NULL;
   }

   nir_validate_shader(nir, "after remap");

   if (nir->info.stage != MESA_SHADER_COMPUTE)
      brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);

   /* We don't use params[], but fs_visitor::nir_setup_uniforms() asserts
    * about it for compute shaders, so go ahead and make some fake ones
    * which the backend will dead-code eliminate.
    */
   prog_data->nr_params = nir->num_uniforms / 4;
   prog_data->param = rzalloc_array(mem_ctx, uint32_t, prog_data->nr_params);

   /* System values and uniforms are stored in constant buffer 0, so the
    * user-facing UBOs start at index 1. If any constant buffer is needed,
    * constant buffer 0 will be needed too, so account for it.
    */
   unsigned num_cbufs = nir->info.num_ubos;
   if (num_cbufs || num_system_values || nir->num_uniforms)
      num_cbufs++;

   *out_system_values = system_values;
   *out_num_system_values = num_system_values;
   *out_num_cbufs = num_cbufs;
}
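
/* Resulting constant buffer 0 layout (illustrative): system values first,
 * then the flattened GL default uniform block. With num_system_values == 4
 * and 8 bytes of user uniforms:
 *
 *    cbuf0: [sysvals 0..3 (16 bytes)][user uniforms (8 bytes)]
 *
 * which is why the pre-existing load_ubo(0, offset) intrinsics above have
 * their offsets bumped by 4 * num_system_values, and why user-facing UBO N
 * is bound as constant buffer N + 1.
 */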

static void
iris_debug_recompile(struct iris_context *ice,
                     struct shader_info *info,
                     unsigned program_string_id,
                     const void *key)
{
   struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
   const struct brw_compiler *c = screen->compiler;

   if (!info)
      return;

   c->shader_perf_log(&ice->dbg, "Recompiling %s shader for program %s: %s\n",
                      _mesa_shader_stage_to_string(info->stage),
                      info->name ? info->name : "(no identifier)",
                      info->label ? info->label : "");

   const void *old_key =
      iris_find_previous_compile(ice, info->stage, program_string_id);

   brw_debug_key_recompile(c, &ice->dbg, info->stage, old_key, key);
}


/**
 * Compile a vertex shader, and upload the assembly.
 */
static struct iris_compiled_shader *
iris_compile_vs(struct iris_context *ice,
                struct iris_uncompiled_shader *ish,
                const struct brw_vs_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct gen_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_vs_prog_data *vs_prog_data =
      rzalloc(mem_ctx, struct brw_vs_prog_data);
   struct brw_vue_prog_data *vue_prog_data = &vs_prog_data->base;
   struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
   enum brw_param_builtin *system_values;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   if (key->nr_userclip_plane_consts) {
      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
      nir_lower_clip_vs(nir, (1 << key->nr_userclip_plane_consts) - 1, true);
      nir_lower_io_to_temporaries(nir, impl, true, false);
      nir_lower_global_vars_to_local(nir);
      nir_lower_vars_to_ssa(nir);
      nir_shader_gather_info(nir, impl);
   }

   if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
      prog_data->use_alt_mode = true;

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
                       &num_system_values, &num_cbufs);

   assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
                                       num_system_values, num_cbufs);

   brw_compute_vue_map(devinfo,
                       &vue_prog_data->vue_map, nir->info.outputs_written,
                       nir->info.separate_shader);

   /* Don't tell the backend about our clip plane constants, we've already
    * lowered them in NIR and we don't want it doing it again.
    */
   struct brw_vs_prog_key key_no_ucp = *key;
   key_no_ucp.nr_userclip_plane_consts = 0;

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_vs(compiler, &ice->dbg, mem_ctx, &key_no_ucp, vs_prog_data,
                     nir, -1, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile vertex shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      iris_debug_recompile(ice, &nir->info, key->program_string_id, key);
   } else {
      ish->compiled_once = true;
   }

   uint32_t *so_decls =
      ice->vtbl.create_so_decl_list(&ish->stream_output,
                                    &vue_prog_data->vue_map);

   struct iris_compiled_shader *shader =
      iris_upload_shader(ice, IRIS_CACHE_VS, sizeof(*key), key, program,
                         prog_data, so_decls, system_values, num_system_values,
                         num_cbufs);

   ralloc_free(mem_ctx);
   return shader;
}

/**
 * Update the current vertex shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_vs(struct iris_context *ice)
{
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_VERTEX];
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct gen_device_info *devinfo = &screen->devinfo;

   struct brw_vs_prog_key key = { KEY_INIT(devinfo->gen) };
   ice->vtbl.populate_vs_key(ice, &ish->nir->info, &key);

   struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
   struct iris_compiled_shader *shader =
      iris_find_cached_shader(ice, IRIS_CACHE_VS, sizeof(key), &key);

   if (!shader)
      shader = iris_compile_vs(ice, ish, &key);

   if (old != shader) {
      ice->shaders.prog[IRIS_CACHE_VS] = shader;
      ice->state.dirty |= IRIS_DIRTY_VS |
                          IRIS_DIRTY_BINDINGS_VS |
                          IRIS_DIRTY_CONSTANTS_VS |
                          IRIS_DIRTY_VF_SGVS;
      const struct brw_vs_prog_data *vs_prog_data =
         (void *) shader->prog_data;
      const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
                                    vs_prog_data->uses_baseinstance;
      const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
                                            vs_prog_data->uses_is_indexed_draw;
      const bool needs_sgvs_element = uses_draw_params ||
                                      vs_prog_data->uses_instanceid ||
                                      vs_prog_data->uses_vertexid;
      bool needs_edge_flag = false;
      nir_foreach_variable(var, &ish->nir->inputs) {
         if (var->data.location == VERT_ATTRIB_EDGEFLAG)
            needs_edge_flag = true;
      }

      if (ice->state.vs_uses_draw_params != uses_draw_params ||
          ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
          ice->state.vs_needs_edge_flag != needs_edge_flag) {
         ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
                             IRIS_DIRTY_VERTEX_ELEMENTS;
      }
      ice->state.vs_uses_draw_params = uses_draw_params;
      ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
      ice->state.vs_needs_sgvs_element = needs_sgvs_element;
      ice->state.vs_needs_edge_flag = needs_edge_flag;
   }
}

/**
 * Get the shader_info for a given stage, or NULL if the stage is disabled.
 */
const struct shader_info *
iris_get_shader_info(const struct iris_context *ice, gl_shader_stage stage)
{
   const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];

   if (!ish)
      return NULL;

   const nir_shader *nir = ish->nir;
   return &nir->info;
}

/**
 * Get the union of TCS output and TES input slots.
 *
 * TCS and TES need to agree on a common URB entry layout. In particular,
 * the data for all patch vertices is stored in a single URB entry (unlike
 * GS which has one entry per input vertex). This means that per-vertex
 * array indexing needs a stride.
 *
 * SSO requires locations to match, but doesn't require the number of
 * outputs/inputs to match (in fact, the TCS often has extra outputs).
 * So, we need to take the extra step of unifying these on the fly.
 */
static void
get_unified_tess_slots(const struct iris_context *ice,
                       uint64_t *per_vertex_slots,
                       uint32_t *per_patch_slots)
{
   const struct shader_info *tcs =
      iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
   const struct shader_info *tes =
      iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);

   *per_vertex_slots = tes->inputs_read;
   *per_patch_slots = tes->patch_inputs_read;

   if (tcs) {
      *per_vertex_slots |= tcs->outputs_written;
      *per_patch_slots |= tcs->patch_outputs_written;
   }
}
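
/* For example (illustrative): if the app supplies only a TES (so iris will
 * build a passthrough TCS), tcs is NULL and the slots come from the TES
 * inputs alone. With SSO, a TCS may write outputs the TES never reads; the
 * union keeps those slots in the shared layout so both stages agree on
 * URB offsets.
 */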

/**
 * Compile a tessellation control shader, and upload the assembly.
 */
static struct iris_compiled_shader *
iris_compile_tcs(struct iris_context *ice,
                 struct iris_uncompiled_shader *ish,
                 const struct brw_tcs_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct nir_shader_compiler_options *options =
      compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
   const struct gen_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_tcs_prog_data *tcs_prog_data =
      rzalloc(mem_ctx, struct brw_tcs_prog_data);
   struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
   struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
   enum brw_param_builtin *system_values = NULL;
   unsigned num_system_values = 0;
   unsigned num_cbufs = 0;

   nir_shader *nir;

   if (ish) {
      nir = nir_shader_clone(mem_ctx, ish->nir);

      iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
                          &num_system_values, &num_cbufs);
      assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
                                          num_system_values, num_cbufs);
   } else {
      nir = brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, key);

      /* Reserve space for passing the default tess levels as constants. */
      num_system_values = 8;
      system_values =
         rzalloc_array(mem_ctx, enum brw_param_builtin, num_system_values);
      prog_data->param = rzalloc_array(mem_ctx, uint32_t, num_system_values);
      prog_data->nr_params = num_system_values;

      if (key->tes_primitive_mode == GL_QUADS) {
         for (int i = 0; i < 4; i++)
            system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;

         system_values[3] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
         system_values[2] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y;
      } else if (key->tes_primitive_mode == GL_TRIANGLES) {
         for (int i = 0; i < 3; i++)
            system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;

         system_values[4] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
      } else {
         assert(key->tes_primitive_mode == GL_ISOLINES);
         system_values[7] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y;
         system_values[6] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
      }

      prog_data->ubo_ranges[0].length = 1;
   }
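
   /* Resulting layout for GL_QUADS (illustrative): slots 7..4 hold
    * TESS_LEVEL_OUTER_X..W and slots 3..2 hold TESS_LEVEL_INNER_X..Y --
    * the defaults are stored back-to-front, matching where the
    * passthrough TCS expects to read them from.
    */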

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_tcs(compiler, &ice->dbg, mem_ctx, key, tcs_prog_data, nir,
                      -1, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile control shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish) {
      if (ish->compiled_once) {
         iris_debug_recompile(ice, &nir->info, key->program_string_id, key);
      } else {
         ish->compiled_once = true;
      }
   }

   struct iris_compiled_shader *shader =
      iris_upload_shader(ice, IRIS_CACHE_TCS, sizeof(*key), key, program,
                         prog_data, NULL, system_values, num_system_values,
                         num_cbufs);

   ralloc_free(mem_ctx);
   return shader;
}

/**
 * Update the current tessellation control shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_tcs(struct iris_context *ice)
{
   struct iris_uncompiled_shader *tcs =
      ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct gen_device_info *devinfo = &screen->devinfo;

   const struct shader_info *tes_info =
      iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
   struct brw_tcs_prog_key key = {
      KEY_INIT_NO_ID(devinfo->gen),
      .program_string_id = tcs ? tcs->program_id : 0,
      .tes_primitive_mode = tes_info->tess.primitive_mode,
      .input_vertices = ice->state.vertices_per_patch,
   };
   get_unified_tess_slots(ice, &key.outputs_written,
                          &key.patch_outputs_written);
   ice->vtbl.populate_tcs_key(ice, &key);

   struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
   struct iris_compiled_shader *shader =
      iris_find_cached_shader(ice, IRIS_CACHE_TCS, sizeof(key), &key);

   if (!shader)
      shader = iris_compile_tcs(ice, tcs, &key);

   if (old != shader) {
      ice->shaders.prog[IRIS_CACHE_TCS] = shader;
      ice->state.dirty |= IRIS_DIRTY_TCS |
                          IRIS_DIRTY_BINDINGS_TCS |
                          IRIS_DIRTY_CONSTANTS_TCS;
   }
}

/**
 * Compile a tessellation evaluation shader, and upload the assembly.
 */
static struct iris_compiled_shader *
iris_compile_tes(struct iris_context *ice,
                 struct iris_uncompiled_shader *ish,
                 const struct brw_tes_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct gen_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_tes_prog_data *tes_prog_data =
      rzalloc(mem_ctx, struct brw_tes_prog_data);
   struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
   struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
   enum brw_param_builtin *system_values;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
                       &num_system_values, &num_cbufs);

   assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
                                       num_system_values, num_cbufs);

   struct brw_vue_map input_vue_map;
   brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
                            key->patch_inputs_read);

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_tes(compiler, &ice->dbg, mem_ctx, key, &input_vue_map,
                      tes_prog_data, nir, NULL, -1, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile evaluation shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      iris_debug_recompile(ice, &nir->info, key->program_string_id, key);
   } else {
      ish->compiled_once = true;
   }

   uint32_t *so_decls =
      ice->vtbl.create_so_decl_list(&ish->stream_output,
                                    &vue_prog_data->vue_map);


   struct iris_compiled_shader *shader =
      iris_upload_shader(ice, IRIS_CACHE_TES, sizeof(*key), key, program,
                         prog_data, so_decls, system_values, num_system_values,
                         num_cbufs);

   ralloc_free(mem_ctx);
   return shader;
}

/**
 * Update the current tessellation evaluation shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_tes(struct iris_context *ice)
{
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct gen_device_info *devinfo = &screen->devinfo;

   struct brw_tes_prog_key key = { KEY_INIT(devinfo->gen) };
   get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
   ice->vtbl.populate_tes_key(ice, &key);

   struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
   struct iris_compiled_shader *shader =
      iris_find_cached_shader(ice, IRIS_CACHE_TES, sizeof(key), &key);

   if (!shader)
      shader = iris_compile_tes(ice, ish, &key);

   if (old != shader) {
      ice->shaders.prog[IRIS_CACHE_TES] = shader;
      ice->state.dirty |= IRIS_DIRTY_TES |
                          IRIS_DIRTY_BINDINGS_TES |
                          IRIS_DIRTY_CONSTANTS_TES;
   }

   /* TODO: Could compare and avoid flagging this. */
   const struct shader_info *tes_info = &ish->nir->info;
   if (tes_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
      ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TES;
      ice->state.shaders[MESA_SHADER_TESS_EVAL].cbuf0_needs_upload = true;
   }
}

/**
 * Compile a geometry shader, and upload the assembly.
 */
static struct iris_compiled_shader *
iris_compile_gs(struct iris_context *ice,
                struct iris_uncompiled_shader *ish,
                const struct brw_gs_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct gen_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_gs_prog_data *gs_prog_data =
      rzalloc(mem_ctx, struct brw_gs_prog_data);
   struct brw_vue_prog_data *vue_prog_data = &gs_prog_data->base;
   struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
   enum brw_param_builtin *system_values;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
                       &num_system_values, &num_cbufs);

   assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
                                       num_system_values, num_cbufs);

   brw_compute_vue_map(devinfo,
                       &vue_prog_data->vue_map, nir->info.outputs_written,
                       nir->info.separate_shader);

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_gs(compiler, &ice->dbg, mem_ctx, key, gs_prog_data, nir,
                     NULL, -1, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile geometry shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      iris_debug_recompile(ice, &nir->info, key->program_string_id, key);
   } else {
      ish->compiled_once = true;
   }

   uint32_t *so_decls =
      ice->vtbl.create_so_decl_list(&ish->stream_output,
                                    &vue_prog_data->vue_map);

   struct iris_compiled_shader *shader =
      iris_upload_shader(ice, IRIS_CACHE_GS, sizeof(*key), key, program,
                         prog_data, so_decls, system_values, num_system_values,
                         num_cbufs);

   ralloc_free(mem_ctx);
   return shader;
}

/**
 * Update the current geometry shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_gs(struct iris_context *ice)
{
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
   struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
   struct iris_compiled_shader *shader = NULL;

   if (ish) {
      struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
      const struct gen_device_info *devinfo = &screen->devinfo;
      struct brw_gs_prog_key key = { KEY_INIT(devinfo->gen) };
      ice->vtbl.populate_gs_key(ice, &key);

      shader =
         iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);

      if (!shader)
         shader = iris_compile_gs(ice, ish, &key);
   }

   if (old != shader) {
      ice->shaders.prog[IRIS_CACHE_GS] = shader;
      ice->state.dirty |= IRIS_DIRTY_GS |
                          IRIS_DIRTY_BINDINGS_GS |
                          IRIS_DIRTY_CONSTANTS_GS;
   }
}

/**
 * Compile a fragment (pixel) shader, and upload the assembly.
 */
static struct iris_compiled_shader *
iris_compile_fs(struct iris_context *ice,
                struct iris_uncompiled_shader *ish,
                const struct brw_wm_prog_key *key,
                struct brw_vue_map *vue_map)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct gen_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_wm_prog_data *fs_prog_data =
      rzalloc(mem_ctx, struct brw_wm_prog_data);
   struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
   enum brw_param_builtin *system_values;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
      prog_data->use_alt_mode = true;

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
                       &num_system_values, &num_cbufs);

   assign_common_binding_table_offsets(devinfo, nir, prog_data,
                                       MAX2(key->nr_color_regions, 1),
                                       num_system_values, num_cbufs);
   char *error_str = NULL;
   const unsigned *program =
      brw_compile_fs(compiler, &ice->dbg, mem_ctx, key, fs_prog_data,
                     nir, NULL, -1, -1, -1, true, false, vue_map, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile fragment shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      iris_debug_recompile(ice, &nir->info, key->program_string_id, key);
   } else {
      ish->compiled_once = true;
   }

   struct iris_compiled_shader *shader =
      iris_upload_shader(ice, IRIS_CACHE_FS, sizeof(*key), key, program,
                         prog_data, NULL, system_values, num_system_values,
                         num_cbufs);

   ralloc_free(mem_ctx);
   return shader;
}

/**
 * Update the current fragment shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_fs(struct iris_context *ice)
{
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct brw_wm_prog_key key = { KEY_INIT(devinfo->gen) };
   ice->vtbl.populate_fs_key(ice, &key);

   if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
      key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;

   struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_FS];
   struct iris_compiled_shader *shader =
      iris_find_cached_shader(ice, IRIS_CACHE_FS, sizeof(key), &key);

   if (!shader)
      shader = iris_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);

   if (old != shader) {
      // XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
      // toggles. might be able to avoid flagging SBE too.
      ice->shaders.prog[IRIS_CACHE_FS] = shader;
      ice->state.dirty |= IRIS_DIRTY_FS |
                          IRIS_DIRTY_BINDINGS_FS |
                          IRIS_DIRTY_CONSTANTS_FS |
                          IRIS_DIRTY_WM |
                          IRIS_DIRTY_CLIP |
                          IRIS_DIRTY_SBE;
   }
}

/**
 * Return the last enabled shader stage in the geometry pipeline
 * (VS, TES, or GS).
 *
 * This stage is the one which will feed stream output and the rasterizer.
 */
static gl_shader_stage
last_vue_stage(struct iris_context *ice)
{
   if (ice->shaders.prog[MESA_SHADER_GEOMETRY])
      return MESA_SHADER_GEOMETRY;

   if (ice->shaders.prog[MESA_SHADER_TESS_EVAL])
      return MESA_SHADER_TESS_EVAL;

   return MESA_SHADER_VERTEX;
}

/**
 * Update the last enabled stage's VUE map.
 *
 * When the shader feeding the rasterizer's output interface changes, we
 * need to re-emit various packets.
 */
static void
update_last_vue_map(struct iris_context *ice,
                    struct brw_stage_prog_data *prog_data)
{
   struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
   struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
   struct brw_vue_map *old_map = ice->shaders.last_vue_map;
   const uint64_t changed_slots =
      (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;

   if (changed_slots & VARYING_BIT_VIEWPORT) {
      // XXX: could use ctx->Const.MaxViewports for old API efficiency
      ice->state.num_viewports =
         (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? IRIS_MAX_VIEWPORTS : 1;
      ice->state.dirty |= IRIS_DIRTY_CLIP |
                          IRIS_DIRTY_SF_CL_VIEWPORT |
                          IRIS_DIRTY_CC_VIEWPORT |
                          IRIS_DIRTY_SCISSOR_RECT |
                          IRIS_DIRTY_UNCOMPILED_FS |
                          ice->state.dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
      // XXX: CC_VIEWPORT?
   }

   if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
      ice->state.dirty |= IRIS_DIRTY_SBE;
   }

   ice->shaders.last_vue_map = &vue_prog_data->vue_map;
}

/**
 * Get the prog_data for a given stage, or NULL if the stage is disabled.
 */
static struct brw_vue_prog_data *
get_vue_prog_data(struct iris_context *ice, gl_shader_stage stage)
{
   if (!ice->shaders.prog[stage])
      return NULL;

   return (void *) ice->shaders.prog[stage]->prog_data;
}

// XXX: iris_compiled_shaders are space-leaking :(
// XXX: do remember to unbind them if deleting them.

/**
 * Update the current shader variants for the given state.
 *
 * This should be called on every draw call to ensure that the correct
 * shaders are bound. It will also flag any dirty state triggered by
 * swapping out those shaders.
 */
void
iris_update_compiled_shaders(struct iris_context *ice)
{
   const uint64_t dirty = ice->state.dirty;

   struct brw_vue_prog_data *old_prog_datas[4];
   if (!(dirty & IRIS_DIRTY_URB)) {
      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
         old_prog_datas[i] = get_vue_prog_data(ice, i);
   }

   if (dirty & (IRIS_DIRTY_UNCOMPILED_TCS | IRIS_DIRTY_UNCOMPILED_TES)) {
      struct iris_uncompiled_shader *tes =
         ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
      if (tes) {
         iris_update_compiled_tcs(ice);
         iris_update_compiled_tes(ice);
      } else {
         ice->shaders.prog[IRIS_CACHE_TCS] = NULL;
         ice->shaders.prog[IRIS_CACHE_TES] = NULL;
         ice->state.dirty |=
            IRIS_DIRTY_TCS | IRIS_DIRTY_TES |
            IRIS_DIRTY_BINDINGS_TCS | IRIS_DIRTY_BINDINGS_TES |
            IRIS_DIRTY_CONSTANTS_TCS | IRIS_DIRTY_CONSTANTS_TES;
      }
   }

   if (dirty & IRIS_DIRTY_UNCOMPILED_VS)
      iris_update_compiled_vs(ice);
   if (dirty & IRIS_DIRTY_UNCOMPILED_GS)
      iris_update_compiled_gs(ice);

   if (dirty & (IRIS_DIRTY_UNCOMPILED_GS | IRIS_DIRTY_UNCOMPILED_TES)) {
      const struct iris_compiled_shader *gs =
         ice->shaders.prog[MESA_SHADER_GEOMETRY];
      const struct iris_compiled_shader *tes =
         ice->shaders.prog[MESA_SHADER_TESS_EVAL];

      bool points_or_lines = false;

      if (gs) {
         const struct brw_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
         points_or_lines =
            gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
            gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
      } else if (tes) {
         const struct brw_tes_prog_data *tes_data = (void *) tes->prog_data;
         points_or_lines =
            tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_LINE ||
            tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
      }

      if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
         /* Outbound to XY Clip enables */
         ice->shaders.output_topology_is_points_or_lines = points_or_lines;
         ice->state.dirty |= IRIS_DIRTY_CLIP;
      }
   }

   gl_shader_stage last_stage = last_vue_stage(ice);
   struct iris_compiled_shader *shader = ice->shaders.prog[last_stage];
   struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
   update_last_vue_map(ice, shader->prog_data);
   if (ice->state.streamout != shader->streamout) {
      ice->state.streamout = shader->streamout;
      ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST | IRIS_DIRTY_STREAMOUT;
   }

   if (ice->state.streamout_active) {
      for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
         struct iris_stream_output_target *so =
            (void *) ice->state.so_target[i];
         if (so)
            so->stride = ish->stream_output.stride[i];
      }
   }

   if (dirty & IRIS_DIRTY_UNCOMPILED_FS)
      iris_update_compiled_fs(ice);

   /* Changing shader interfaces may require a URB reconfiguration. */
   if (!(dirty & IRIS_DIRTY_URB)) {
      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
         struct brw_vue_prog_data *old = old_prog_datas[i];
         struct brw_vue_prog_data *new = get_vue_prog_data(ice, i);
         if (!!old != !!new ||
             (new && new->urb_entry_size != old->urb_entry_size)) {
            ice->state.dirty |= IRIS_DIRTY_URB;
            break;
         }
      }
   }
}

static struct iris_compiled_shader *
iris_compile_cs(struct iris_context *ice,
                struct iris_uncompiled_shader *ish,
                const struct brw_cs_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct gen_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_cs_prog_data *cs_prog_data =
      rzalloc(mem_ctx, struct brw_cs_prog_data);
   struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
   enum brw_param_builtin *system_values;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   cs_prog_data->binding_table.work_groups_start = 0;

   prog_data->total_shared = nir->info.cs.shared_size;

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
                       &num_system_values, &num_cbufs);

   assign_common_binding_table_offsets(devinfo, nir, prog_data, 1,
                                       num_system_values, num_cbufs);

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_cs(compiler, &ice->dbg, mem_ctx, key, cs_prog_data,
                     nir, -1, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile compute shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      iris_debug_recompile(ice, &nir->info, key->program_string_id, key);
   } else {
      ish->compiled_once = true;
   }

   struct iris_compiled_shader *shader =
      iris_upload_shader(ice, IRIS_CACHE_CS, sizeof(*key), key, program,
                         prog_data, NULL, system_values, num_system_values,
                         num_cbufs);

   ralloc_free(mem_ctx);
   return shader;
}

void
iris_update_compiled_compute_shader(struct iris_context *ice)
{
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_COMPUTE];

   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct brw_cs_prog_key key = { KEY_INIT(devinfo->gen) };
   ice->vtbl.populate_cs_key(ice, &key);

   struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
   struct iris_compiled_shader *shader =
      iris_find_cached_shader(ice, IRIS_CACHE_CS, sizeof(key), &key);

   if (!shader)
      shader = iris_compile_cs(ice, ish, &key);

   if (old != shader) {
      ice->shaders.prog[IRIS_CACHE_CS] = shader;
      ice->state.dirty |= IRIS_DIRTY_CS |
                          IRIS_DIRTY_BINDINGS_CS |
                          IRIS_DIRTY_CONSTANTS_CS;
   }
}

void
iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
                               uint32_t *dst)
{
   assert(cs_prog_data->push.total.size > 0);
   assert(cs_prog_data->push.cross_thread.size == 0);
   assert(cs_prog_data->push.per_thread.dwords == 1);
   assert(cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
   for (unsigned t = 0; t < cs_prog_data->threads; t++)
      dst[8 * t] = t;
}
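
/* Illustrative layout: per-thread push constants are laid out at register
 * (32-byte / 8-dword) granularity, so with 4 threads the buffer holds the
 * subgroup ID at dwords 0, 8, 16, and 24, one per thread:
 *
 *    dst[0] = 0, dst[8] = 1, dst[16] = 2, dst[24] = 3
 */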

/**
 * Allocate scratch BOs as needed for the given per-thread size and stage.
 */
struct iris_bo *
iris_get_scratch_space(struct iris_context *ice,
                       unsigned per_thread_scratch,
                       gl_shader_stage stage)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   const struct gen_device_info *devinfo = &screen->devinfo;

   unsigned encoded_size = ffs(per_thread_scratch) - 11;
   assert(encoded_size < (1 << 16));

   struct iris_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];

   /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
    *
    *    "Scratch Space per slice is computed based on 4 sub-slices. SW
    *     must allocate scratch space enough so that each slice has 4
    *     slices allowed."
    *
    * According to the other driver team, this applies to compute shaders
    * as well. This is not currently documented at all.
    *
    * This hack is no longer necessary on Gen11+.
    */
   unsigned subslice_total = screen->subslice_total;
   if (devinfo->gen < 11)
      subslice_total = 4 * devinfo->num_slices;
   assert(subslice_total >= screen->subslice_total);

   if (!*bop) {
      unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;
      uint32_t max_threads[] = {
         [MESA_SHADER_VERTEX]    = devinfo->max_vs_threads,
         [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
         [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
         [MESA_SHADER_GEOMETRY]  = devinfo->max_gs_threads,
         [MESA_SHADER_FRAGMENT]  = devinfo->max_wm_threads,
         [MESA_SHADER_COMPUTE]   = scratch_ids_per_subslice * subslice_total,
      };

      uint32_t size = per_thread_scratch * max_threads[stage];

      *bop = iris_bo_alloc(bufmgr, "scratch", size, IRIS_MEMZONE_SHADER);
   }

   return *bop;
}
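
/* For example (illustrative): per_thread_scratch is always a power of two
 * of at least 1KB, so ffs(per_thread_scratch) - 11 maps 1KB -> 0, 2KB -> 1,
 * 4KB -> 2, and so on -- the same log2-of-KB encoding the hardware's
 * per-thread scratch space fields use, which makes it a convenient
 * scratch_bos[] index.
 */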

/* ------------------------------------------------------------------- */

/**
 * The pipe->create_[stage]_state() driver hooks.
 *
 * Performs basic NIR preprocessing, records any state dependencies, and
 * returns an iris_uncompiled_shader as the Gallium CSO.
 *
 * Actual shader compilation to assembly happens later, at first use.
 */
static void *
iris_create_uncompiled_shader(struct pipe_context *ctx,
                              nir_shader *nir,
                              const struct pipe_stream_output_info *so_info)
{
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;

   struct iris_uncompiled_shader *ish =
      calloc(1, sizeof(struct iris_uncompiled_shader));
   if (!ish)
      return NULL;

   nir = brw_preprocess_nir(screen->compiler, nir, NULL);

   NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo);
   NIR_PASS_V(nir, iris_lower_storage_image_derefs);

   ish->program_id = get_new_program_id(screen);
   ish->nir = nir;
   if (so_info) {
      memcpy(&ish->stream_output, so_info, sizeof(*so_info));
      update_so_info(&ish->stream_output, nir->info.outputs_written);
   }

   return ish;
}

static struct iris_uncompiled_shader *
iris_create_shader_state(struct pipe_context *ctx,
                         const struct pipe_shader_state *state)
{
   struct nir_shader *nir;

   if (state->type == PIPE_SHADER_IR_TGSI)
      nir = tgsi_to_nir(state->tokens, ctx->screen);
   else
      nir = state->ir.nir;

   return iris_create_uncompiled_shader(ctx, nir, &state->stream_output);
}

static void *
iris_create_vs_state(struct pipe_context *ctx,
                     const struct pipe_shader_state *state)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);

   /* User clip planes */
   if (ish->nir->info.clip_distance_array_size == 0)
      ish->nos |= (1ull << IRIS_NOS_RASTERIZER);

   if (screen->precompile) {
      const struct gen_device_info *devinfo = &screen->devinfo;
      struct brw_vs_prog_key key = { KEY_INIT(devinfo->gen) };

      iris_compile_vs(ice, ish, &key);
   }

   return ish;
}

static void *
iris_create_tcs_state(struct pipe_context *ctx,
                      const struct pipe_shader_state *state)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
   struct shader_info *info = &ish->nir->info;

   // XXX: NOS?

   if (screen->precompile) {
      const unsigned _GL_TRIANGLES = 0x0004;
      const struct gen_device_info *devinfo = &screen->devinfo;
      struct brw_tcs_prog_key key = {
         KEY_INIT(devinfo->gen),
         // XXX: make sure the linker fills this out from the TES...
         .tes_primitive_mode =
            info->tess.primitive_mode ? info->tess.primitive_mode
                                      : _GL_TRIANGLES,
         .outputs_written = info->outputs_written,
         .patch_outputs_written = info->patch_outputs_written,
      };

      iris_compile_tcs(ice, ish, &key);
   }

   return ish;
}

static void *
iris_create_tes_state(struct pipe_context *ctx,
                      const struct pipe_shader_state *state)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
   struct shader_info *info = &ish->nir->info;

   // XXX: NOS?

   if (screen->precompile) {
      const struct gen_device_info *devinfo = &screen->devinfo;
      struct brw_tes_prog_key key = {
         KEY_INIT(devinfo->gen),
         // XXX: not ideal, need TCS output/TES input unification
         .inputs_read = info->inputs_read,
         .patch_inputs_read = info->patch_inputs_read,
      };

      iris_compile_tes(ice, ish, &key);
   }

   return ish;
}

static void *
iris_create_gs_state(struct pipe_context *ctx,
                     const struct pipe_shader_state *state)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);

   // XXX: NOS?

   if (screen->precompile) {
      const struct gen_device_info *devinfo = &screen->devinfo;
      struct brw_gs_prog_key key = { KEY_INIT(devinfo->gen) };

      iris_compile_gs(ice, ish, &key);
   }

   return ish;
}

static void *
iris_create_fs_state(struct pipe_context *ctx,
                     const struct pipe_shader_state *state)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
   struct shader_info *info = &ish->nir->info;

   ish->nos |= (1ull << IRIS_NOS_FRAMEBUFFER) |
               (1ull << IRIS_NOS_DEPTH_STENCIL_ALPHA) |
               (1ull << IRIS_NOS_RASTERIZER) |
               (1ull << IRIS_NOS_BLEND);

   /* The program key needs the VUE map if there are > 16 inputs */
   if (util_bitcount64(ish->nir->info.inputs_read &
                       BRW_FS_VARYING_INPUT_MASK) > 16) {
      ish->nos |= (1ull << IRIS_NOS_LAST_VUE_MAP);
   }

   if (screen->precompile) {
      const uint64_t color_outputs = info->outputs_written &
         ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
           BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
           BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));

      bool can_rearrange_varyings =
         util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;

      const struct gen_device_info *devinfo = &screen->devinfo;
      struct brw_wm_prog_key key = {
         KEY_INIT(devinfo->gen),
         .nr_color_regions = util_bitcount(color_outputs),
         .coherent_fb_fetch = true,
         .input_slots_valid =
            can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
      };

      iris_compile_fs(ice, ish, &key, NULL);
   }

   return ish;
}

static void *
iris_create_compute_state(struct pipe_context *ctx,
                          const struct pipe_compute_state *state)
{
   assert(state->ir_type == PIPE_SHADER_IR_NIR);

   struct iris_context *ice = (void *) ctx;
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_uncompiled_shader *ish =
      iris_create_uncompiled_shader(ctx, (void *) state->prog, NULL);

   // XXX: disallow more than 64KB of shared variables

   if (screen->precompile) {
      const struct gen_device_info *devinfo = &screen->devinfo;
      struct brw_cs_prog_key key = { KEY_INIT(devinfo->gen) };

      iris_compile_cs(ice, ish, &key);
   }

   return ish;
}

/**
 * The pipe->delete_[stage]_state() driver hooks.
 *
 * Frees the iris_uncompiled_shader.
 */
static void
iris_delete_shader_state(struct pipe_context *ctx, void *state,
                         gl_shader_stage stage)
{
   struct iris_uncompiled_shader *ish = state;
   struct iris_context *ice = (void *) ctx;

   if (ice->shaders.uncompiled[stage] == ish) {
      ice->shaders.uncompiled[stage] = NULL;
      ice->state.dirty |= IRIS_DIRTY_UNCOMPILED_VS << stage;
   }

   ralloc_free(ish->nir);
   free(ish);
}

static void
iris_delete_vs_state(struct pipe_context *ctx, void *state)
{
   iris_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
}

static void
iris_delete_tcs_state(struct pipe_context *ctx, void *state)
{
   iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
}

static void
iris_delete_tes_state(struct pipe_context *ctx, void *state)
{
   iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
}

static void
iris_delete_gs_state(struct pipe_context *ctx, void *state)
{
   iris_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
}

static void
iris_delete_fs_state(struct pipe_context *ctx, void *state)
{
   iris_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
}

static void
iris_delete_cs_state(struct pipe_context *ctx, void *state)
{
   iris_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
}

/**
 * The pipe->bind_[stage]_state() driver hook.
 *
 * Binds an uncompiled shader as the current one for a particular stage.
 * Updates dirty tracking to account for the shader's NOS.
 */
static void
bind_state(struct iris_context *ice,
           struct iris_uncompiled_shader *ish,
           gl_shader_stage stage)
{
   uint64_t dirty_bit = IRIS_DIRTY_UNCOMPILED_VS << stage;
   const uint64_t nos = ish ? ish->nos : 0;

   const struct shader_info *old_info = iris_get_shader_info(ice, stage);
   const struct shader_info *new_info = ish ? &ish->nir->info : NULL;

   if ((old_info ? util_last_bit(old_info->textures_used) : 0) !=
       (new_info ? util_last_bit(new_info->textures_used) : 0)) {
      ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
   }

   ice->shaders.uncompiled[stage] = ish;
   ice->state.dirty |= dirty_bit;

   /* Record that CSOs need to mark IRIS_DIRTY_UNCOMPILED_XS when they change
    * (or that they no longer need to do so).
    */
   for (int i = 0; i < IRIS_NOS_COUNT; i++) {
      if (nos & (1 << i))
         ice->state.dirty_for_nos[i] |= dirty_bit;
      else
         ice->state.dirty_for_nos[i] &= ~dirty_bit;
   }
}
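
/* For example (illustrative): a fragment shader sets IRIS_NOS_BLEND, so
 * while it's bound, dirty_for_nos[IRIS_NOS_BLEND] contains
 * IRIS_DIRTY_UNCOMPILED_FS; when the blend CSO later changes, the state
 * module ORs that mask into ice->state.dirty, forcing a variant re-check.
 */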

static void
iris_bind_vs_state(struct pipe_context *ctx, void *state)
{
   bind_state((void *) ctx, state, MESA_SHADER_VERTEX);
}

static void
iris_bind_tcs_state(struct pipe_context *ctx, void *state)
{
   bind_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
}

static void
iris_bind_tes_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Enabling/disabling optional stages requires a URB reconfiguration. */
   if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
      ice->state.dirty |= IRIS_DIRTY_URB;

   bind_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
}

static void
iris_bind_gs_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Enabling/disabling optional stages requires a URB reconfiguration. */
   if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
      ice->state.dirty |= IRIS_DIRTY_URB;

   bind_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
}

static void
iris_bind_fs_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_uncompiled_shader *old_ish =
      ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
   struct iris_uncompiled_shader *new_ish = state;

   const unsigned color_bits =
      BITFIELD64_BIT(FRAG_RESULT_COLOR) |
      BITFIELD64_RANGE(FRAG_RESULT_DATA0, BRW_MAX_DRAW_BUFFERS);

   /* Fragment shader outputs influence HasWriteableRT */
   if (!old_ish || !new_ish ||
       (old_ish->nir->info.outputs_written & color_bits) !=
       (new_ish->nir->info.outputs_written & color_bits))
      ice->state.dirty |= IRIS_DIRTY_PS_BLEND;

   bind_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
}

static void
iris_bind_cs_state(struct pipe_context *ctx, void *state)
{
   bind_state((void *) ctx, state, MESA_SHADER_COMPUTE);
}

void
iris_init_program_functions(struct pipe_context *ctx)
{
   ctx->create_vs_state = iris_create_vs_state;
   ctx->create_tcs_state = iris_create_tcs_state;
   ctx->create_tes_state = iris_create_tes_state;
   ctx->create_gs_state = iris_create_gs_state;
   ctx->create_fs_state = iris_create_fs_state;
   ctx->create_compute_state = iris_create_compute_state;

   ctx->delete_vs_state = iris_delete_vs_state;
   ctx->delete_tcs_state = iris_delete_tcs_state;
   ctx->delete_tes_state = iris_delete_tes_state;
   ctx->delete_gs_state = iris_delete_gs_state;
   ctx->delete_fs_state = iris_delete_fs_state;
   ctx->delete_compute_state = iris_delete_cs_state;

   ctx->bind_vs_state = iris_bind_vs_state;
   ctx->bind_tcs_state = iris_bind_tcs_state;
   ctx->bind_tes_state = iris_bind_tes_state;
   ctx->bind_gs_state = iris_bind_gs_state;
   ctx->bind_fs_state = iris_bind_fs_state;
   ctx->bind_compute_state = iris_bind_cs_state;
}