1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_program.c
25 *
26 * This file contains the driver interface for compiling shaders.
27 *
28 * See iris_program_cache.c for the in-memory program cache where the
29 * compiled shaders are stored.
30 */
31
32 #include <stdio.h>
33 #include <errno.h>
34 #include "pipe/p_defines.h"
35 #include "pipe/p_state.h"
36 #include "pipe/p_context.h"
37 #include "pipe/p_screen.h"
38 #include "util/u_atomic.h"
39 #include "util/u_upload_mgr.h"
40 #include "util/debug.h"
41 #include "compiler/nir/nir.h"
42 #include "compiler/nir/nir_builder.h"
43 #include "compiler/nir/nir_serialize.h"
44 #include "intel/compiler/brw_compiler.h"
45 #include "intel/compiler/brw_nir.h"
46 #include "iris_context.h"
47 #include "nir/tgsi_to_nir.h"
48
49 #define KEY_ID(prefix) .prefix.program_string_id = ish->program_id
50 #define BRW_KEY_INIT(gen, prog_id) \
51 .base.program_string_id = prog_id, \
52 .base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM, \
53 .base.tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688, \
54 .base.tex.compressed_multisample_layout_mask = ~0, \
55 .base.tex.msaa_16 = (gen >= 9 ? ~0 : 0)
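
/* Purely as an illustration (hypothetical arguments, not part of the build):
 * BRW_KEY_INIT(9, 7) expands to roughly
 *
 *    .base.program_string_id = 7,
 *    .base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM,
 *    .base.tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688,
 *    .base.tex.compressed_multisample_layout_mask = ~0,
 *    .base.tex.msaa_16 = ~0,
 *
 * i.e. the designated initializers for the common brw_base_prog_key fields
 * that each iris_to_brw_*_key() helper below embeds in its return value.
 */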
56
57 static unsigned
58 get_new_program_id(struct iris_screen *screen)
59 {
60 return p_atomic_inc_return(&screen->program_id);
61 }
62
63 static struct brw_vs_prog_key
64 iris_to_brw_vs_key(const struct gen_device_info *devinfo,
65 const struct iris_vs_prog_key *key)
66 {
67 return (struct brw_vs_prog_key) {
68 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
69
70 /* Don't tell the backend about our clip plane constants; we've
71 * already lowered them in NIR and don't want it doing it again.
72 */
73 .nr_userclip_plane_consts = 0,
74 };
75 }
76
77 static struct brw_tcs_prog_key
78 iris_to_brw_tcs_key(const struct gen_device_info *devinfo,
79 const struct iris_tcs_prog_key *key)
80 {
81 return (struct brw_tcs_prog_key) {
82 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
83 .tes_primitive_mode = key->tes_primitive_mode,
84 .input_vertices = key->input_vertices,
85 .patch_outputs_written = key->patch_outputs_written,
86 .outputs_written = key->outputs_written,
87 .quads_workaround = key->quads_workaround,
88 };
89 }
90
91 static struct brw_tes_prog_key
92 iris_to_brw_tes_key(const struct gen_device_info *devinfo,
93 const struct iris_tes_prog_key *key)
94 {
95 return (struct brw_tes_prog_key) {
96 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
97 .patch_inputs_read = key->patch_inputs_read,
98 .inputs_read = key->inputs_read,
99 };
100 }
101
102 static struct brw_gs_prog_key
103 iris_to_brw_gs_key(const struct gen_device_info *devinfo,
104 const struct iris_gs_prog_key *key)
105 {
106 return (struct brw_gs_prog_key) {
107 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
108 };
109 }
110
111 static struct brw_wm_prog_key
112 iris_to_brw_fs_key(const struct gen_device_info *devinfo,
113 const struct iris_fs_prog_key *key)
114 {
115 return (struct brw_wm_prog_key) {
116 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
117 .nr_color_regions = key->nr_color_regions,
118 .flat_shade = key->flat_shade,
119 .alpha_test_replicate_alpha = key->alpha_test_replicate_alpha,
120 .alpha_to_coverage = key->alpha_to_coverage,
121 .clamp_fragment_color = key->clamp_fragment_color,
122 .persample_interp = key->persample_interp,
123 .multisample_fbo = key->multisample_fbo,
124 .force_dual_color_blend = key->force_dual_color_blend,
125 .coherent_fb_fetch = key->coherent_fb_fetch,
126 .color_outputs_valid = key->color_outputs_valid,
127 .input_slots_valid = key->input_slots_valid,
128 };
129 }
130
131 static struct brw_cs_prog_key
132 iris_to_brw_cs_key(const struct gen_device_info *devinfo,
133 const struct iris_cs_prog_key *key)
134 {
135 return (struct brw_cs_prog_key) {
136 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
137 };
138 }
139
140 static void *
141 upload_state(struct u_upload_mgr *uploader,
142 struct iris_state_ref *ref,
143 unsigned size,
144 unsigned alignment)
145 {
146 void *p = NULL;
147 u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
148 return p;
149 }
150
151 void
152 iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
153 struct pipe_shader_buffer *buf,
154 struct iris_state_ref *surf_state,
155 bool ssbo)
156 {
157 struct pipe_context *ctx = &ice->ctx;
158 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
159
160 void *map =
161 upload_state(ice->state.surface_uploader, surf_state,
162 screen->isl_dev.ss.size, 64);
163 if (unlikely(!map)) {
164 surf_state->res = NULL;
165 return;
166 }
167
168 struct iris_resource *res = (void *) buf->buffer;
169 struct iris_bo *surf_bo = iris_resource_bo(surf_state->res);
170 surf_state->offset += iris_bo_offset_from_base_address(surf_bo);
171
172 isl_buffer_fill_state(&screen->isl_dev, map,
173 .address = res->bo->gtt_offset + res->offset +
174 buf->buffer_offset,
175 .size_B = buf->buffer_size - res->offset,
176 .format = ssbo ? ISL_FORMAT_RAW
177 : ISL_FORMAT_R32G32B32A32_FLOAT,
178 .swizzle = ISL_SWIZZLE_IDENTITY,
179 .stride_B = 1,
180 .mocs = iris_mocs(res->bo, &screen->isl_dev));
181 }
182
183 static nir_ssa_def *
184 get_aoa_deref_offset(nir_builder *b,
185 nir_deref_instr *deref,
186 unsigned elem_size)
187 {
188 unsigned array_size = elem_size;
189 nir_ssa_def *offset = nir_imm_int(b, 0);
190
191 while (deref->deref_type != nir_deref_type_var) {
192 assert(deref->deref_type == nir_deref_type_array);
193
194 /* This level's element size is the previous level's array size */
195 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
196 assert(deref->arr.index.ssa);
197 offset = nir_iadd(b, offset,
198 nir_imul(b, index, nir_imm_int(b, array_size)));
199
200 deref = nir_deref_instr_parent(deref);
201 assert(glsl_type_is_array(deref->type));
202 array_size *= glsl_get_length(deref->type);
203 }
204
205 /* Accessing an invalid surface index with the dataport can result in a
206 * hang. According to the spec "if the index used to select an individual
207 * element is negative or greater than or equal to the size of the array,
208 * the results of the operation are undefined but may not lead to
209 * termination" -- which is one of the possible outcomes of the hang.
210 * Clamp the index to prevent access outside of the array bounds.
211 */
212 return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
213 }
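
/* A worked example of the walk above (hypothetical shader): for
 * "image2D img[4][3]" accessed as img[a][b] with elem_size == 1, the first
 * iteration adds b * 1, the second adds a * 3 (the inner array length),
 * and array_size ends up as 12.  So img[2][1] yields 1 + 2 * 3 = 7,
 * clamped by the nir_umin above to at most 12 - 1 = 11.
 */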
214
215 static void
216 iris_lower_storage_image_derefs(nir_shader *nir)
217 {
218 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
219
220 nir_builder b;
221 nir_builder_init(&b, impl);
222
223 nir_foreach_block(block, impl) {
224 nir_foreach_instr_safe(instr, block) {
225 if (instr->type != nir_instr_type_intrinsic)
226 continue;
227
228 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
229 switch (intrin->intrinsic) {
230 case nir_intrinsic_image_deref_load:
231 case nir_intrinsic_image_deref_store:
232 case nir_intrinsic_image_deref_atomic_add:
233 case nir_intrinsic_image_deref_atomic_imin:
234 case nir_intrinsic_image_deref_atomic_umin:
235 case nir_intrinsic_image_deref_atomic_imax:
236 case nir_intrinsic_image_deref_atomic_umax:
237 case nir_intrinsic_image_deref_atomic_and:
238 case nir_intrinsic_image_deref_atomic_or:
239 case nir_intrinsic_image_deref_atomic_xor:
240 case nir_intrinsic_image_deref_atomic_exchange:
241 case nir_intrinsic_image_deref_atomic_comp_swap:
242 case nir_intrinsic_image_deref_size:
243 case nir_intrinsic_image_deref_samples:
244 case nir_intrinsic_image_deref_load_raw_intel:
245 case nir_intrinsic_image_deref_store_raw_intel: {
246 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
247 nir_variable *var = nir_deref_instr_get_variable(deref);
248
249 b.cursor = nir_before_instr(&intrin->instr);
250 nir_ssa_def *index =
251 nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
252 get_aoa_deref_offset(&b, deref, 1));
253 nir_rewrite_image_intrinsic(intrin, index, false);
254 break;
255 }
256
257 default:
258 break;
259 }
260 }
261 }
262 }
263
264 /**
265 * Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
266 */
267 static bool
268 iris_fix_edge_flags(nir_shader *nir)
269 {
270 if (nir->info.stage != MESA_SHADER_VERTEX)
271 return false;
272
273 nir_variable *var = NULL;
274 nir_foreach_variable(v, &nir->outputs) {
275 if (v->data.location == VARYING_SLOT_EDGE) {
276 var = v;
277 break;
278 }
279 }
280
281 if (!var)
282 return false;
283
284 exec_node_remove(&var->node);
285 var->data.mode = nir_var_shader_temp;
286 exec_list_push_tail(&nir->globals, &var->node);
287 nir->info.outputs_written &= ~VARYING_BIT_EDGE;
288 nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
289 nir_fixup_deref_modes(nir);
290
291 nir_foreach_function(f, nir) {
292 if (f->impl) {
293 nir_metadata_preserve(f->impl, nir_metadata_block_index |
294 nir_metadata_dominance |
295 nir_metadata_live_ssa_defs |
296 nir_metadata_loop_analysis);
297 }
298 }
299
300 return true;
301 }
302
303 /**
304 * Fix an uncompiled shader's stream output info.
305 *
306 * Core Gallium stores output->register_index as a "slot" number, where
307 * slots are assigned consecutively to all outputs in info->outputs_written.
308 * This naive packing of outputs doesn't work for us - we too have slots,
309 * but the layout is defined by the VUE map, which we won't have until we
310 * compile a specific shader variant. So, we remap these and simply store
311 * VARYING_SLOT_* in our copy's output->register_index fields.
312 *
313 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
314 * components of our VUE header. See brw_vue_map.c for the layout.
315 */
316 static void
317 update_so_info(struct pipe_stream_output_info *so_info,
318 uint64_t outputs_written)
319 {
320 uint8_t reverse_map[64] = {};
321 unsigned slot = 0;
322 while (outputs_written) {
323 reverse_map[slot++] = u_bit_scan64(&outputs_written);
324 }
325
326 for (unsigned i = 0; i < so_info->num_outputs; i++) {
327 struct pipe_stream_output *output = &so_info->output[i];
328
329 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
330 output->register_index = reverse_map[output->register_index];
331
332 /* The VUE header contains three scalar fields packed together:
333 * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
334 * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
335 * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
336 */
337 switch (output->register_index) {
338 case VARYING_SLOT_LAYER:
339 assert(output->num_components == 1);
340 output->register_index = VARYING_SLOT_PSIZ;
341 output->start_component = 1;
342 break;
343 case VARYING_SLOT_VIEWPORT:
344 assert(output->num_components == 1);
345 output->register_index = VARYING_SLOT_PSIZ;
346 output->start_component = 2;
347 break;
348 case VARYING_SLOT_PSIZ:
349 assert(output->num_components == 1);
350 output->start_component = 3;
351 break;
352 }
353
354 //info->outputs_written |= 1ull << output->register_index;
355 }
356 }
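
/* A small example of the remapping above (hypothetical outputs): if
 * outputs_written contains only POS, PSIZ, and VAR0, the condensed Gallium
 * slots are 0, 1, 2, so reverse_map becomes { VARYING_SLOT_POS,
 * VARYING_SLOT_PSIZ, VARYING_SLOT_VAR0 }.  A stream output recorded with
 * register_index 1 is then rewritten to VARYING_SLOT_PSIZ with
 * start_component = 3, the gl_PointSize location in the VUE header.
 */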
357
358 static void
359 setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
360 unsigned offset, unsigned n)
361 {
362 assert(offset % sizeof(uint32_t) == 0);
363
364 for (unsigned i = 0; i < n; ++i)
365 sysvals[i] = BRW_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);
366
367 for (unsigned i = n; i < 4; ++i)
368 sysvals[i] = BRW_PARAM_BUILTIN_ZERO;
369 }
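
/* For illustration (hypothetical arguments): calling this with idx = 2,
 * offset = 8 and n = 3 fills sysvals[0..2] with BRW_PARAM_IMAGE(2, 2),
 * BRW_PARAM_IMAGE(2, 3) and BRW_PARAM_IMAGE(2, 4), then pads sysvals[3]
 * with BRW_PARAM_BUILTIN_ZERO so the field occupies a full vec4.
 */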
370
371 /**
372 * Associate NIR uniform variables with the prog_data->param[] mechanism
373 * used by the backend. Also, decide which UBOs we'd like to push in an
374 * ideal situation (though the backend can reduce this).
375 */
376 static void
377 iris_setup_uniforms(const struct brw_compiler *compiler,
378 void *mem_ctx,
379 nir_shader *nir,
380 struct brw_stage_prog_data *prog_data,
381 enum brw_param_builtin **out_system_values,
382 unsigned *out_num_system_values,
383 unsigned *out_num_cbufs)
384 {
385 UNUSED const struct gen_device_info *devinfo = compiler->devinfo;
386
387 const unsigned IRIS_MAX_SYSTEM_VALUES =
388 PIPE_MAX_SHADER_IMAGES * BRW_IMAGE_PARAM_SIZE;
389 enum brw_param_builtin *system_values =
390 rzalloc_array(mem_ctx, enum brw_param_builtin, IRIS_MAX_SYSTEM_VALUES);
391 unsigned num_system_values = 0;
392
393 unsigned patch_vert_idx = -1;
394 unsigned ucp_idx[IRIS_MAX_CLIP_PLANES];
395 unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
396 unsigned variable_group_size_idx = -1;
397 memset(ucp_idx, -1, sizeof(ucp_idx));
398 memset(img_idx, -1, sizeof(img_idx));
399
400 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
401
402 nir_builder b;
403 nir_builder_init(&b, impl);
404
405 b.cursor = nir_before_block(nir_start_block(impl));
406 nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
407 nir_ssa_def *temp_const_ubo_name = NULL;
408
409 /* Turn system value intrinsics into uniforms */
410 nir_foreach_block(block, impl) {
411 nir_foreach_instr_safe(instr, block) {
412 if (instr->type != nir_instr_type_intrinsic)
413 continue;
414
415 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
416 nir_ssa_def *offset;
417
418 switch (intrin->intrinsic) {
419 case nir_intrinsic_load_constant: {
420 /* This one is special because it reads from the shader constant
421 * data and not cbuf0 which gallium uploads for us.
422 */
423 b.cursor = nir_before_instr(instr);
424 nir_ssa_def *offset =
425 nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
426 nir_intrinsic_base(intrin));
427
428 if (temp_const_ubo_name == NULL)
429 temp_const_ubo_name = nir_imm_int(&b, 0);
430
431 nir_intrinsic_instr *load_ubo =
432 nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
433 load_ubo->num_components = intrin->num_components;
434 load_ubo->src[0] = nir_src_for_ssa(temp_const_ubo_name);
435 load_ubo->src[1] = nir_src_for_ssa(offset);
436 nir_intrinsic_set_align(load_ubo,
437 nir_intrinsic_align_mul(intrin),
438 nir_intrinsic_align_offset(intrin));
439 nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
440 intrin->dest.ssa.num_components,
441 intrin->dest.ssa.bit_size,
442 intrin->dest.ssa.name);
443 nir_builder_instr_insert(&b, &load_ubo->instr);
444
445 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
446 nir_src_for_ssa(&load_ubo->dest.ssa));
447 nir_instr_remove(&intrin->instr);
448 continue;
449 }
450 case nir_intrinsic_load_user_clip_plane: {
451 unsigned ucp = nir_intrinsic_ucp_id(intrin);
452
453 if (ucp_idx[ucp] == -1) {
454 ucp_idx[ucp] = num_system_values;
455 num_system_values += 4;
456 }
457
458 for (int i = 0; i < 4; i++) {
459 system_values[ucp_idx[ucp] + i] =
460 BRW_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
461 }
462
463 b.cursor = nir_before_instr(instr);
464 offset = nir_imm_int(&b, ucp_idx[ucp] * sizeof(uint32_t));
465 break;
466 }
467 case nir_intrinsic_load_patch_vertices_in:
468 if (patch_vert_idx == -1)
469 patch_vert_idx = num_system_values++;
470
471 system_values[patch_vert_idx] =
472 BRW_PARAM_BUILTIN_PATCH_VERTICES_IN;
473
474 b.cursor = nir_before_instr(instr);
475 offset = nir_imm_int(&b, patch_vert_idx * sizeof(uint32_t));
476 break;
477 case nir_intrinsic_image_deref_load_param_intel: {
478 assert(devinfo->gen < 9);
479 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
480 nir_variable *var = nir_deref_instr_get_variable(deref);
481
482 if (img_idx[var->data.binding] == -1) {
483 /* GL only allows arrays of arrays of images. */
484 assert(glsl_type_is_image(glsl_without_array(var->type)));
485 unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));
486
487 for (int i = 0; i < num_images; i++) {
488 const unsigned img = var->data.binding + i;
489
490 img_idx[img] = num_system_values;
491 num_system_values += BRW_IMAGE_PARAM_SIZE;
492
493 uint32_t *img_sv = &system_values[img_idx[img]];
494
495 setup_vec4_image_sysval(
496 img_sv + BRW_IMAGE_PARAM_OFFSET_OFFSET, img,
497 offsetof(struct brw_image_param, offset), 2);
498 setup_vec4_image_sysval(
499 img_sv + BRW_IMAGE_PARAM_SIZE_OFFSET, img,
500 offsetof(struct brw_image_param, size), 3);
501 setup_vec4_image_sysval(
502 img_sv + BRW_IMAGE_PARAM_STRIDE_OFFSET, img,
503 offsetof(struct brw_image_param, stride), 4);
504 setup_vec4_image_sysval(
505 img_sv + BRW_IMAGE_PARAM_TILING_OFFSET, img,
506 offsetof(struct brw_image_param, tiling), 3);
507 setup_vec4_image_sysval(
508 img_sv + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, img,
509 offsetof(struct brw_image_param, swizzling), 2);
510 }
511 }
512
513 b.cursor = nir_before_instr(instr);
514 offset = nir_iadd(&b,
515 get_aoa_deref_offset(&b, deref, BRW_IMAGE_PARAM_SIZE * 4),
516 nir_imm_int(&b, img_idx[var->data.binding] * 4 +
517 nir_intrinsic_base(intrin) * 16));
518 break;
519 }
520 case nir_intrinsic_load_local_group_size: {
521 assert(nir->info.cs.local_size_variable);
522 if (variable_group_size_idx == -1) {
523 variable_group_size_idx = num_system_values;
524 num_system_values += 3;
525 for (int i = 0; i < 3; i++) {
526 system_values[variable_group_size_idx + i] =
527 BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X + i;
528 }
529 }
530
531 b.cursor = nir_before_instr(instr);
532 offset = nir_imm_int(&b, variable_group_size_idx * sizeof(uint32_t));
533 break;
534 }
535 default:
536 continue;
537 }
538
539 unsigned comps = nir_intrinsic_dest_components(intrin);
540
541 nir_intrinsic_instr *load =
542 nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
543 load->num_components = comps;
544 load->src[0] = nir_src_for_ssa(temp_ubo_name);
545 load->src[1] = nir_src_for_ssa(offset);
546 nir_intrinsic_set_align(load, 4, 0);
547 nir_ssa_dest_init(&load->instr, &load->dest, comps, 32, NULL);
548 nir_builder_instr_insert(&b, &load->instr);
549 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
550 nir_src_for_ssa(&load->dest.ssa));
551 nir_instr_remove(instr);
552 }
553 }
554
555 nir_validate_shader(nir, "before remapping");
556
557 /* Uniforms are stored in constant buffer 0; the user-facing UBOs
558 * start at index one. So if any constant buffer is needed, constant
559 * buffer 0 will be needed too, so account for it.
560 */
561 unsigned num_cbufs = nir->info.num_ubos;
562 if (num_cbufs || nir->num_uniforms)
563 num_cbufs++;
564
565 /* Place the new params in a new cbuf. */
566 if (num_system_values > 0) {
567 unsigned sysval_cbuf_index = num_cbufs;
568 num_cbufs++;
569
570 system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
571 num_system_values);
572
573 nir_foreach_block(block, impl) {
574 nir_foreach_instr_safe(instr, block) {
575 if (instr->type != nir_instr_type_intrinsic)
576 continue;
577
578 nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
579
580 if (load->intrinsic != nir_intrinsic_load_ubo)
581 continue;
582
583 b.cursor = nir_before_instr(instr);
584
585 assert(load->src[0].is_ssa);
586
587 if (load->src[0].ssa == temp_ubo_name) {
588 nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
589 nir_instr_rewrite_src(instr, &load->src[0],
590 nir_src_for_ssa(imm));
591 }
592 }
593 }
594
595 /* We need to fold the new iadds for brw_nir_analyze_ubo_ranges */
596 nir_opt_constant_folding(nir);
597 } else {
598 ralloc_free(system_values);
599 system_values = NULL;
600 }
601
602 assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
603 nir_validate_shader(nir, "after remap");
604
605 /* We don't use params[] but gallium leaves num_uniforms set. We use this
606 * to detect when cbuf0 exists but is no longer needed by the time we get
607 * here. Instead, zero it out so that the back-end doesn't get confused
608 * when num_uniforms != nr_params * 4.
609 */
610 nir->num_uniforms = 0;
611
612 /* Constant loads (if any) need to go at the end of the constant buffers so
613 * we need to know num_cbufs before we can lower to them.
614 */
615 if (temp_const_ubo_name != NULL) {
616 nir_load_const_instr *const_ubo_index =
617 nir_instr_as_load_const(temp_const_ubo_name->parent_instr);
618 assert(const_ubo_index->def.bit_size == 32);
619 const_ubo_index->value[0].u32 = num_cbufs;
620 }
621
622 *out_system_values = system_values;
623 *out_num_system_values = num_system_values;
624 *out_num_cbufs = num_cbufs;
625 }
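
/* A rough sketch of the resulting constant buffer layout (hypothetical
 * shader with two user-facing UBOs, some GL uniforms, at least one system
 * value, and a load_constant):
 *
 *    cbuf 0      gallium-uploaded uniform data
 *    cbuf 1..2   user-facing UBOs
 *    cbuf 3      system values gathered above (sysval_cbuf_index)
 *    cbuf 4      shader constant data (the slot temp_const_ubo_name is
 *                rewritten to)
 */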
626
627 static const char *surface_group_names[] = {
628 [IRIS_SURFACE_GROUP_RENDER_TARGET] = "render target",
629 [IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
630 [IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
631 [IRIS_SURFACE_GROUP_TEXTURE] = "texture",
632 [IRIS_SURFACE_GROUP_UBO] = "ubo",
633 [IRIS_SURFACE_GROUP_SSBO] = "ssbo",
634 [IRIS_SURFACE_GROUP_IMAGE] = "image",
635 };
636
637 static void
638 iris_print_binding_table(FILE *fp, const char *name,
639 const struct iris_binding_table *bt)
640 {
641 STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == IRIS_SURFACE_GROUP_COUNT);
642
643 uint32_t total = 0;
644 uint32_t compacted = 0;
645
646 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
647 uint32_t size = bt->sizes[i];
648 total += size;
649 if (size)
650 compacted += util_bitcount64(bt->used_mask[i]);
651 }
652
653 if (total == 0) {
654 fprintf(fp, "Binding table for %s is empty\n\n", name);
655 return;
656 }
657
658 if (total != compacted) {
659 fprintf(fp, "Binding table for %s "
660 "(compacted to %u entries from %u entries)\n",
661 name, compacted, total);
662 } else {
663 fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
664 }
665
666 uint32_t entry = 0;
667 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
668 uint64_t mask = bt->used_mask[i];
669 while (mask) {
670 int index = u_bit_scan64(&mask);
671 fprintf(fp, " [%u] %s #%d\n", entry++, surface_group_names[i], index);
672 }
673 }
674 fprintf(fp, "\n");
675 }
676
677 enum {
678 /* Max elements in a surface group. */
679 SURFACE_GROUP_MAX_ELEMENTS = 64,
680 };
681
682 /**
683 * Map a <group, index> pair to a binding table index.
684 *
685 * For example: <UBO, 5> => binding table index 12
686 */
687 uint32_t
688 iris_group_index_to_bti(const struct iris_binding_table *bt,
689 enum iris_surface_group group, uint32_t index)
690 {
691 assert(index < bt->sizes[group]);
692 uint64_t mask = bt->used_mask[group];
693 uint64_t bit = 1ull << index;
694 if (bit & mask) {
695 return bt->offsets[group] + util_bitcount64((bit - 1) & mask);
696 } else {
697 return IRIS_SURFACE_NOT_USED;
698 }
699 }
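
/* The compaction math above, with hypothetical numbers: if
 * used_mask[UBO] = 0b1011 and offsets[UBO] = 6, group index 3 maps to
 * 6 + util_bitcount64(0b0111 & 0b1011) = 6 + 2 = 8, while the unused
 * group index 2 yields IRIS_SURFACE_NOT_USED.
 */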
700
701 /**
702 * Map a binding table index back to a <group, index> pair.
703 *
704 * For example: binding table index 12 => <UBO, 5>
705 */
706 uint32_t
707 iris_bti_to_group_index(const struct iris_binding_table *bt,
708 enum iris_surface_group group, uint32_t bti)
709 {
710 uint64_t used_mask = bt->used_mask[group];
711 assert(bti >= bt->offsets[group]);
712
713 uint32_t c = bti - bt->offsets[group];
714 while (used_mask) {
715 int i = u_bit_scan64(&used_mask);
716 if (c == 0)
717 return i;
718 c--;
719 }
720
721 return IRIS_SURFACE_NOT_USED;
722 }
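
/* Continuing the hypothetical numbers above: with used_mask[UBO] = 0b1011
 * and offsets[UBO] = 6, a bti of 8 walks the set bits 0, 1, 3 and returns
 * group index 3; if bti points past the used entries of the group, the
 * function returns IRIS_SURFACE_NOT_USED.
 */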
723
724 static void
725 rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
726 nir_instr *instr, nir_src *src,
727 enum iris_surface_group group)
728 {
729 assert(bt->sizes[group] > 0);
730
731 b->cursor = nir_before_instr(instr);
732 nir_ssa_def *bti;
733 if (nir_src_is_const(*src)) {
734 uint32_t index = nir_src_as_uint(*src);
735 bti = nir_imm_intN_t(b, iris_group_index_to_bti(bt, group, index),
736 src->ssa->bit_size);
737 } else {
738 /* Indirect usage makes all the surfaces of the group available,
739 * so we can just add the base.
740 */
741 assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
742 bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
743 }
744 nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
745 }
746
747 static void
748 mark_used_with_src(struct iris_binding_table *bt, nir_src *src,
749 enum iris_surface_group group)
750 {
751 assert(bt->sizes[group] > 0);
752
753 if (nir_src_is_const(*src)) {
754 uint64_t index = nir_src_as_uint(*src);
755 assert(index < bt->sizes[group]);
756 bt->used_mask[group] |= 1ull << index;
757 } else {
758 /* There's an indirect usage, we need all the surfaces. */
759 bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
760 }
761 }
762
763 static bool
764 skip_compacting_binding_tables(void)
765 {
766 static int skip = -1;
767 if (skip < 0)
768 skip = env_var_as_boolean("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
769 return skip;
770 }
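
/* For example, running with INTEL_DISABLE_COMPACT_BINDING_TABLE=1 in the
 * environment makes iris_setup_binding_table() below mark every slot of
 * every group as used, effectively disabling the compaction.
 */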
771
772 /**
773 * Set up the binding table indices and apply to the shader.
774 */
775 static void
776 iris_setup_binding_table(const struct gen_device_info *devinfo,
777 struct nir_shader *nir,
778 struct iris_binding_table *bt,
779 unsigned num_render_targets,
780 unsigned num_system_values,
781 unsigned num_cbufs)
782 {
783 const struct shader_info *info = &nir->info;
784
785 memset(bt, 0, sizeof(*bt));
786
787 /* Set the sizes for each surface group. For some groups, we already know
788 * upfront how many will be used, so mark them.
789 */
790 if (info->stage == MESA_SHADER_FRAGMENT) {
791 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
792 /* All render targets used. */
793 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET] =
794 BITFIELD64_MASK(num_render_targets);
795
796 /* Set up the render target read surface group in order to support
797 * non-coherent framebuffer fetch on Gen8.
798 */
799 if (devinfo->gen == 8 && info->outputs_read) {
800 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
801 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] =
802 BITFIELD64_MASK(num_render_targets);
803 }
804 } else if (info->stage == MESA_SHADER_COMPUTE) {
805 bt->sizes[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
806 }
807
808 bt->sizes[IRIS_SURFACE_GROUP_TEXTURE] = util_last_bit(info->textures_used);
809 bt->used_mask[IRIS_SURFACE_GROUP_TEXTURE] = info->textures_used;
810
811 bt->sizes[IRIS_SURFACE_GROUP_IMAGE] = info->num_images;
812
813 /* Allocate an extra slot in the UBO section for NIR constants.
814 * Binding table compaction will remove it if unnecessary.
815 *
816 * We don't include them in iris_compiled_shader::num_cbufs because
817 * they are uploaded separately from shs->constbuf[], but from a shader
818 * point of view, they're another UBO (at the end of the section).
819 */
820 bt->sizes[IRIS_SURFACE_GROUP_UBO] = num_cbufs + 1;
821
822 bt->sizes[IRIS_SURFACE_GROUP_SSBO] = info->num_ssbos;
823
824 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
825 assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);
826
827 /* Mark surfaces used for the cases where we don't have the information
828 * available upfront.
829 */
830 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
831 nir_foreach_block (block, impl) {
832 nir_foreach_instr (instr, block) {
833 if (instr->type != nir_instr_type_intrinsic)
834 continue;
835
836 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
837 switch (intrin->intrinsic) {
838 case nir_intrinsic_load_num_work_groups:
839 bt->used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
840 break;
841
842 case nir_intrinsic_load_output:
843 if (devinfo->gen == 8) {
844 mark_used_with_src(bt, &intrin->src[0],
845 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
846 }
847 break;
848
849 case nir_intrinsic_image_size:
850 case nir_intrinsic_image_load:
851 case nir_intrinsic_image_store:
852 case nir_intrinsic_image_atomic_add:
853 case nir_intrinsic_image_atomic_imin:
854 case nir_intrinsic_image_atomic_umin:
855 case nir_intrinsic_image_atomic_imax:
856 case nir_intrinsic_image_atomic_umax:
857 case nir_intrinsic_image_atomic_and:
858 case nir_intrinsic_image_atomic_or:
859 case nir_intrinsic_image_atomic_xor:
860 case nir_intrinsic_image_atomic_exchange:
861 case nir_intrinsic_image_atomic_comp_swap:
862 case nir_intrinsic_image_load_raw_intel:
863 case nir_intrinsic_image_store_raw_intel:
864 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_IMAGE);
865 break;
866
867 case nir_intrinsic_load_ubo:
868 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_UBO);
869 break;
870
871 case nir_intrinsic_store_ssbo:
872 mark_used_with_src(bt, &intrin->src[1], IRIS_SURFACE_GROUP_SSBO);
873 break;
874
875 case nir_intrinsic_get_buffer_size:
876 case nir_intrinsic_ssbo_atomic_add:
877 case nir_intrinsic_ssbo_atomic_imin:
878 case nir_intrinsic_ssbo_atomic_umin:
879 case nir_intrinsic_ssbo_atomic_imax:
880 case nir_intrinsic_ssbo_atomic_umax:
881 case nir_intrinsic_ssbo_atomic_and:
882 case nir_intrinsic_ssbo_atomic_or:
883 case nir_intrinsic_ssbo_atomic_xor:
884 case nir_intrinsic_ssbo_atomic_exchange:
885 case nir_intrinsic_ssbo_atomic_comp_swap:
886 case nir_intrinsic_ssbo_atomic_fmin:
887 case nir_intrinsic_ssbo_atomic_fmax:
888 case nir_intrinsic_ssbo_atomic_fcomp_swap:
889 case nir_intrinsic_load_ssbo:
890 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_SSBO);
891 break;
892
893 default:
894 break;
895 }
896 }
897 }
898
899 /* When disabled, we just mark everything as used. */
900 if (unlikely(skip_compacting_binding_tables())) {
901 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
902 bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
903 }
904
905 /* Calculate the offsets and the binding table size based on the used
906 * surfaces. After this point, the functions to go between "group indices"
907 * and binding table indices can be used.
908 */
909 uint32_t next = 0;
910 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
911 if (bt->used_mask[i] != 0) {
912 bt->offsets[i] = next;
913 next += util_bitcount64(bt->used_mask[i]);
914 }
915 }
916 bt->size_bytes = next * 4;
917
918 if (unlikely(INTEL_DEBUG & DEBUG_BT)) {
919 iris_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
920 }
921
922 /* Apply the binding table indices. The backend compiler is not expected
923 * to change those, as we haven't set any of the *_start entries in brw
924 * binding_table.
925 */
926 nir_builder b;
927 nir_builder_init(&b, impl);
928
929 nir_foreach_block (block, impl) {
930 nir_foreach_instr (instr, block) {
931 if (instr->type == nir_instr_type_tex) {
932 nir_tex_instr *tex = nir_instr_as_tex(instr);
933 tex->texture_index =
934 iris_group_index_to_bti(bt, IRIS_SURFACE_GROUP_TEXTURE,
935 tex->texture_index);
936 continue;
937 }
938
939 if (instr->type != nir_instr_type_intrinsic)
940 continue;
941
942 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
943 switch (intrin->intrinsic) {
944 case nir_intrinsic_image_size:
945 case nir_intrinsic_image_load:
946 case nir_intrinsic_image_store:
947 case nir_intrinsic_image_atomic_add:
948 case nir_intrinsic_image_atomic_imin:
949 case nir_intrinsic_image_atomic_umin:
950 case nir_intrinsic_image_atomic_imax:
951 case nir_intrinsic_image_atomic_umax:
952 case nir_intrinsic_image_atomic_and:
953 case nir_intrinsic_image_atomic_or:
954 case nir_intrinsic_image_atomic_xor:
955 case nir_intrinsic_image_atomic_exchange:
956 case nir_intrinsic_image_atomic_comp_swap:
957 case nir_intrinsic_image_load_raw_intel:
958 case nir_intrinsic_image_store_raw_intel:
959 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
960 IRIS_SURFACE_GROUP_IMAGE);
961 break;
962
963 case nir_intrinsic_load_ubo:
964 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
965 IRIS_SURFACE_GROUP_UBO);
966 break;
967
968 case nir_intrinsic_store_ssbo:
969 rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
970 IRIS_SURFACE_GROUP_SSBO);
971 break;
972
973 case nir_intrinsic_load_output:
974 if (devinfo->gen == 8) {
975 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
976 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
977 }
978 break;
979
980 case nir_intrinsic_get_buffer_size:
981 case nir_intrinsic_ssbo_atomic_add:
982 case nir_intrinsic_ssbo_atomic_imin:
983 case nir_intrinsic_ssbo_atomic_umin:
984 case nir_intrinsic_ssbo_atomic_imax:
985 case nir_intrinsic_ssbo_atomic_umax:
986 case nir_intrinsic_ssbo_atomic_and:
987 case nir_intrinsic_ssbo_atomic_or:
988 case nir_intrinsic_ssbo_atomic_xor:
989 case nir_intrinsic_ssbo_atomic_exchange:
990 case nir_intrinsic_ssbo_atomic_comp_swap:
991 case nir_intrinsic_ssbo_atomic_fmin:
992 case nir_intrinsic_ssbo_atomic_fmax:
993 case nir_intrinsic_ssbo_atomic_fcomp_swap:
994 case nir_intrinsic_load_ssbo:
995 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
996 IRIS_SURFACE_GROUP_SSBO);
997 break;
998
999 default:
1000 break;
1001 }
1002 }
1003 }
1004 }
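
/* A rough sketch of a compacted layout (hypothetical fragment shader with
 * two render targets, textures_used = 0b101, two constant buffers that are
 * both loaded, and no SSBOs or images; groups are laid out in
 * enum iris_surface_group order):
 *
 *    [0] render target #0
 *    [1] render target #1
 *    [2] texture #0
 *    [3] texture #2
 *    [4] ubo #0
 *    [5] ubo #1
 *
 * Texture #1 and the spare NIR-constants UBO slot are unused, so compaction
 * drops them and bt->size_bytes ends up as 6 * 4 = 24.
 */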
1005
1006 static void
1007 iris_debug_recompile(struct iris_context *ice,
1008 struct shader_info *info,
1009 const struct brw_base_prog_key *key)
1010 {
1011 struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
1012 const struct gen_device_info *devinfo = &screen->devinfo;
1013 const struct brw_compiler *c = screen->compiler;
1014
1015 if (!info)
1016 return;
1017
1018 c->shader_perf_log(&ice->dbg, "Recompiling %s shader for program %s: %s\n",
1019 _mesa_shader_stage_to_string(info->stage),
1020 info->name ? info->name : "(no identifier)",
1021 info->label ? info->label : "");
1022
1023 const void *old_iris_key =
1024 iris_find_previous_compile(ice, info->stage, key->program_string_id);
1025
1026 union brw_any_prog_key old_key;
1027
1028 switch (info->stage) {
1029 case MESA_SHADER_VERTEX:
1030 old_key.vs = iris_to_brw_vs_key(devinfo, old_iris_key);
1031 break;
1032 case MESA_SHADER_TESS_CTRL:
1033 old_key.tcs = iris_to_brw_tcs_key(devinfo, old_iris_key);
1034 break;
1035 case MESA_SHADER_TESS_EVAL:
1036 old_key.tes = iris_to_brw_tes_key(devinfo, old_iris_key);
1037 break;
1038 case MESA_SHADER_GEOMETRY:
1039 old_key.gs = iris_to_brw_gs_key(devinfo, old_iris_key);
1040 break;
1041 case MESA_SHADER_FRAGMENT:
1042 old_key.wm = iris_to_brw_fs_key(devinfo, old_iris_key);
1043 break;
1044 case MESA_SHADER_COMPUTE:
1045 old_key.cs = iris_to_brw_cs_key(devinfo, old_iris_key);
1046 break;
1047 default:
1048 unreachable("invalid shader stage");
1049 }
1050
1051 brw_debug_key_recompile(c, &ice->dbg, info->stage, &old_key.base, key);
1052 }
1053
1054 /**
1055 * Get the shader for the last enabled geometry stage.
1056 *
1057 * This stage is the one which will feed stream output and the rasterizer.
1058 */
1059 static gl_shader_stage
1060 last_vue_stage(struct iris_context *ice)
1061 {
1062 if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
1063 return MESA_SHADER_GEOMETRY;
1064
1065 if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
1066 return MESA_SHADER_TESS_EVAL;
1067
1068 return MESA_SHADER_VERTEX;
1069 }
1070
1071 /**
1072 * Compile a vertex shader, and upload the assembly.
1073 */
1074 static struct iris_compiled_shader *
1075 iris_compile_vs(struct iris_context *ice,
1076 struct iris_uncompiled_shader *ish,
1077 const struct iris_vs_prog_key *key)
1078 {
1079 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1080 const struct brw_compiler *compiler = screen->compiler;
1081 const struct gen_device_info *devinfo = &screen->devinfo;
1082 void *mem_ctx = ralloc_context(NULL);
1083 struct brw_vs_prog_data *vs_prog_data =
1084 rzalloc(mem_ctx, struct brw_vs_prog_data);
1085 struct brw_vue_prog_data *vue_prog_data = &vs_prog_data->base;
1086 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1087 enum brw_param_builtin *system_values;
1088 unsigned num_system_values;
1089 unsigned num_cbufs;
1090
1091 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1092
1093 if (key->vue.nr_userclip_plane_consts) {
1094 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1095 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1096 true, false, NULL);
1097 nir_lower_io_to_temporaries(nir, impl, true, false);
1098 nir_lower_global_vars_to_local(nir);
1099 nir_lower_vars_to_ssa(nir);
1100 nir_shader_gather_info(nir, impl);
1101 }
1102
1103 prog_data->use_alt_mode = ish->use_alt_mode;
1104
1105 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1106 &num_system_values, &num_cbufs);
1107
1108 struct iris_binding_table bt;
1109 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1110 num_system_values, num_cbufs);
1111
1112 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1113
1114 brw_compute_vue_map(devinfo,
1115 &vue_prog_data->vue_map, nir->info.outputs_written,
1116 nir->info.separate_shader, /* pos_slots */ 1);
1117
1118 struct brw_vs_prog_key brw_key = iris_to_brw_vs_key(devinfo, key);
1119
1120 char *error_str = NULL;
1121 const unsigned *program =
1122 brw_compile_vs(compiler, &ice->dbg, mem_ctx, &brw_key, vs_prog_data,
1123 nir, -1, NULL, &error_str);
1124 if (program == NULL) {
1125 dbg_printf("Failed to compile vertex shader: %s\n", error_str);
1126 ralloc_free(mem_ctx);
1127 return false;
1128 }
1129
1130 if (ish->compiled_once) {
1131 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1132 } else {
1133 ish->compiled_once = true;
1134 }
1135
1136 uint32_t *so_decls =
1137 screen->vtbl.create_so_decl_list(&ish->stream_output,
1138 &vue_prog_data->vue_map);
1139
1140 struct iris_compiled_shader *shader =
1141 iris_upload_shader(ice, IRIS_CACHE_VS, sizeof(*key), key, program,
1142 prog_data, so_decls, system_values, num_system_values,
1143 num_cbufs, &bt);
1144
1145 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1146
1147 ralloc_free(mem_ctx);
1148 return shader;
1149 }
1150
1151 /**
1152 * Update the current vertex shader variant.
1153 *
1154 * Fill out the key, look in the cache, compile and bind if needed.
1155 */
1156 static void
1157 iris_update_compiled_vs(struct iris_context *ice)
1158 {
1159 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1160 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
1161 struct iris_uncompiled_shader *ish =
1162 ice->shaders.uncompiled[MESA_SHADER_VERTEX];
1163
1164 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
1165 screen->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1166
1167 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
1168 struct iris_compiled_shader *shader =
1169 iris_find_cached_shader(ice, IRIS_CACHE_VS, sizeof(key), &key);
1170
1171 if (!shader)
1172 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1173
1174 if (!shader)
1175 shader = iris_compile_vs(ice, ish, &key);
1176
1177 if (old != shader) {
1178 ice->shaders.prog[IRIS_CACHE_VS] = shader;
1179 ice->state.dirty |= IRIS_DIRTY_VS |
1180 IRIS_DIRTY_BINDINGS_VS |
1181 IRIS_DIRTY_CONSTANTS_VS |
1182 IRIS_DIRTY_VF_SGVS;
1183 shs->sysvals_need_upload = true;
1184
1185 const struct brw_vs_prog_data *vs_prog_data =
1186 (void *) shader->prog_data;
1187 const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
1188 vs_prog_data->uses_baseinstance;
1189 const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
1190 vs_prog_data->uses_is_indexed_draw;
1191 const bool needs_sgvs_element = uses_draw_params ||
1192 vs_prog_data->uses_instanceid ||
1193 vs_prog_data->uses_vertexid;
1194
1195 if (ice->state.vs_uses_draw_params != uses_draw_params ||
1196 ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
1197 ice->state.vs_needs_edge_flag != ish->needs_edge_flag) {
1198 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
1199 IRIS_DIRTY_VERTEX_ELEMENTS;
1200 }
1201 ice->state.vs_uses_draw_params = uses_draw_params;
1202 ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
1203 ice->state.vs_needs_sgvs_element = needs_sgvs_element;
1204 ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
1205 }
1206 }
1207
1208 /**
1209 * Get the shader_info for a given stage, or NULL if the stage is disabled.
1210 */
1211 const struct shader_info *
1212 iris_get_shader_info(const struct iris_context *ice, gl_shader_stage stage)
1213 {
1214 const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
1215
1216 if (!ish)
1217 return NULL;
1218
1219 const nir_shader *nir = ish->nir;
1220 return &nir->info;
1221 }
1222
1223 /**
1224 * Get the union of TCS output and TES input slots.
1225 *
1226 * TCS and TES need to agree on a common URB entry layout. In particular,
1227 * the data for all patch vertices is stored in a single URB entry (unlike
1228 * GS which has one entry per input vertex). This means that per-vertex
1229 * array indexing needs a stride.
1230 *
1231 * SSO requires locations to match, but doesn't require the number of
1232 * outputs/inputs to match (in fact, the TCS often has extra outputs).
1233 * So, we need to take the extra step of unifying these on the fly.
1234 */
1235 static void
1236 get_unified_tess_slots(const struct iris_context *ice,
1237 uint64_t *per_vertex_slots,
1238 uint32_t *per_patch_slots)
1239 {
1240 const struct shader_info *tcs =
1241 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
1242 const struct shader_info *tes =
1243 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1244
1245 *per_vertex_slots = tes->inputs_read;
1246 *per_patch_slots = tes->patch_inputs_read;
1247
1248 if (tcs) {
1249 *per_vertex_slots |= tcs->outputs_written;
1250 *per_patch_slots |= tcs->patch_outputs_written;
1251 }
1252 }
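
/* For instance (hypothetical SSO pipeline): if the TCS writes per-patch
 * outputs PATCH0 and PATCH1 but the TES only reads PATCH0, the unified
 * per-patch set still contains both, so the two stages agree on a common
 * URB entry layout; the same applies to extra per-vertex TCS outputs the
 * TES never reads.
 */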
1253
1254 /**
1255 * Compile a tessellation control shader, and upload the assembly.
1256 */
1257 static struct iris_compiled_shader *
1258 iris_compile_tcs(struct iris_context *ice,
1259 struct iris_uncompiled_shader *ish,
1260 const struct iris_tcs_prog_key *key)
1261 {
1262 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1263 const struct brw_compiler *compiler = screen->compiler;
1264 const struct nir_shader_compiler_options *options =
1265 compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
1266 void *mem_ctx = ralloc_context(NULL);
1267 struct brw_tcs_prog_data *tcs_prog_data =
1268 rzalloc(mem_ctx, struct brw_tcs_prog_data);
1269 struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
1270 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1271 const struct gen_device_info *devinfo = &screen->devinfo;
1272 enum brw_param_builtin *system_values = NULL;
1273 unsigned num_system_values = 0;
1274 unsigned num_cbufs = 0;
1275
1276 nir_shader *nir;
1277
1278 struct iris_binding_table bt;
1279
1280 struct brw_tcs_prog_key brw_key = iris_to_brw_tcs_key(devinfo, key);
1281
1282 if (ish) {
1283 nir = nir_shader_clone(mem_ctx, ish->nir);
1284
1285 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1286 &num_system_values, &num_cbufs);
1287 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1288 num_system_values, num_cbufs);
1289 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1290 } else {
1291 nir =
1292 brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, &brw_key);
1293
1294 /* Reserve space for passing the default tess levels as constants. */
1295 num_cbufs = 1;
1296 num_system_values = 8;
1297 system_values =
1298 rzalloc_array(mem_ctx, enum brw_param_builtin, num_system_values);
1299 prog_data->param = rzalloc_array(mem_ctx, uint32_t, num_system_values);
1300 prog_data->nr_params = num_system_values;
1301
1302 if (key->tes_primitive_mode == GL_QUADS) {
1303 for (int i = 0; i < 4; i++)
1304 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1305
1306 system_values[3] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1307 system_values[2] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y;
1308 } else if (key->tes_primitive_mode == GL_TRIANGLES) {
1309 for (int i = 0; i < 3; i++)
1310 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1311
1312 system_values[4] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1313 } else {
1314 assert(key->tes_primitive_mode == GL_ISOLINES);
1315 system_values[7] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y;
1316 system_values[6] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
1317 }
1318
1319 /* Manually setup the TCS binding table. */
1320 memset(&bt, 0, sizeof(bt));
1321 bt.sizes[IRIS_SURFACE_GROUP_UBO] = 1;
1322 bt.used_mask[IRIS_SURFACE_GROUP_UBO] = 1;
1323 bt.size_bytes = 4;
1324
1325 prog_data->ubo_ranges[0].length = 1;
1326 }
1327
1328 char *error_str = NULL;
1329 const unsigned *program =
1330 brw_compile_tcs(compiler, &ice->dbg, mem_ctx, &brw_key, tcs_prog_data,
1331 nir, -1, NULL, &error_str);
1332 if (program == NULL) {
1333 dbg_printf("Failed to compile control shader: %s\n", error_str);
1334 ralloc_free(mem_ctx);
1335 return false;
1336 }
1337
1338 if (ish) {
1339 if (ish->compiled_once) {
1340 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1341 } else {
1342 ish->compiled_once = true;
1343 }
1344 }
1345
1346 struct iris_compiled_shader *shader =
1347 iris_upload_shader(ice, IRIS_CACHE_TCS, sizeof(*key), key, program,
1348 prog_data, NULL, system_values, num_system_values,
1349 num_cbufs, &bt);
1350
1351 if (ish)
1352 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1353
1354 ralloc_free(mem_ctx);
1355 return shader;
1356 }
1357
1358 /**
1359 * Update the current tessellation control shader variant.
1360 *
1361 * Fill out the key, look in the cache, compile and bind if needed.
1362 */
1363 static void
1364 iris_update_compiled_tcs(struct iris_context *ice)
1365 {
1366 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
1367 struct iris_uncompiled_shader *tcs =
1368 ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
1369 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1370 const struct brw_compiler *compiler = screen->compiler;
1371 const struct gen_device_info *devinfo = &screen->devinfo;
1372
1373 const struct shader_info *tes_info =
1374 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1375 struct iris_tcs_prog_key key = {
1376 .vue.base.program_string_id = tcs ? tcs->program_id : 0,
1377 .tes_primitive_mode = tes_info->tess.primitive_mode,
1378 .input_vertices =
1379 !tcs || compiler->use_tcs_8_patch ? ice->state.vertices_per_patch : 0,
1380 .quads_workaround = devinfo->gen < 9 &&
1381 tes_info->tess.primitive_mode == GL_QUADS &&
1382 tes_info->tess.spacing == TESS_SPACING_EQUAL,
1383 };
1384 get_unified_tess_slots(ice, &key.outputs_written,
1385 &key.patch_outputs_written);
1386 screen->vtbl.populate_tcs_key(ice, &key);
1387
1388 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
1389 struct iris_compiled_shader *shader =
1390 iris_find_cached_shader(ice, IRIS_CACHE_TCS, sizeof(key), &key);
1391
1392 if (tcs && !shader)
1393 shader = iris_disk_cache_retrieve(ice, tcs, &key, sizeof(key));
1394
1395 if (!shader)
1396 shader = iris_compile_tcs(ice, tcs, &key);
1397
1398 if (old != shader) {
1399 ice->shaders.prog[IRIS_CACHE_TCS] = shader;
1400 ice->state.dirty |= IRIS_DIRTY_TCS |
1401 IRIS_DIRTY_BINDINGS_TCS |
1402 IRIS_DIRTY_CONSTANTS_TCS;
1403 shs->sysvals_need_upload = true;
1404 }
1405 }
1406
1407 /**
1408 * Compile a tessellation evaluation shader, and upload the assembly.
1409 */
1410 static struct iris_compiled_shader *
1411 iris_compile_tes(struct iris_context *ice,
1412 struct iris_uncompiled_shader *ish,
1413 const struct iris_tes_prog_key *key)
1414 {
1415 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1416 const struct brw_compiler *compiler = screen->compiler;
1417 void *mem_ctx = ralloc_context(NULL);
1418 struct brw_tes_prog_data *tes_prog_data =
1419 rzalloc(mem_ctx, struct brw_tes_prog_data);
1420 struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
1421 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1422 enum brw_param_builtin *system_values;
1423 const struct gen_device_info *devinfo = &screen->devinfo;
1424 unsigned num_system_values;
1425 unsigned num_cbufs;
1426
1427 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1428
1429 if (key->vue.nr_userclip_plane_consts) {
1430 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1431 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1432 true, false, NULL);
1433 nir_lower_io_to_temporaries(nir, impl, true, false);
1434 nir_lower_global_vars_to_local(nir);
1435 nir_lower_vars_to_ssa(nir);
1436 nir_shader_gather_info(nir, impl);
1437 }
1438
1439 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1440 &num_system_values, &num_cbufs);
1441
1442 struct iris_binding_table bt;
1443 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1444 num_system_values, num_cbufs);
1445
1446 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1447
1448 struct brw_vue_map input_vue_map;
1449 brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
1450 key->patch_inputs_read);
1451
1452 struct brw_tes_prog_key brw_key = iris_to_brw_tes_key(devinfo, key);
1453
1454 char *error_str = NULL;
1455 const unsigned *program =
1456 brw_compile_tes(compiler, &ice->dbg, mem_ctx, &brw_key, &input_vue_map,
1457 tes_prog_data, nir, -1, NULL, &error_str);
1458 if (program == NULL) {
1459 dbg_printf("Failed to compile evaluation shader: %s\n", error_str);
1460 ralloc_free(mem_ctx);
1461 return false;
1462 }
1463
1464 if (ish->compiled_once) {
1465 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1466 } else {
1467 ish->compiled_once = true;
1468 }
1469
1470 uint32_t *so_decls =
1471 screen->vtbl.create_so_decl_list(&ish->stream_output,
1472 &vue_prog_data->vue_map);
1473
1474
1475 struct iris_compiled_shader *shader =
1476 iris_upload_shader(ice, IRIS_CACHE_TES, sizeof(*key), key, program,
1477 prog_data, so_decls, system_values, num_system_values,
1478 num_cbufs, &bt);
1479
1480 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1481
1482 ralloc_free(mem_ctx);
1483 return shader;
1484 }
1485
1486 /**
1487 * Update the current tessellation evaluation shader variant.
1488 *
1489 * Fill out the key, look in the cache, compile and bind if needed.
1490 */
1491 static void
1492 iris_update_compiled_tes(struct iris_context *ice)
1493 {
1494 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1495 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
1496 struct iris_uncompiled_shader *ish =
1497 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1498
1499 struct iris_tes_prog_key key = { KEY_ID(vue.base) };
1500 get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
1501 screen->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1502
1503 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
1504 struct iris_compiled_shader *shader =
1505 iris_find_cached_shader(ice, IRIS_CACHE_TES, sizeof(key), &key);
1506
1507 if (!shader)
1508 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1509
1510 if (!shader)
1511 shader = iris_compile_tes(ice, ish, &key);
1512
1513 if (old != shader) {
1514 ice->shaders.prog[IRIS_CACHE_TES] = shader;
1515 ice->state.dirty |= IRIS_DIRTY_TES |
1516 IRIS_DIRTY_BINDINGS_TES |
1517 IRIS_DIRTY_CONSTANTS_TES;
1518 shs->sysvals_need_upload = true;
1519 }
1520
1521 /* TODO: Could compare and avoid flagging this. */
1522 const struct shader_info *tes_info = &ish->nir->info;
1523 if (tes_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
1524 ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TES;
1525 ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
1526 }
1527 }
1528
1529 /**
1530 * Compile a geometry shader, and upload the assembly.
1531 */
1532 static struct iris_compiled_shader *
1533 iris_compile_gs(struct iris_context *ice,
1534 struct iris_uncompiled_shader *ish,
1535 const struct iris_gs_prog_key *key)
1536 {
1537 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1538 const struct brw_compiler *compiler = screen->compiler;
1539 const struct gen_device_info *devinfo = &screen->devinfo;
1540 void *mem_ctx = ralloc_context(NULL);
1541 struct brw_gs_prog_data *gs_prog_data =
1542 rzalloc(mem_ctx, struct brw_gs_prog_data);
1543 struct brw_vue_prog_data *vue_prog_data = &gs_prog_data->base;
1544 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1545 enum brw_param_builtin *system_values;
1546 unsigned num_system_values;
1547 unsigned num_cbufs;
1548
1549 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1550
1551 if (key->vue.nr_userclip_plane_consts) {
1552 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1553 nir_lower_clip_gs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1554 false, NULL);
1555 nir_lower_io_to_temporaries(nir, impl, true, false);
1556 nir_lower_global_vars_to_local(nir);
1557 nir_lower_vars_to_ssa(nir);
1558 nir_shader_gather_info(nir, impl);
1559 }
1560
1561 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1562 &num_system_values, &num_cbufs);
1563
1564 struct iris_binding_table bt;
1565 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1566 num_system_values, num_cbufs);
1567
1568 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1569
1570 brw_compute_vue_map(devinfo,
1571 &vue_prog_data->vue_map, nir->info.outputs_written,
1572 nir->info.separate_shader, /* pos_slots */ 1);
1573
1574 struct brw_gs_prog_key brw_key = iris_to_brw_gs_key(devinfo, key);
1575
1576 char *error_str = NULL;
1577 const unsigned *program =
1578 brw_compile_gs(compiler, &ice->dbg, mem_ctx, &brw_key, gs_prog_data,
1579 nir, NULL, -1, NULL, &error_str);
1580 if (program == NULL) {
1581 dbg_printf("Failed to compile geometry shader: %s\n", error_str);
1582 ralloc_free(mem_ctx);
1583 return false;
1584 }
1585
1586 if (ish->compiled_once) {
1587 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1588 } else {
1589 ish->compiled_once = true;
1590 }
1591
1592 uint32_t *so_decls =
1593 screen->vtbl.create_so_decl_list(&ish->stream_output,
1594 &vue_prog_data->vue_map);
1595
1596 struct iris_compiled_shader *shader =
1597 iris_upload_shader(ice, IRIS_CACHE_GS, sizeof(*key), key, program,
1598 prog_data, so_decls, system_values, num_system_values,
1599 num_cbufs, &bt);
1600
1601 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1602
1603 ralloc_free(mem_ctx);
1604 return shader;
1605 }
1606
1607 /**
1608 * Update the current geometry shader variant.
1609 *
1610 * Fill out the key, look in the cache, compile and bind if needed.
1611 */
1612 static void
1613 iris_update_compiled_gs(struct iris_context *ice)
1614 {
1615 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
1616 struct iris_uncompiled_shader *ish =
1617 ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
1618 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
1619 struct iris_compiled_shader *shader = NULL;
1620 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1621
1622 if (ish) {
1623 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
1624 screen->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1625
1626 shader =
1627 iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);
1628
1629 if (!shader)
1630 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1631
1632 if (!shader)
1633 shader = iris_compile_gs(ice, ish, &key);
1634 }
1635
1636 if (old != shader) {
1637 ice->shaders.prog[IRIS_CACHE_GS] = shader;
1638 ice->state.dirty |= IRIS_DIRTY_GS |
1639 IRIS_DIRTY_BINDINGS_GS |
1640 IRIS_DIRTY_CONSTANTS_GS;
1641 shs->sysvals_need_upload = true;
1642 }
1643 }
1644
1645 /**
1646 * Compile a fragment (pixel) shader, and upload the assembly.
1647 */
1648 static struct iris_compiled_shader *
1649 iris_compile_fs(struct iris_context *ice,
1650 struct iris_uncompiled_shader *ish,
1651 const struct iris_fs_prog_key *key,
1652 struct brw_vue_map *vue_map)
1653 {
1654 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1655 const struct brw_compiler *compiler = screen->compiler;
1656 void *mem_ctx = ralloc_context(NULL);
1657 struct brw_wm_prog_data *fs_prog_data =
1658 rzalloc(mem_ctx, struct brw_wm_prog_data);
1659 struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
1660 enum brw_param_builtin *system_values;
1661 const struct gen_device_info *devinfo = &screen->devinfo;
1662 unsigned num_system_values;
1663 unsigned num_cbufs;
1664
1665 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1666
1667 prog_data->use_alt_mode = ish->use_alt_mode;
1668
1669 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1670 &num_system_values, &num_cbufs);
1671
1672 /* Lower output variables to load_output intrinsics before setting up
1673 * binding tables, so iris_setup_binding_table can map any load_output
1674 * intrinsics to IRIS_SURFACE_GROUP_RENDER_TARGET_READ on Gen8 for
1675 * non-coherent framebuffer fetches.
1676 */
1677 brw_nir_lower_fs_outputs(nir);
1678
1679 /* On Gen11+, shader RT write messages have a "Null Render Target" bit
1680 * and do not need a binding table entry with a null surface. Earlier
1681 * generations need an entry for a null surface.
1682 */
1683 int null_rts = devinfo->gen < 11 ? 1 : 0;
1684
1685 struct iris_binding_table bt;
1686 iris_setup_binding_table(devinfo, nir, &bt,
1687 MAX2(key->nr_color_regions, null_rts),
1688 num_system_values, num_cbufs);
1689
1690 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1691
1692 struct brw_wm_prog_key brw_key = iris_to_brw_fs_key(devinfo, key);
1693
1694 char *error_str = NULL;
1695 const unsigned *program =
1696 brw_compile_fs(compiler, &ice->dbg, mem_ctx, &brw_key, fs_prog_data,
1697 nir, -1, -1, -1, true, false, vue_map,
1698 NULL, &error_str);
1699 if (program == NULL) {
1700 dbg_printf("Failed to compile fragment shader: %s\n", error_str);
1701 ralloc_free(mem_ctx);
1702 return NULL;
1703 }
1704
1705 if (ish->compiled_once) {
1706 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1707 } else {
1708 ish->compiled_once = true;
1709 }
1710
1711 struct iris_compiled_shader *shader =
1712 iris_upload_shader(ice, IRIS_CACHE_FS, sizeof(*key), key, program,
1713 prog_data, NULL, system_values, num_system_values,
1714 num_cbufs, &bt);
1715
1716 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1717
1718 ralloc_free(mem_ctx);
1719 return shader;
1720 }
1721
1722 /**
1723 * Update the current fragment shader variant.
1724 *
1725 * Fill out the key, look in the cache, compile and bind if needed.
1726 */
1727 static void
1728 iris_update_compiled_fs(struct iris_context *ice)
1729 {
1730 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
1731 struct iris_uncompiled_shader *ish =
1732 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
1733 struct iris_fs_prog_key key = { KEY_ID(base) };
1734 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1735 screen->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
1736
1737 if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
1738 key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
1739
1740 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_FS];
1741 struct iris_compiled_shader *shader =
1742 iris_find_cached_shader(ice, IRIS_CACHE_FS, sizeof(key), &key);
1743
1744 if (!shader)
1745 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1746
1747 if (!shader)
1748 shader = iris_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);
1749
1750 if (old != shader) {
1751 // XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
1752 // toggles. might be able to avoid flagging SBE too.
1753 ice->shaders.prog[IRIS_CACHE_FS] = shader;
1754 ice->state.dirty |= IRIS_DIRTY_FS |
1755 IRIS_DIRTY_BINDINGS_FS |
1756 IRIS_DIRTY_CONSTANTS_FS |
1757 IRIS_DIRTY_WM |
1758 IRIS_DIRTY_CLIP |
1759 IRIS_DIRTY_SBE;
1760 shs->sysvals_need_upload = true;
1761 }
1762 }
1763
1764 /**
1765 * Update the last enabled stage's VUE map.
1766 *
1767 * When the shader feeding the rasterizer's output interface changes, we
1768 * need to re-emit various packets.
1769 */
1770 static void
1771 update_last_vue_map(struct iris_context *ice,
1772 struct brw_stage_prog_data *prog_data)
1773 {
1774 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
1775 struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
1776 struct brw_vue_map *old_map = ice->shaders.last_vue_map;
1777 const uint64_t changed_slots =
1778 (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;
1779
1780 if (changed_slots & VARYING_BIT_VIEWPORT) {
1781 ice->state.num_viewports =
1782 (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? IRIS_MAX_VIEWPORTS : 1;
1783 ice->state.dirty |= IRIS_DIRTY_CLIP |
1784 IRIS_DIRTY_SF_CL_VIEWPORT |
1785 IRIS_DIRTY_CC_VIEWPORT |
1786 IRIS_DIRTY_SCISSOR_RECT |
1787 IRIS_DIRTY_UNCOMPILED_FS |
1788 ice->state.dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
1789 }
1790
1791 if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
1792 ice->state.dirty |= IRIS_DIRTY_SBE;
1793 }
1794
1795 ice->shaders.last_vue_map = &vue_prog_data->vue_map;
1796 }
1797
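/**
 * For shader variants that pull UBO data, upload surface states for any
 * bound constant buffers that are missing one, and flag the stage's
 * bindings dirty if new descriptors were created or system values still
 * need uploading.
 */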
1798 static void
1799 iris_update_pull_constant_descriptors(struct iris_context *ice,
1800 gl_shader_stage stage)
1801 {
1802 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
1803
1804 if (!shader || !shader->prog_data->has_ubo_pull)
1805 return;
1806
1807 struct iris_shader_state *shs = &ice->state.shaders[stage];
1808 bool any_new_descriptors =
1809 shader->num_system_values > 0 && shs->sysvals_need_upload;
1810
1811 unsigned bound_cbufs = shs->bound_cbufs;
1812
1813 while (bound_cbufs) {
1814 const int i = u_bit_scan(&bound_cbufs);
1815 struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
1816 struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
1817 if (!surf_state->res && cbuf->buffer) {
1818 iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
1819 any_new_descriptors = true;
1820 }
1821 }
1822
1823 if (any_new_descriptors)
1824 ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
1825 }
1826
1827 /**
1828 * Get the prog_data for a given stage, or NULL if the stage is disabled.
1829 */
1830 static struct brw_vue_prog_data *
1831 get_vue_prog_data(struct iris_context *ice, gl_shader_stage stage)
1832 {
1833 if (!ice->shaders.prog[stage])
1834 return NULL;
1835
1836 return (void *) ice->shaders.prog[stage]->prog_data;
1837 }
1838
1839 // XXX: iris_compiled_shaders are space-leaking :(
1840 // XXX: do remember to unbind them if deleting them.
1841
1842 /**
1843 * Update the current shader variants for the given state.
1844 *
1845 * This should be called on every draw call to ensure that the correct
1846 * shaders are bound. It will also flag any dirty state triggered by
1847 * swapping out those shaders.
1848 */
1849 void
1850 iris_update_compiled_shaders(struct iris_context *ice)
1851 {
1852 const uint64_t dirty = ice->state.dirty;
1853
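/* Stash the current VUE stage prog_datas so we can check below whether
 * any URB entry sizes changed after updating the shader variants.
 */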
1854 struct brw_vue_prog_data *old_prog_datas[4];
1855 if (!(dirty & IRIS_DIRTY_URB)) {
1856 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
1857 old_prog_datas[i] = get_vue_prog_data(ice, i);
1858 }
1859
1860 if (dirty & (IRIS_DIRTY_UNCOMPILED_TCS | IRIS_DIRTY_UNCOMPILED_TES)) {
1861 struct iris_uncompiled_shader *tes =
1862 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1863 if (tes) {
1864 iris_update_compiled_tcs(ice);
1865 iris_update_compiled_tes(ice);
1866 } else {
1867 ice->shaders.prog[IRIS_CACHE_TCS] = NULL;
1868 ice->shaders.prog[IRIS_CACHE_TES] = NULL;
1869 ice->state.dirty |=
1870 IRIS_DIRTY_TCS | IRIS_DIRTY_TES |
1871 IRIS_DIRTY_BINDINGS_TCS | IRIS_DIRTY_BINDINGS_TES |
1872 IRIS_DIRTY_CONSTANTS_TCS | IRIS_DIRTY_CONSTANTS_TES;
1873 }
1874 }
1875
1876 if (dirty & IRIS_DIRTY_UNCOMPILED_VS)
1877 iris_update_compiled_vs(ice);
1878 if (dirty & IRIS_DIRTY_UNCOMPILED_GS)
1879 iris_update_compiled_gs(ice);
1880
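/* The GS or TES output topology determines whether points or lines reach
 * the rasterizer, which in turn affects the CLIP state (see the XY clip
 * enable note below).
 */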
1881 if (dirty & (IRIS_DIRTY_UNCOMPILED_GS | IRIS_DIRTY_UNCOMPILED_TES)) {
1882 const struct iris_compiled_shader *gs =
1883 ice->shaders.prog[MESA_SHADER_GEOMETRY];
1884 const struct iris_compiled_shader *tes =
1885 ice->shaders.prog[MESA_SHADER_TESS_EVAL];
1886
1887 bool points_or_lines = false;
1888
1889 if (gs) {
1890 const struct brw_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
1891 points_or_lines =
1892 gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
1893 gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
1894 } else if (tes) {
1895 const struct brw_tes_prog_data *tes_data = (void *) tes->prog_data;
1896 points_or_lines =
1897 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_LINE ||
1898 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
1899 }
1900
1901 if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
1902 /* Outbound to XY Clip enables */
1903 ice->shaders.output_topology_is_points_or_lines = points_or_lines;
1904 ice->state.dirty |= IRIS_DIRTY_CLIP;
1905 }
1906 }
1907
1908 gl_shader_stage last_stage = last_vue_stage(ice);
1909 struct iris_compiled_shader *shader = ice->shaders.prog[last_stage];
1910 struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
1911 update_last_vue_map(ice, shader->prog_data);
1912 if (ice->state.streamout != shader->streamout) {
1913 ice->state.streamout = shader->streamout;
1914 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST | IRIS_DIRTY_STREAMOUT;
1915 }
1916
1917 if (ice->state.streamout_active) {
1918 for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
1919 struct iris_stream_output_target *so =
1920 (void *) ice->state.so_target[i];
1921 if (so)
1922 so->stride = ish->stream_output.stride[i] * sizeof(uint32_t);
1923 }
1924 }
1925
1926 if (dirty & IRIS_DIRTY_UNCOMPILED_FS)
1927 iris_update_compiled_fs(ice);
1928
1929 /* Changing shader interfaces may require a URB reconfiguration. */
1930 if (!(dirty & IRIS_DIRTY_URB)) {
1931 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
1932 struct brw_vue_prog_data *old = old_prog_datas[i];
1933 struct brw_vue_prog_data *new = get_vue_prog_data(ice, i);
1934 if (!!old != !!new ||
1935 (new && new->urb_entry_size != old->urb_entry_size)) {
1936 ice->state.dirty |= IRIS_DIRTY_URB;
1937 break;
1938 }
1939 }
1940 }
1941
1942 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
1943 if (ice->state.dirty & (IRIS_DIRTY_CONSTANTS_VS << i))
1944 iris_update_pull_constant_descriptors(ice, i);
1945 }
1946 }
1947
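/**
 * Compile a compute shader, and upload the assembly.
 */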
1948 static struct iris_compiled_shader *
1949 iris_compile_cs(struct iris_context *ice,
1950 struct iris_uncompiled_shader *ish,
1951 const struct iris_cs_prog_key *key)
1952 {
1953 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1954 const struct brw_compiler *compiler = screen->compiler;
1955 void *mem_ctx = ralloc_context(NULL);
1956 struct brw_cs_prog_data *cs_prog_data =
1957 rzalloc(mem_ctx, struct brw_cs_prog_data);
1958 struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
1959 enum brw_param_builtin *system_values;
1960 const struct gen_device_info *devinfo = &screen->devinfo;
1961 unsigned num_system_values;
1962 unsigned num_cbufs;
1963
1964 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1965
1966 NIR_PASS_V(nir, brw_nir_lower_cs_intrinsics);
1967
1968 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1969 &num_system_values, &num_cbufs);
1970
1971 struct iris_binding_table bt;
1972 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1973 num_system_values, num_cbufs);
1974
1975 struct brw_cs_prog_key brw_key = iris_to_brw_cs_key(devinfo, key);
1976
1977 char *error_str = NULL;
1978 const unsigned *program =
1979 brw_compile_cs(compiler, &ice->dbg, mem_ctx, &brw_key, cs_prog_data,
1980 nir, -1, NULL, &error_str);
1981 if (program == NULL) {
1982 dbg_printf("Failed to compile compute shader: %s\n", error_str);
1983 ralloc_free(mem_ctx);
1984 return NULL;
1985 }
1986
1987 if (ish->compiled_once) {
1988 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1989 } else {
1990 ish->compiled_once = true;
1991 }
1992
1993 struct iris_compiled_shader *shader =
1994 iris_upload_shader(ice, IRIS_CACHE_CS, sizeof(*key), key, program,
1995 prog_data, NULL, system_values, num_system_values,
1996 num_cbufs, &bt);
1997
1998 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1999
2000 ralloc_free(mem_ctx);
2001 return shader;
2002 }
2003
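/**
 * Update the current compute shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */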
2004 static void
2005 iris_update_compiled_cs(struct iris_context *ice)
2006 {
2007 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
2008 struct iris_uncompiled_shader *ish =
2009 ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
2010
2011 struct iris_cs_prog_key key = { KEY_ID(base) };
2012 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2013 screen->vtbl.populate_cs_key(ice, &key);
2014
2015 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
2016 struct iris_compiled_shader *shader =
2017 iris_find_cached_shader(ice, IRIS_CACHE_CS, sizeof(key), &key);
2018
2019 if (!shader)
2020 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
2021
2022 if (!shader)
2023 shader = iris_compile_cs(ice, ish, &key);
2024
2025 if (old != shader) {
2026 ice->shaders.prog[IRIS_CACHE_CS] = shader;
2027 ice->state.dirty |= IRIS_DIRTY_CS |
2028 IRIS_DIRTY_BINDINGS_CS |
2029 IRIS_DIRTY_CONSTANTS_CS;
2030 shs->sysvals_need_upload = true;
2031 }
2032 }
2033
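/**
 * Update the current compute shader variant and its pull constant
 * descriptors, if the relevant state is dirty.  This is the compute
 * dispatch counterpart of iris_update_compiled_shaders().
 */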
2034 void
2035 iris_update_compiled_compute_shader(struct iris_context *ice)
2036 {
2037 if (ice->state.dirty & IRIS_DIRTY_UNCOMPILED_CS)
2038 iris_update_compiled_cs(ice);
2039
2040 if (ice->state.dirty & IRIS_DIRTY_CONSTANTS_CS)
2041 iris_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
2042 }
2043
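/**
 * Fill out the per-thread push constant buffer for a compute shader.
 *
 * The only per-thread value is the subgroup ID: each thread gets one
 * push constant register (8 dwords), with its subgroup ID in the first
 * dword.
 */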
2044 void
2045 iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
2046 unsigned threads,
2047 uint32_t *dst)
2048 {
2049 assert(brw_cs_push_const_total_size(cs_prog_data, threads) > 0);
2050 assert(cs_prog_data->push.cross_thread.size == 0);
2051 assert(cs_prog_data->push.per_thread.dwords == 1);
2052 assert(cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
2053 for (unsigned t = 0; t < threads; t++)
2054 dst[8 * t] = t;
2055 }
2056
2057 /**
2058 * Allocate scratch BOs as needed for the given per-thread size and stage.
2059 */
2060 struct iris_bo *
2061 iris_get_scratch_space(struct iris_context *ice,
2062 unsigned per_thread_scratch,
2063 gl_shader_stage stage)
2064 {
2065 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2066 struct iris_bufmgr *bufmgr = screen->bufmgr;
2067 const struct gen_device_info *devinfo = &screen->devinfo;
2068
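/* per_thread_scratch is a power of two and at least 1KB, so this maps
 * 1KB -> 0, 2KB -> 1, and so on, giving an index into the scratch_bos
 * table.
 */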
2069 unsigned encoded_size = ffs(per_thread_scratch) - 11;
2070 assert(encoded_size < (1 << 16));
2071
2072 struct iris_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];
2073
2074 /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
2075 *
2076 * "Scratch Space per slice is computed based on 4 sub-slices. SW
2077 * must allocate scratch space enough so that each slice has 4
2078 * slices allowed."
2079 *
2080 * According to the other driver team, this applies to compute shaders
2081 * as well. This is not currently documented at all.
2082 *
2083 * This hack is no longer necessary on Gen11+.
2084 *
2085 * For Gen11+, scratch space allocation is based on the number of threads
2086 * in the base configuration.
2087 */
2088 unsigned subslice_total = screen->subslice_total;
2089 if (devinfo->gen >= 12)
2090 subslice_total = devinfo->num_subslices[0];
2091 else if (devinfo->gen == 11)
2092 subslice_total = 8;
2093 else if (devinfo->gen < 11)
2094 subslice_total = 4 * devinfo->num_slices;
2095 assert(subslice_total >= screen->subslice_total);
2096
2097 if (!*bop) {
2098 unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;
2099
2100 if (devinfo->gen >= 12) {
2101 /* Same as ICL below, but with 16 EUs. */
2102 scratch_ids_per_subslice = 16 * 8;
2103 } else if (devinfo->gen == 11) {
2104 /* The MEDIA_VFE_STATE docs say:
2105 *
2106 * "Starting with this configuration, the Maximum Number of
2107 * Threads must be set to (#EU * 8) for GPGPU dispatches.
2108 *
2109 * Although there are only 7 threads per EU in the configuration,
2110 * the FFTID is calculated as if there are 8 threads per EU,
2111 * which in turn requires a larger amount of Scratch Space to be
2112 * allocated by the driver."
2113 */
2114 scratch_ids_per_subslice = 8 * 8;
2115 }
2116
2117 uint32_t max_threads[] = {
2118 [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
2119 [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
2120 [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
2121 [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
2122 [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
2123 [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslice_total,
2124 };
2125
2126 uint32_t size = per_thread_scratch * max_threads[stage];
2127
2128 *bop = iris_bo_alloc(bufmgr, "scratch", size, IRIS_MEMZONE_SHADER);
2129 }
2130
2131 return *bop;
2132 }
2133
2134 /* ------------------------------------------------------------------- */
2135
2136 /**
2137 * The pipe->create_[stage]_state() driver hooks.
2138 *
2139 * Performs basic NIR preprocessing, records any state dependencies, and
2140 * returns an iris_uncompiled_shader as the Gallium CSO.
2141 *
2142 * Actual shader compilation to assembly happens later, at first use.
2143 */
2144 static void *
2145 iris_create_uncompiled_shader(struct pipe_context *ctx,
2146 nir_shader *nir,
2147 const struct pipe_stream_output_info *so_info)
2148 {
2149 struct iris_context *ice = (void *)ctx;
2150 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2151 const struct gen_device_info *devinfo = &screen->devinfo;
2152
2153 struct iris_uncompiled_shader *ish =
2154 calloc(1, sizeof(struct iris_uncompiled_shader));
2155 if (!ish)
2156 return NULL;
2157
2158 NIR_PASS(ish->needs_edge_flag, nir, iris_fix_edge_flags);
2159
2160 brw_preprocess_nir(screen->compiler, nir, NULL);
2161
2162 NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo,
2163 &ish->uses_atomic_load_store);
2164 NIR_PASS_V(nir, iris_lower_storage_image_derefs);
2165
2166 nir_sweep(nir);
2167
2168 if (nir->constant_data_size > 0) {
2169 unsigned data_offset;
2170 u_upload_data(ice->shaders.uploader, 0, nir->constant_data_size,
2171 32, nir->constant_data, &data_offset, &ish->const_data);
2172
2173 struct pipe_shader_buffer psb = {
2174 .buffer = ish->const_data,
2175 .buffer_offset = data_offset,
2176 .buffer_size = nir->constant_data_size,
2177 };
2178 iris_upload_ubo_ssbo_surf_state(ice, &psb, &ish->const_data_state, false);
2179 }
2180
2181 ish->program_id = get_new_program_id(screen);
2182 ish->nir = nir;
2183 if (so_info) {
2184 memcpy(&ish->stream_output, so_info, sizeof(*so_info));
2185 update_so_info(&ish->stream_output, nir->info.outputs_written);
2186 }
2187
2188 /* Save this now before potentially dropping nir->info.name */
2189 if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
2190 ish->use_alt_mode = true;
2191
2192 if (screen->disk_cache) {
2193 /* Serialize the NIR to a binary blob that we can hash for the disk
2194 * cache. Drop unnecessary information (like variable names)
2195 * so the serialized NIR is smaller, and also to let us detect more
2196 * isomorphic shaders when hashing, increasing cache hits.
2197 */
2198 struct blob blob;
2199 blob_init(&blob);
2200 nir_serialize(&blob, nir, true);
2201 _mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
2202 blob_finish(&blob);
2203 }
2204
2205 return ish;
2206 }
2207
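/**
 * Common helper for the create_[stage]_state hooks: translates TGSI to
 * NIR if necessary, then creates the iris_uncompiled_shader.
 */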
2208 static struct iris_uncompiled_shader *
2209 iris_create_shader_state(struct pipe_context *ctx,
2210 const struct pipe_shader_state *state)
2211 {
2212 struct nir_shader *nir;
2213
2214 if (state->type == PIPE_SHADER_IR_TGSI)
2215 nir = tgsi_to_nir(state->tokens, ctx->screen, false);
2216 else
2217 nir = state->ir.nir;
2218
2219 return iris_create_uncompiled_shader(ctx, nir, &state->stream_output);
2220 }
2221
2222 static void *
2223 iris_create_vs_state(struct pipe_context *ctx,
2224 const struct pipe_shader_state *state)
2225 {
2226 struct iris_context *ice = (void *) ctx;
2227 struct iris_screen *screen = (void *) ctx->screen;
2228 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2229
2230 /* User clip planes */
2231 if (ish->nir->info.clip_distance_array_size == 0)
2232 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2233
2234 if (screen->precompile) {
2235 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
2236
2237 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2238 iris_compile_vs(ice, ish, &key);
2239 }
2240
2241 return ish;
2242 }
2243
2244 static void *
2245 iris_create_tcs_state(struct pipe_context *ctx,
2246 const struct pipe_shader_state *state)
2247 {
2248 struct iris_context *ice = (void *) ctx;
2249 struct iris_screen *screen = (void *) ctx->screen;
2250 const struct brw_compiler *compiler = screen->compiler;
2251 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2252 struct shader_info *info = &ish->nir->info;
2253
2254 if (screen->precompile) {
2255 const unsigned _GL_TRIANGLES = 0x0004;
2256 struct iris_tcs_prog_key key = {
2257 KEY_ID(vue.base),
2258 // XXX: make sure the linker fills this out from the TES...
2259 .tes_primitive_mode =
2260 info->tess.primitive_mode ? info->tess.primitive_mode
2261 : _GL_TRIANGLES,
2262 .outputs_written = info->outputs_written,
2263 .patch_outputs_written = info->patch_outputs_written,
2264 };
2265
2266 /* 8_PATCH mode needs the key to contain the input patch dimensionality.
2267 * We don't have that information, so we randomly guess that the input
2268 * and output patches are the same size. This is a bad guess, but we
2269 * can't do much better.
2270 */
2271 if (compiler->use_tcs_8_patch)
2272 key.input_vertices = info->tess.tcs_vertices_out;
2273
2274 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2275 iris_compile_tcs(ice, ish, &key);
2276 }
2277
2278 return ish;
2279 }
2280
2281 static void *
2282 iris_create_tes_state(struct pipe_context *ctx,
2283 const struct pipe_shader_state *state)
2284 {
2285 struct iris_context *ice = (void *) ctx;
2286 struct iris_screen *screen = (void *) ctx->screen;
2287 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2288 struct shader_info *info = &ish->nir->info;
2289
2290 /* User clip planes */
2291 if (ish->nir->info.clip_distance_array_size == 0)
2292 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2293
2294 if (screen->precompile) {
2295 struct iris_tes_prog_key key = {
2296 KEY_ID(vue.base),
2297 // XXX: not ideal, need TCS output/TES input unification
2298 .inputs_read = info->inputs_read,
2299 .patch_inputs_read = info->patch_inputs_read,
2300 };
2301
2302 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2303 iris_compile_tes(ice, ish, &key);
2304 }
2305
2306 return ish;
2307 }
2308
2309 static void *
2310 iris_create_gs_state(struct pipe_context *ctx,
2311 const struct pipe_shader_state *state)
2312 {
2313 struct iris_context *ice = (void *) ctx;
2314 struct iris_screen *screen = (void *) ctx->screen;
2315 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2316
2317 /* User clip planes */
2318 if (ish->nir->info.clip_distance_array_size == 0)
2319 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2320
2321 if (screen->precompile) {
2322 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
2323
2324 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2325 iris_compile_gs(ice, ish, &key);
2326 }
2327
2328 return ish;
2329 }
2330
2331 static void *
2332 iris_create_fs_state(struct pipe_context *ctx,
2333 const struct pipe_shader_state *state)
2334 {
2335 struct iris_context *ice = (void *) ctx;
2336 struct iris_screen *screen = (void *) ctx->screen;
2337 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2338 struct shader_info *info = &ish->nir->info;
2339
2340 ish->nos |= (1ull << IRIS_NOS_FRAMEBUFFER) |
2341 (1ull << IRIS_NOS_DEPTH_STENCIL_ALPHA) |
2342 (1ull << IRIS_NOS_RASTERIZER) |
2343 (1ull << IRIS_NOS_BLEND);
2344
2345 /* The program key needs the VUE map if there are > 16 inputs */
2346 if (util_bitcount64(ish->nir->info.inputs_read &
2347 BRW_FS_VARYING_INPUT_MASK) > 16) {
2348 ish->nos |= (1ull << IRIS_NOS_LAST_VUE_MAP);
2349 }
2350
2351 if (screen->precompile) {
2352 const uint64_t color_outputs = info->outputs_written &
2353 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
2354 BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
2355 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));
2356
2357 bool can_rearrange_varyings =
2358 util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;
2359
2360 const struct gen_device_info *devinfo = &screen->devinfo;
2361 struct iris_fs_prog_key key = {
2362 KEY_ID(base),
2363 .nr_color_regions = util_bitcount(color_outputs),
2364 .coherent_fb_fetch = devinfo->gen >= 9,
2365 .input_slots_valid =
2366 can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
2367 };
2368
2369 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2370 iris_compile_fs(ice, ish, &key, NULL);
2371 }
2372
2373 return ish;
2374 }
2375
2376 static void *
2377 iris_create_compute_state(struct pipe_context *ctx,
2378 const struct pipe_compute_state *state)
2379 {
2380 assert(state->ir_type == PIPE_SHADER_IR_NIR);
2381
2382 struct iris_context *ice = (void *) ctx;
2383 struct iris_screen *screen = (void *) ctx->screen;
2384 struct iris_uncompiled_shader *ish =
2385 iris_create_uncompiled_shader(ctx, (void *) state->prog, NULL);
2386
2387 // XXX: disallow more than 64KB of shared variables
2388
2389 if (screen->precompile) {
2390 struct iris_cs_prog_key key = { KEY_ID(base) };
2391
2392 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2393 iris_compile_cs(ice, ish, &key);
2394 }
2395
2396 return ish;
2397 }
2398
2399 /**
2400 * The pipe->delete_[stage]_state() driver hooks.
2401 *
2402 * Frees the iris_uncompiled_shader.
2403 */
2404 static void
2405 iris_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
2406 {
2407 struct iris_uncompiled_shader *ish = state;
2408 struct iris_context *ice = (void *) ctx;
2409
2410 if (ice->shaders.uncompiled[stage] == ish) {
2411 ice->shaders.uncompiled[stage] = NULL;
2412 ice->state.dirty |= IRIS_DIRTY_UNCOMPILED_VS << stage;
2413 }
2414
2415 if (ish->const_data) {
2416 pipe_resource_reference(&ish->const_data, NULL);
2417 pipe_resource_reference(&ish->const_data_state.res, NULL);
2418 }
2419
2420 ralloc_free(ish->nir);
2421 free(ish);
2422 }
2423
2424 static void
2425 iris_delete_vs_state(struct pipe_context *ctx, void *state)
2426 {
2427 iris_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
2428 }
2429
2430 static void
2431 iris_delete_tcs_state(struct pipe_context *ctx, void *state)
2432 {
2433 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
2434 }
2435
2436 static void
2437 iris_delete_tes_state(struct pipe_context *ctx, void *state)
2438 {
2439 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
2440 }
2441
2442 static void
2443 iris_delete_gs_state(struct pipe_context *ctx, void *state)
2444 {
2445 iris_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
2446 }
2447
2448 static void
2449 iris_delete_fs_state(struct pipe_context *ctx, void *state)
2450 {
2451 iris_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
2452 }
2453
2454 static void
2455 iris_delete_cs_state(struct pipe_context *ctx, void *state)
2456 {
2457 iris_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
2458 }
2459
2460 /**
2461 * The pipe->bind_[stage]_state() driver hook.
2462 *
2463 * Binds an uncompiled shader as the current one for a particular stage.
2464 * Updates dirty tracking to account for the shader's NOS.
2465 */
2466 static void
2467 bind_shader_state(struct iris_context *ice,
2468 struct iris_uncompiled_shader *ish,
2469 gl_shader_stage stage)
2470 {
2471 uint64_t dirty_bit = IRIS_DIRTY_UNCOMPILED_VS << stage;
2472 const uint64_t nos = ish ? ish->nos : 0;
2473
2474 const struct shader_info *old_info = iris_get_shader_info(ice, stage);
2475 const struct shader_info *new_info = ish ? &ish->nir->info : NULL;
2476
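/* Re-emit sampler states when the number of used texture units changes;
 * the size of the uploaded sampler state table presumably depends on it.
 */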
2477 if ((old_info ? util_last_bit(old_info->textures_used) : 0) !=
2478 (new_info ? util_last_bit(new_info->textures_used) : 0)) {
2479 ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
2480 }
2481
2482 ice->shaders.uncompiled[stage] = ish;
2483 ice->state.dirty |= dirty_bit;
2484
2485 /* Record that CSOs need to mark IRIS_DIRTY_UNCOMPILED_XS when they change
2486 * (or that they no longer need to do so).
2487 */
2488 for (int i = 0; i < IRIS_NOS_COUNT; i++) {
2489 if (nos & (1 << i))
2490 ice->state.dirty_for_nos[i] |= dirty_bit;
2491 else
2492 ice->state.dirty_for_nos[i] &= ~dirty_bit;
2493 }
2494 }
2495
2496 static void
2497 iris_bind_vs_state(struct pipe_context *ctx, void *state)
2498 {
2499 struct iris_context *ice = (struct iris_context *)ctx;
2500 struct iris_uncompiled_shader *new_ish = state;
2501
2502 if (new_ish &&
2503 ice->state.window_space_position !=
2504 new_ish->nir->info.vs.window_space_position) {
2505 ice->state.window_space_position =
2506 new_ish->nir->info.vs.window_space_position;
2507
2508 ice->state.dirty |= IRIS_DIRTY_CLIP |
2509 IRIS_DIRTY_RASTER |
2510 IRIS_DIRTY_CC_VIEWPORT;
2511 }
2512
2513 bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
2514 }
2515
2516 static void
2517 iris_bind_tcs_state(struct pipe_context *ctx, void *state)
2518 {
2519 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
2520 }
2521
2522 static void
2523 iris_bind_tes_state(struct pipe_context *ctx, void *state)
2524 {
2525 struct iris_context *ice = (struct iris_context *)ctx;
2526
2527 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2528 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
2529 ice->state.dirty |= IRIS_DIRTY_URB;
2530
2531 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
2532 }
2533
2534 static void
2535 iris_bind_gs_state(struct pipe_context *ctx, void *state)
2536 {
2537 struct iris_context *ice = (struct iris_context *)ctx;
2538
2539 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2540 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
2541 ice->state.dirty |= IRIS_DIRTY_URB;
2542
2543 bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
2544 }
2545
2546 static void
2547 iris_bind_fs_state(struct pipe_context *ctx, void *state)
2548 {
2549 struct iris_context *ice = (struct iris_context *) ctx;
2550 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
2551 const struct gen_device_info *devinfo = &screen->devinfo;
2552 struct iris_uncompiled_shader *old_ish =
2553 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
2554 struct iris_uncompiled_shader *new_ish = state;
2555
2556 const unsigned color_bits =
2557 BITFIELD64_BIT(FRAG_RESULT_COLOR) |
2558 BITFIELD64_RANGE(FRAG_RESULT_DATA0, BRW_MAX_DRAW_BUFFERS);
2559
2560 /* Fragment shader outputs influence HasWriteableRT */
2561 if (!old_ish || !new_ish ||
2562 (old_ish->nir->info.outputs_written & color_bits) !=
2563 (new_ish->nir->info.outputs_written & color_bits))
2564 ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
2565
2566 if (devinfo->gen == 8)
2567 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
2568
2569 bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
2570 }
2571
2572 static void
2573 iris_bind_cs_state(struct pipe_context *ctx, void *state)
2574 {
2575 bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
2576 }
2577
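/**
 * Install the shader-related driver hooks on the pipe_context.
 */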
2578 void
2579 iris_init_program_functions(struct pipe_context *ctx)
2580 {
2581 ctx->create_vs_state = iris_create_vs_state;
2582 ctx->create_tcs_state = iris_create_tcs_state;
2583 ctx->create_tes_state = iris_create_tes_state;
2584 ctx->create_gs_state = iris_create_gs_state;
2585 ctx->create_fs_state = iris_create_fs_state;
2586 ctx->create_compute_state = iris_create_compute_state;
2587
2588 ctx->delete_vs_state = iris_delete_vs_state;
2589 ctx->delete_tcs_state = iris_delete_tcs_state;
2590 ctx->delete_tes_state = iris_delete_tes_state;
2591 ctx->delete_gs_state = iris_delete_gs_state;
2592 ctx->delete_fs_state = iris_delete_fs_state;
2593 ctx->delete_compute_state = iris_delete_cs_state;
2594
2595 ctx->bind_vs_state = iris_bind_vs_state;
2596 ctx->bind_tcs_state = iris_bind_tcs_state;
2597 ctx->bind_tes_state = iris_bind_tes_state;
2598 ctx->bind_gs_state = iris_bind_gs_state;
2599 ctx->bind_fs_state = iris_bind_fs_state;
2600 ctx->bind_compute_state = iris_bind_cs_state;
2601 }