iris: silence maybe-uninitialized for stc_dst_aux_usage variable
[mesa.git] / src / gallium / drivers / iris / iris_program.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23 /**
24 * @file iris_program.c
25 *
26 * This file contains the driver interface for compiling shaders.
27 *
28 * See iris_program_cache.c for the in-memory program cache where the
29 * compiled shaders are stored.
30 */
31
32 #include <stdio.h>
33 #include <errno.h>
34 #include "pipe/p_defines.h"
35 #include "pipe/p_state.h"
36 #include "pipe/p_context.h"
37 #include "pipe/p_screen.h"
38 #include "util/u_atomic.h"
39 #include "util/u_upload_mgr.h"
40 #include "util/debug.h"
41 #include "compiler/nir/nir.h"
42 #include "compiler/nir/nir_builder.h"
43 #include "compiler/nir/nir_serialize.h"
44 #include "intel/compiler/brw_compiler.h"
45 #include "intel/compiler/brw_nir.h"
46 #include "iris_context.h"
47 #include "nir/tgsi_to_nir.h"
48
49 #define KEY_ID(prefix) .prefix.program_string_id = ish->program_id
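/* Shared initializer for the brw_*_prog_key structures below: the program
 * string ID, a uniform subgroup size, identity (XYZW) texture swizzles,
 * and the MSAA compression assumptions for the given hardware generation.
 */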
50 #define BRW_KEY_INIT(gen, prog_id) \
51 .base.program_string_id = prog_id, \
52 .base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM, \
53 .base.tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688, \
54 .base.tex.compressed_multisample_layout_mask = ~0, \
55 .base.tex.msaa_16 = (gen >= 9 ? ~0 : 0)
56
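/* Hand out a fresh, unique program ID for a newly created shader. */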
57 static unsigned
58 get_new_program_id(struct iris_screen *screen)
59 {
60 return p_atomic_inc_return(&screen->program_id);
61 }
62
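/* The iris_to_brw_*_key() helpers below translate Iris's Gallium-level
 * shader keys into the corresponding brw compiler key structures.
 */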
63 static struct brw_vs_prog_key
64 iris_to_brw_vs_key(const struct gen_device_info *devinfo,
65 const struct iris_vs_prog_key *key)
66 {
67 return (struct brw_vs_prog_key) {
68 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
69
70 /* Don't tell the backend about our clip plane constants; we've
71 * already lowered them in NIR and don't want it doing it again.
72 */
73 .nr_userclip_plane_consts = 0,
74 };
75 }
76
77 static struct brw_tcs_prog_key
78 iris_to_brw_tcs_key(const struct gen_device_info *devinfo,
79 const struct iris_tcs_prog_key *key)
80 {
81 return (struct brw_tcs_prog_key) {
82 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
83 .tes_primitive_mode = key->tes_primitive_mode,
84 .input_vertices = key->input_vertices,
85 .patch_outputs_written = key->patch_outputs_written,
86 .outputs_written = key->outputs_written,
87 .quads_workaround = key->quads_workaround,
88 };
89 }
90
91 static struct brw_tes_prog_key
92 iris_to_brw_tes_key(const struct gen_device_info *devinfo,
93 const struct iris_tes_prog_key *key)
94 {
95 return (struct brw_tes_prog_key) {
96 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
97 .patch_inputs_read = key->patch_inputs_read,
98 .inputs_read = key->inputs_read,
99 };
100 }
101
102 static struct brw_gs_prog_key
103 iris_to_brw_gs_key(const struct gen_device_info *devinfo,
104 const struct iris_gs_prog_key *key)
105 {
106 return (struct brw_gs_prog_key) {
107 BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
108 };
109 }
110
111 static struct brw_wm_prog_key
112 iris_to_brw_fs_key(const struct gen_device_info *devinfo,
113 const struct iris_fs_prog_key *key)
114 {
115 return (struct brw_wm_prog_key) {
116 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
117 .nr_color_regions = key->nr_color_regions,
118 .flat_shade = key->flat_shade,
119 .alpha_test_replicate_alpha = key->alpha_test_replicate_alpha,
120 .alpha_to_coverage = key->alpha_to_coverage,
121 .clamp_fragment_color = key->clamp_fragment_color,
122 .persample_interp = key->persample_interp,
123 .multisample_fbo = key->multisample_fbo,
124 .force_dual_color_blend = key->force_dual_color_blend,
125 .coherent_fb_fetch = key->coherent_fb_fetch,
126 .color_outputs_valid = key->color_outputs_valid,
127 .input_slots_valid = key->input_slots_valid,
128 .ignore_sample_mask_out = !key->multisample_fbo,
129 };
130 }
131
132 static struct brw_cs_prog_key
133 iris_to_brw_cs_key(const struct gen_device_info *devinfo,
134 const struct iris_cs_prog_key *key)
135 {
136 return (struct brw_cs_prog_key) {
137 BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
138 };
139 }
140
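/* Allocate transient state space from the uploader, returning a CPU
 * mapping and recording the resulting resource and offset in @ref.
 */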
141 static void *
142 upload_state(struct u_upload_mgr *uploader,
143 struct iris_state_ref *ref,
144 unsigned size,
145 unsigned alignment)
146 {
147 void *p = NULL;
148 u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
149 return p;
150 }
151
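/* Fill and upload a SURFACE_STATE describing a UBO or SSBO range. */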
152 void
153 iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
154 struct pipe_shader_buffer *buf,
155 struct iris_state_ref *surf_state,
156 bool ssbo)
157 {
158 struct pipe_context *ctx = &ice->ctx;
159 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
160
161 void *map =
162 upload_state(ice->state.surface_uploader, surf_state,
163 screen->isl_dev.ss.size, 64);
164 if (unlikely(!map)) {
165 surf_state->res = NULL;
166 return;
167 }
168
169 struct iris_resource *res = (void *) buf->buffer;
170 struct iris_bo *surf_bo = iris_resource_bo(surf_state->res);
171 surf_state->offset += iris_bo_offset_from_base_address(surf_bo);
172
173 isl_buffer_fill_state(&screen->isl_dev, map,
174 .address = res->bo->gtt_offset + res->offset +
175 buf->buffer_offset,
176 .size_B = buf->buffer_size - res->offset,
177 .format = ssbo ? ISL_FORMAT_RAW
178 : ISL_FORMAT_R32G32B32A32_FLOAT,
179 .swizzle = ISL_SWIZZLE_IDENTITY,
180 .stride_B = 1,
181 .mocs = iris_mocs(res->bo, &screen->isl_dev));
182 }
183
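/* Build an SSA value holding the flattened element offset for an
 * array-of-arrays deref chain, clamped to stay within the array bounds.
 */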
184 static nir_ssa_def *
185 get_aoa_deref_offset(nir_builder *b,
186 nir_deref_instr *deref,
187 unsigned elem_size)
188 {
189 unsigned array_size = elem_size;
190 nir_ssa_def *offset = nir_imm_int(b, 0);
191
192 while (deref->deref_type != nir_deref_type_var) {
193 assert(deref->deref_type == nir_deref_type_array);
194
195 /* This level's element size is the previous level's array size */
196 assert(deref->arr.index.ssa);
197 nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
198 offset = nir_iadd(b, offset,
199 nir_imul(b, index, nir_imm_int(b, array_size)));
200
201 deref = nir_deref_instr_parent(deref);
202 assert(glsl_type_is_array(deref->type));
203 array_size *= glsl_get_length(deref->type);
204 }
205
206 /* Accessing an invalid surface index with the dataport can result in a
207 * hang. According to the spec "if the index used to select an individual
208 * element is negative or greater than or equal to the size of the array,
209 * the results of the operation are undefined but may not lead to
210 * termination" -- which is one of the possible outcomes of the hang.
211 * Clamp the index to prevent access outside of the array bounds.
212 */
213 return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
214 }
215
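/* Rewrite image deref intrinsics into index-based ones, using the
 * variable's driver_location plus the array-of-arrays offset.
 */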
216 static void
217 iris_lower_storage_image_derefs(nir_shader *nir)
218 {
219 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
220
221 nir_builder b;
222 nir_builder_init(&b, impl);
223
224 nir_foreach_block(block, impl) {
225 nir_foreach_instr_safe(instr, block) {
226 if (instr->type != nir_instr_type_intrinsic)
227 continue;
228
229 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
230 switch (intrin->intrinsic) {
231 case nir_intrinsic_image_deref_load:
232 case nir_intrinsic_image_deref_store:
233 case nir_intrinsic_image_deref_atomic_add:
234 case nir_intrinsic_image_deref_atomic_imin:
235 case nir_intrinsic_image_deref_atomic_umin:
236 case nir_intrinsic_image_deref_atomic_imax:
237 case nir_intrinsic_image_deref_atomic_umax:
238 case nir_intrinsic_image_deref_atomic_and:
239 case nir_intrinsic_image_deref_atomic_or:
240 case nir_intrinsic_image_deref_atomic_xor:
241 case nir_intrinsic_image_deref_atomic_exchange:
242 case nir_intrinsic_image_deref_atomic_comp_swap:
243 case nir_intrinsic_image_deref_size:
244 case nir_intrinsic_image_deref_samples:
245 case nir_intrinsic_image_deref_load_raw_intel:
246 case nir_intrinsic_image_deref_store_raw_intel: {
247 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
248 nir_variable *var = nir_deref_instr_get_variable(deref);
249
250 b.cursor = nir_before_instr(&intrin->instr);
251 nir_ssa_def *index =
252 nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
253 get_aoa_deref_offset(&b, deref, 1));
254 nir_rewrite_image_intrinsic(intrin, index, false);
255 break;
256 }
257
258 default:
259 break;
260 }
261 }
262 }
263 }
264
265 /**
266 * Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
267 */
268 static bool
269 iris_fix_edge_flags(nir_shader *nir)
270 {
271 if (nir->info.stage != MESA_SHADER_VERTEX) {
272 nir_shader_preserve_all_metadata(nir);
273 return false;
274 }
275
276 nir_variable *var = NULL;
277 nir_foreach_variable(v, &nir->outputs) {
278 if (v->data.location == VARYING_SLOT_EDGE) {
279 var = v;
280 break;
281 }
282 }
283
284 if (!var) {
285 nir_shader_preserve_all_metadata(nir);
286 return false;
287 }
288
289 exec_node_remove(&var->node);
290 var->data.mode = nir_var_shader_temp;
291 exec_list_push_tail(&nir->globals, &var->node);
292 nir->info.outputs_written &= ~VARYING_BIT_EDGE;
293 nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
294 nir_fixup_deref_modes(nir);
295
296 nir_foreach_function(f, nir) {
297 if (f->impl) {
298 nir_metadata_preserve(f->impl, nir_metadata_block_index |
299 nir_metadata_dominance |
300 nir_metadata_live_ssa_defs |
301 nir_metadata_loop_analysis);
302 } else {
303 nir_metadata_preserve(f->impl, nir_metadata_all);
304 }
305 }
306
307 return true;
308 }
309
310 /**
311 * Fix an uncompiled shader's stream output info.
312 *
313 * Core Gallium stores output->register_index as a "slot" number, where
314 * slots are assigned consecutively to all outputs in info->outputs_written.
315 * This naive packing of outputs doesn't work for us - we too have slots,
316 * but the layout is defined by the VUE map, which we won't have until we
317 * compile a specific shader variant. So, we remap these and simply store
318 * VARYING_SLOT_* in our copy's output->register_index fields.
319 *
320 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
321 * components of our VUE header. See brw_vue_map.c for the layout.
322 */
323 static void
324 update_so_info(struct pipe_stream_output_info *so_info,
325 uint64_t outputs_written)
326 {
327 uint8_t reverse_map[64] = {};
328 unsigned slot = 0;
329 while (outputs_written) {
330 reverse_map[slot++] = u_bit_scan64(&outputs_written);
331 }
332
333 for (unsigned i = 0; i < so_info->num_outputs; i++) {
334 struct pipe_stream_output *output = &so_info->output[i];
335
336 /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
337 output->register_index = reverse_map[output->register_index];
338
339 /* The VUE header contains three scalar fields packed together:
340 * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
341 * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
342 * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
343 */
344 switch (output->register_index) {
345 case VARYING_SLOT_LAYER:
346 assert(output->num_components == 1);
347 output->register_index = VARYING_SLOT_PSIZ;
348 output->start_component = 1;
349 break;
350 case VARYING_SLOT_VIEWPORT:
351 assert(output->num_components == 1);
352 output->register_index = VARYING_SLOT_PSIZ;
353 output->start_component = 2;
354 break;
355 case VARYING_SLOT_PSIZ:
356 assert(output->num_components == 1);
357 output->start_component = 3;
358 break;
359 }
360
361 //info->outputs_written |= 1ull << output->register_index;
362 }
363 }
364
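/* Point @n consecutive system values at dwords of a brw_image_param
 * field, zero-padding the rest of the vec4.
 */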
365 static void
366 setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
367 unsigned offset, unsigned n)
368 {
369 assert(offset % sizeof(uint32_t) == 0);
370
371 for (unsigned i = 0; i < n; ++i)
372 sysvals[i] = BRW_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);
373
374 for (unsigned i = n; i < 4; ++i)
375 sysvals[i] = BRW_PARAM_BUILTIN_ZERO;
376 }
377
378 /**
379 * Associate NIR uniform variables with the prog_data->param[] mechanism
380 * used by the backend. Also, decide which UBOs we'd like to push in an
381 * ideal situation (though the backend can reduce this).
382 */
383 static void
384 iris_setup_uniforms(const struct brw_compiler *compiler,
385 void *mem_ctx,
386 nir_shader *nir,
387 struct brw_stage_prog_data *prog_data,
388 enum brw_param_builtin **out_system_values,
389 unsigned *out_num_system_values,
390 unsigned *out_num_cbufs)
391 {
392 UNUSED const struct gen_device_info *devinfo = compiler->devinfo;
393
394 const unsigned IRIS_MAX_SYSTEM_VALUES =
395 PIPE_MAX_SHADER_IMAGES * BRW_IMAGE_PARAM_SIZE;
396 enum brw_param_builtin *system_values =
397 rzalloc_array(mem_ctx, enum brw_param_builtin, IRIS_MAX_SYSTEM_VALUES);
398 unsigned num_system_values = 0;
399
400 unsigned patch_vert_idx = -1;
401 unsigned ucp_idx[IRIS_MAX_CLIP_PLANES];
402 unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
403 unsigned variable_group_size_idx = -1;
404 memset(ucp_idx, -1, sizeof(ucp_idx));
405 memset(img_idx, -1, sizeof(img_idx));
406
407 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
408
409 nir_builder b;
410 nir_builder_init(&b, impl);
411
412 b.cursor = nir_before_block(nir_start_block(impl));
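   /* Placeholder UBO indices: temp_ubo_name stands in for the system value
    * cbuf and temp_const_ubo_name for the NIR constant data cbuf.  Both are
    * rewritten to real indices once the final cbuf layout is known below.
    */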
413 nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
414 nir_ssa_def *temp_const_ubo_name = NULL;
415
416 /* Turn system value intrinsics into uniforms */
417 nir_foreach_block(block, impl) {
418 nir_foreach_instr_safe(instr, block) {
419 if (instr->type != nir_instr_type_intrinsic)
420 continue;
421
422 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
423 nir_ssa_def *offset;
424
425 switch (intrin->intrinsic) {
426 case nir_intrinsic_load_constant: {
427 /* This one is special because it reads from the shader constant
428 * data and not cbuf0 which gallium uploads for us.
429 */
430 b.cursor = nir_before_instr(instr);
431 nir_ssa_def *offset =
432 nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
433 nir_intrinsic_base(intrin));
434
435 if (temp_const_ubo_name == NULL)
436 temp_const_ubo_name = nir_imm_int(&b, 0);
437
438 nir_intrinsic_instr *load_ubo =
439 nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
440 load_ubo->num_components = intrin->num_components;
441 load_ubo->src[0] = nir_src_for_ssa(temp_const_ubo_name);
442 load_ubo->src[1] = nir_src_for_ssa(offset);
443 nir_intrinsic_set_align(load_ubo,
444 nir_intrinsic_align_mul(intrin),
445 nir_intrinsic_align_offset(intrin));
446 nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
447 intrin->dest.ssa.num_components,
448 intrin->dest.ssa.bit_size,
449 intrin->dest.ssa.name);
450 nir_builder_instr_insert(&b, &load_ubo->instr);
451
452 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
453 nir_src_for_ssa(&load_ubo->dest.ssa));
454 nir_instr_remove(&intrin->instr);
455 continue;
456 }
457 case nir_intrinsic_load_user_clip_plane: {
458 unsigned ucp = nir_intrinsic_ucp_id(intrin);
459
460 if (ucp_idx[ucp] == -1) {
461 ucp_idx[ucp] = num_system_values;
462 num_system_values += 4;
463 }
464
465 for (int i = 0; i < 4; i++) {
466 system_values[ucp_idx[ucp] + i] =
467 BRW_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
468 }
469
470 b.cursor = nir_before_instr(instr);
471 offset = nir_imm_int(&b, ucp_idx[ucp] * sizeof(uint32_t));
472 break;
473 }
474 case nir_intrinsic_load_patch_vertices_in:
475 if (patch_vert_idx == -1)
476 patch_vert_idx = num_system_values++;
477
478 system_values[patch_vert_idx] =
479 BRW_PARAM_BUILTIN_PATCH_VERTICES_IN;
480
481 b.cursor = nir_before_instr(instr);
482 offset = nir_imm_int(&b, patch_vert_idx * sizeof(uint32_t));
483 break;
484 case nir_intrinsic_image_deref_load_param_intel: {
485 assert(devinfo->gen < 9);
486 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
487 nir_variable *var = nir_deref_instr_get_variable(deref);
488
489 if (img_idx[var->data.binding] == -1) {
490 /* GL only allows arrays of arrays of images. */
491 assert(glsl_type_is_image(glsl_without_array(var->type)));
492 unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));
493
494 for (int i = 0; i < num_images; i++) {
495 const unsigned img = var->data.binding + i;
496
497 img_idx[img] = num_system_values;
498 num_system_values += BRW_IMAGE_PARAM_SIZE;
499
500 uint32_t *img_sv = &system_values[img_idx[img]];
501
502 setup_vec4_image_sysval(
503 img_sv + BRW_IMAGE_PARAM_OFFSET_OFFSET, img,
504 offsetof(struct brw_image_param, offset), 2);
505 setup_vec4_image_sysval(
506 img_sv + BRW_IMAGE_PARAM_SIZE_OFFSET, img,
507 offsetof(struct brw_image_param, size), 3);
508 setup_vec4_image_sysval(
509 img_sv + BRW_IMAGE_PARAM_STRIDE_OFFSET, img,
510 offsetof(struct brw_image_param, stride), 4);
511 setup_vec4_image_sysval(
512 img_sv + BRW_IMAGE_PARAM_TILING_OFFSET, img,
513 offsetof(struct brw_image_param, tiling), 3);
514 setup_vec4_image_sysval(
515 img_sv + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, img,
516 offsetof(struct brw_image_param, swizzling), 2);
517 }
518 }
519
520 b.cursor = nir_before_instr(instr);
521 offset = nir_iadd(&b,
522 get_aoa_deref_offset(&b, deref, BRW_IMAGE_PARAM_SIZE * 4),
523 nir_imm_int(&b, img_idx[var->data.binding] * 4 +
524 nir_intrinsic_base(intrin) * 16));
525 break;
526 }
527 case nir_intrinsic_load_local_group_size: {
528 assert(nir->info.cs.local_size_variable);
529 if (variable_group_size_idx == -1) {
530 variable_group_size_idx = num_system_values;
531 num_system_values += 3;
532 for (int i = 0; i < 3; i++) {
533 system_values[variable_group_size_idx + i] =
534 BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X + i;
535 }
536 }
537
538 b.cursor = nir_before_instr(instr);
539 offset = nir_imm_int(&b, variable_group_size_idx * sizeof(uint32_t));
540 break;
541 }
542 default:
543 continue;
544 }
545
546 unsigned comps = nir_intrinsic_dest_components(intrin);
547
548 nir_intrinsic_instr *load =
549 nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
550 load->num_components = comps;
551 load->src[0] = nir_src_for_ssa(temp_ubo_name);
552 load->src[1] = nir_src_for_ssa(offset);
553 nir_intrinsic_set_align(load, 4, 0);
554 nir_ssa_dest_init(&load->instr, &load->dest, comps, 32, NULL);
555 nir_builder_instr_insert(&b, &load->instr);
556 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
557 nir_src_for_ssa(&load->dest.ssa));
558 nir_instr_remove(instr);
559 }
560 }
561
562 nir_validate_shader(nir, "before remapping");
563
564 /* Uniforms are stored in constant buffer 0, so the user-facing UBOs
565 * are shifted up by one. If any constant buffer is needed at all,
566 * constant buffer 0 will be needed, so account for it.
567 */
568 unsigned num_cbufs = nir->info.num_ubos;
569 if (num_cbufs || nir->num_uniforms)
570 num_cbufs++;
571
572 /* Place the new params in a new cbuf. */
573 if (num_system_values > 0) {
574 unsigned sysval_cbuf_index = num_cbufs;
575 num_cbufs++;
576
577 system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
578 num_system_values);
579
580 nir_foreach_block(block, impl) {
581 nir_foreach_instr_safe(instr, block) {
582 if (instr->type != nir_instr_type_intrinsic)
583 continue;
584
585 nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
586
587 if (load->intrinsic != nir_intrinsic_load_ubo)
588 continue;
589
590 b.cursor = nir_before_instr(instr);
591
592 assert(load->src[0].is_ssa);
593
594 if (load->src[0].ssa == temp_ubo_name) {
595 nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
596 nir_instr_rewrite_src(instr, &load->src[0],
597 nir_src_for_ssa(imm));
598 }
599 }
600 }
601
602 /* We need to fold the new iadds for brw_nir_analyze_ubo_ranges */
603 nir_opt_constant_folding(nir);
604 } else {
605 ralloc_free(system_values);
606 system_values = NULL;
607 }
608
609 assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
610 nir_validate_shader(nir, "after remap");
611
612 /* We don't use params[], but gallium leaves num_uniforms set. We used it
613 * to detect whether cbuf0 exists, but by this point it's no longer
614 * needed, so zero it out to keep the back-end from getting confused when
615 * nr_params * 4 != num_uniforms.
616 */
617 nir->num_uniforms = 0;
618
619 /* Constant loads (if any) need to go at the end of the constant buffers so
620 * we need to know num_cbufs before we can lower to them.
621 */
622 if (temp_const_ubo_name != NULL) {
623 nir_load_const_instr *const_ubo_index =
624 nir_instr_as_load_const(temp_const_ubo_name->parent_instr);
625 assert(const_ubo_index->def.bit_size == 32);
626 const_ubo_index->value[0].u32 = num_cbufs;
627 }
628
629 *out_system_values = system_values;
630 *out_num_system_values = num_system_values;
631 *out_num_cbufs = num_cbufs;
632 }
633
634 static const char *surface_group_names[] = {
635 [IRIS_SURFACE_GROUP_RENDER_TARGET] = "render target",
636 [IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
637 [IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
638 [IRIS_SURFACE_GROUP_TEXTURE] = "texture",
639 [IRIS_SURFACE_GROUP_UBO] = "ubo",
640 [IRIS_SURFACE_GROUP_SSBO] = "ssbo",
641 [IRIS_SURFACE_GROUP_IMAGE] = "image",
642 };
643
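/* Dump a human-readable summary of a binding table; used when the
 * INTEL_DEBUG "bt" flag is set.
 */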
644 static void
645 iris_print_binding_table(FILE *fp, const char *name,
646 const struct iris_binding_table *bt)
647 {
648 STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == IRIS_SURFACE_GROUP_COUNT);
649
650 uint32_t total = 0;
651 uint32_t compacted = 0;
652
653 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
654 uint32_t size = bt->sizes[i];
655 total += size;
656 if (size)
657 compacted += util_bitcount64(bt->used_mask[i]);
658 }
659
660 if (total == 0) {
661 fprintf(fp, "Binding table for %s is empty\n\n", name);
662 return;
663 }
664
665 if (total != compacted) {
666 fprintf(fp, "Binding table for %s "
667 "(compacted to %u entries from %u entries)\n",
668 name, compacted, total);
669 } else {
670 fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
671 }
672
673 uint32_t entry = 0;
674 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
675 uint64_t mask = bt->used_mask[i];
676 while (mask) {
677 int index = u_bit_scan64(&mask);
678 fprintf(fp, " [%u] %s #%d\n", entry++, surface_group_names[i], index);
679 }
680 }
681 fprintf(fp, "\n");
682 }
683
684 enum {
685 /* Max elements in a surface group. */
686 SURFACE_GROUP_MAX_ELEMENTS = 64,
687 };
688
689 /**
690 * Map a <group, index> pair to a binding table index.
691 *
692 * For example: <UBO, 5> => binding table index 12
693 */
694 uint32_t
695 iris_group_index_to_bti(const struct iris_binding_table *bt,
696 enum iris_surface_group group, uint32_t index)
697 {
698 assert(index < bt->sizes[group]);
699 uint64_t mask = bt->used_mask[group];
700 uint64_t bit = 1ull << index;
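   /* The compacted index within the group is the number of lower-numbered
    * surfaces in this group that are actually used.
    */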
701 if (bit & mask) {
702 return bt->offsets[group] + util_bitcount64((bit - 1) & mask);
703 } else {
704 return IRIS_SURFACE_NOT_USED;
705 }
706 }
707
708 /**
709 * Map a binding table index back to a <group, index> pair.
710 *
711 * For example: binding table index 12 => <UBO, 5>
712 */
713 uint32_t
714 iris_bti_to_group_index(const struct iris_binding_table *bt,
715 enum iris_surface_group group, uint32_t bti)
716 {
717 uint64_t used_mask = bt->used_mask[group];
718 assert(bti >= bt->offsets[group]);
719
720 uint32_t c = bti - bt->offsets[group];
721 while (used_mask) {
722 int i = u_bit_scan64(&used_mask);
723 if (c == 0)
724 return i;
725 c--;
726 }
727
728 return IRIS_SURFACE_NOT_USED;
729 }
730
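/* Replace a group-relative surface index source with the final binding
 * table index.
 */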
731 static void
732 rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
733 nir_instr *instr, nir_src *src,
734 enum iris_surface_group group)
735 {
736 assert(bt->sizes[group] > 0);
737
738 b->cursor = nir_before_instr(instr);
739 nir_ssa_def *bti;
740 if (nir_src_is_const(*src)) {
741 uint32_t index = nir_src_as_uint(*src);
742 bti = nir_imm_intN_t(b, iris_group_index_to_bti(bt, group, index),
743 src->ssa->bit_size);
744 } else {
745 /* Indirect usage makes all the surfaces of the group available,
746 * so we can just add the base.
747 */
748 assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
749 bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
750 }
751 nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
752 }
753
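/* Record which surfaces in the group are referenced by @src.  An indirect
 * (non-constant) index forces the whole group to be considered used.
 */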
754 static void
755 mark_used_with_src(struct iris_binding_table *bt, nir_src *src,
756 enum iris_surface_group group)
757 {
758 assert(bt->sizes[group] > 0);
759
760 if (nir_src_is_const(*src)) {
761 uint64_t index = nir_src_as_uint(*src);
762 assert(index < bt->sizes[group]);
763 bt->used_mask[group] |= 1ull << index;
764 } else {
765 /* There's an indirect usage, we need all the surfaces. */
766 bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
767 }
768 }
769
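/* Binding table compaction can be disabled via the
 * INTEL_DISABLE_COMPACT_BINDING_TABLE environment variable for debugging.
 */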
770 static bool
771 skip_compacting_binding_tables(void)
772 {
773 static int skip = -1;
774 if (skip < 0)
775 skip = env_var_as_boolean("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
776 return skip;
777 }
778
779 /**
780 * Set up the binding table indices and apply to the shader.
781 */
782 static void
783 iris_setup_binding_table(const struct gen_device_info *devinfo,
784 struct nir_shader *nir,
785 struct iris_binding_table *bt,
786 unsigned num_render_targets,
787 unsigned num_system_values,
788 unsigned num_cbufs)
789 {
790 const struct shader_info *info = &nir->info;
791
792 memset(bt, 0, sizeof(*bt));
793
794 /* Set the sizes for each surface group. For some groups, we already know
795 * upfront how many will be used, so mark them.
796 */
797 if (info->stage == MESA_SHADER_FRAGMENT) {
798 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
799 /* All render targets used. */
800 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET] =
801 BITFIELD64_MASK(num_render_targets);
802
803 /* Set up the render target read surface group in order to support
804 * non-coherent framebuffer fetch on Gen8.
805 */
806 if (devinfo->gen == 8 && info->outputs_read) {
807 bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
808 bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] =
809 BITFIELD64_MASK(num_render_targets);
810 }
811 } else if (info->stage == MESA_SHADER_COMPUTE) {
812 bt->sizes[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
813 }
814
815 bt->sizes[IRIS_SURFACE_GROUP_TEXTURE] = util_last_bit(info->textures_used);
816 bt->used_mask[IRIS_SURFACE_GROUP_TEXTURE] = info->textures_used;
817
818 bt->sizes[IRIS_SURFACE_GROUP_IMAGE] = info->num_images;
819
820 /* Allocate an extra slot in the UBO section for NIR constants.
821 * Binding table compaction will remove it if unnecessary.
822 *
823 * We don't include them in iris_compiled_shader::num_cbufs because
824 * they are uploaded separately from shs->constbuf[], but from a shader
825 * point of view, they're another UBO (at the end of the section).
826 */
827 bt->sizes[IRIS_SURFACE_GROUP_UBO] = num_cbufs + 1;
828
829 bt->sizes[IRIS_SURFACE_GROUP_SSBO] = info->num_ssbos;
830
831 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
832 assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);
833
834 /* Mark surfaces used in the cases where we don't have the information
835 * available upfront.
836 */
837 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
838 nir_foreach_block (block, impl) {
839 nir_foreach_instr (instr, block) {
840 if (instr->type != nir_instr_type_intrinsic)
841 continue;
842
843 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
844 switch (intrin->intrinsic) {
845 case nir_intrinsic_load_num_work_groups:
846 bt->used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
847 break;
848
849 case nir_intrinsic_load_output:
850 if (devinfo->gen == 8) {
851 mark_used_with_src(bt, &intrin->src[0],
852 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
853 }
854 break;
855
856 case nir_intrinsic_image_size:
857 case nir_intrinsic_image_load:
858 case nir_intrinsic_image_store:
859 case nir_intrinsic_image_atomic_add:
860 case nir_intrinsic_image_atomic_imin:
861 case nir_intrinsic_image_atomic_umin:
862 case nir_intrinsic_image_atomic_imax:
863 case nir_intrinsic_image_atomic_umax:
864 case nir_intrinsic_image_atomic_and:
865 case nir_intrinsic_image_atomic_or:
866 case nir_intrinsic_image_atomic_xor:
867 case nir_intrinsic_image_atomic_exchange:
868 case nir_intrinsic_image_atomic_comp_swap:
869 case nir_intrinsic_image_load_raw_intel:
870 case nir_intrinsic_image_store_raw_intel:
871 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_IMAGE);
872 break;
873
874 case nir_intrinsic_load_ubo:
875 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_UBO);
876 break;
877
878 case nir_intrinsic_store_ssbo:
879 mark_used_with_src(bt, &intrin->src[1], IRIS_SURFACE_GROUP_SSBO);
880 break;
881
882 case nir_intrinsic_get_buffer_size:
883 case nir_intrinsic_ssbo_atomic_add:
884 case nir_intrinsic_ssbo_atomic_imin:
885 case nir_intrinsic_ssbo_atomic_umin:
886 case nir_intrinsic_ssbo_atomic_imax:
887 case nir_intrinsic_ssbo_atomic_umax:
888 case nir_intrinsic_ssbo_atomic_and:
889 case nir_intrinsic_ssbo_atomic_or:
890 case nir_intrinsic_ssbo_atomic_xor:
891 case nir_intrinsic_ssbo_atomic_exchange:
892 case nir_intrinsic_ssbo_atomic_comp_swap:
893 case nir_intrinsic_ssbo_atomic_fmin:
894 case nir_intrinsic_ssbo_atomic_fmax:
895 case nir_intrinsic_ssbo_atomic_fcomp_swap:
896 case nir_intrinsic_load_ssbo:
897 mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_SSBO);
898 break;
899
900 default:
901 break;
902 }
903 }
904 }
905
906 /* When compaction is disabled, we just mark everything as used. */
907 if (unlikely(skip_compacting_binding_tables())) {
908 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
909 bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
910 }
911
912 /* Calculate the offsets and the binding table size based on the used
913 * surfaces. After this point, the functions to go between "group indices"
914 * and binding table indices can be used.
915 */
916 uint32_t next = 0;
917 for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
918 if (bt->used_mask[i] != 0) {
919 bt->offsets[i] = next;
920 next += util_bitcount64(bt->used_mask[i]);
921 }
922 }
923 bt->size_bytes = next * 4;
924
925 if (unlikely(INTEL_DEBUG & DEBUG_BT)) {
926 iris_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
927 }
928
929 /* Apply the binding table indices. The backend compiler is not expected
930 * to change those, as we haven't set any of the *_start entries in brw
931 * binding_table.
932 */
933 nir_builder b;
934 nir_builder_init(&b, impl);
935
936 nir_foreach_block (block, impl) {
937 nir_foreach_instr (instr, block) {
938 if (instr->type == nir_instr_type_tex) {
939 nir_tex_instr *tex = nir_instr_as_tex(instr);
940 tex->texture_index =
941 iris_group_index_to_bti(bt, IRIS_SURFACE_GROUP_TEXTURE,
942 tex->texture_index);
943 continue;
944 }
945
946 if (instr->type != nir_instr_type_intrinsic)
947 continue;
948
949 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
950 switch (intrin->intrinsic) {
951 case nir_intrinsic_image_size:
952 case nir_intrinsic_image_load:
953 case nir_intrinsic_image_store:
954 case nir_intrinsic_image_atomic_add:
955 case nir_intrinsic_image_atomic_imin:
956 case nir_intrinsic_image_atomic_umin:
957 case nir_intrinsic_image_atomic_imax:
958 case nir_intrinsic_image_atomic_umax:
959 case nir_intrinsic_image_atomic_and:
960 case nir_intrinsic_image_atomic_or:
961 case nir_intrinsic_image_atomic_xor:
962 case nir_intrinsic_image_atomic_exchange:
963 case nir_intrinsic_image_atomic_comp_swap:
964 case nir_intrinsic_image_load_raw_intel:
965 case nir_intrinsic_image_store_raw_intel:
966 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
967 IRIS_SURFACE_GROUP_IMAGE);
968 break;
969
970 case nir_intrinsic_load_ubo:
971 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
972 IRIS_SURFACE_GROUP_UBO);
973 break;
974
975 case nir_intrinsic_store_ssbo:
976 rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
977 IRIS_SURFACE_GROUP_SSBO);
978 break;
979
980 case nir_intrinsic_load_output:
981 if (devinfo->gen == 8) {
982 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
983 IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
984 }
985 break;
986
987 case nir_intrinsic_get_buffer_size:
988 case nir_intrinsic_ssbo_atomic_add:
989 case nir_intrinsic_ssbo_atomic_imin:
990 case nir_intrinsic_ssbo_atomic_umin:
991 case nir_intrinsic_ssbo_atomic_imax:
992 case nir_intrinsic_ssbo_atomic_umax:
993 case nir_intrinsic_ssbo_atomic_and:
994 case nir_intrinsic_ssbo_atomic_or:
995 case nir_intrinsic_ssbo_atomic_xor:
996 case nir_intrinsic_ssbo_atomic_exchange:
997 case nir_intrinsic_ssbo_atomic_comp_swap:
998 case nir_intrinsic_ssbo_atomic_fmin:
999 case nir_intrinsic_ssbo_atomic_fmax:
1000 case nir_intrinsic_ssbo_atomic_fcomp_swap:
1001 case nir_intrinsic_load_ssbo:
1002 rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
1003 IRIS_SURFACE_GROUP_SSBO);
1004 break;
1005
1006 default:
1007 break;
1008 }
1009 }
1010 }
1011 }
1012
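/* Emit a shader_perf_log message when a shader is recompiled, and report
 * which key fields differ from the previous compile.
 */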
1013 static void
1014 iris_debug_recompile(struct iris_context *ice,
1015 struct shader_info *info,
1016 const struct brw_base_prog_key *key)
1017 {
1018 struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
1019 const struct gen_device_info *devinfo = &screen->devinfo;
1020 const struct brw_compiler *c = screen->compiler;
1021
1022 if (!info)
1023 return;
1024
1025 c->shader_perf_log(&ice->dbg, "Recompiling %s shader for program %s: %s\n",
1026 _mesa_shader_stage_to_string(info->stage),
1027 info->name ? info->name : "(no identifier)",
1028 info->label ? info->label : "");
1029
1030 const void *old_iris_key =
1031 iris_find_previous_compile(ice, info->stage, key->program_string_id);
1032
1033 union brw_any_prog_key old_key;
1034
1035 switch (info->stage) {
1036 case MESA_SHADER_VERTEX:
1037 old_key.vs = iris_to_brw_vs_key(devinfo, old_iris_key);
1038 break;
1039 case MESA_SHADER_TESS_CTRL:
1040 old_key.tcs = iris_to_brw_tcs_key(devinfo, old_iris_key);
1041 break;
1042 case MESA_SHADER_TESS_EVAL:
1043 old_key.tes = iris_to_brw_tes_key(devinfo, old_iris_key);
1044 break;
1045 case MESA_SHADER_GEOMETRY:
1046 old_key.gs = iris_to_brw_gs_key(devinfo, old_iris_key);
1047 break;
1048 case MESA_SHADER_FRAGMENT:
1049 old_key.wm = iris_to_brw_fs_key(devinfo, old_iris_key);
1050 break;
1051 case MESA_SHADER_COMPUTE:
1052 old_key.cs = iris_to_brw_cs_key(devinfo, old_iris_key);
1053 break;
1054 default:
1055 unreachable("invalid shader stage");
1056 }
1057
1058 brw_debug_key_recompile(c, &ice->dbg, info->stage, &old_key.base, key);
1059 }
1060
1061 /**
1062 * Get the shader for the last enabled geometry stage.
1063 *
1064 * This stage is the one which will feed stream output and the rasterizer.
1065 */
1066 static gl_shader_stage
1067 last_vue_stage(struct iris_context *ice)
1068 {
1069 if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
1070 return MESA_SHADER_GEOMETRY;
1071
1072 if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
1073 return MESA_SHADER_TESS_EVAL;
1074
1075 return MESA_SHADER_VERTEX;
1076 }
1077
1078 /**
1079 * Compile a vertex shader, and upload the assembly.
1080 */
1081 static struct iris_compiled_shader *
1082 iris_compile_vs(struct iris_context *ice,
1083 struct iris_uncompiled_shader *ish,
1084 const struct iris_vs_prog_key *key)
1085 {
1086 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1087 const struct brw_compiler *compiler = screen->compiler;
1088 const struct gen_device_info *devinfo = &screen->devinfo;
1089 void *mem_ctx = ralloc_context(NULL);
1090 struct brw_vs_prog_data *vs_prog_data =
1091 rzalloc(mem_ctx, struct brw_vs_prog_data);
1092 struct brw_vue_prog_data *vue_prog_data = &vs_prog_data->base;
1093 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1094 enum brw_param_builtin *system_values;
1095 unsigned num_system_values;
1096 unsigned num_cbufs;
1097
1098 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1099
1100 if (key->vue.nr_userclip_plane_consts) {
1101 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1102 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1103 true, false, NULL);
1104 nir_lower_io_to_temporaries(nir, impl, true, false);
1105 nir_lower_global_vars_to_local(nir);
1106 nir_lower_vars_to_ssa(nir);
1107 nir_shader_gather_info(nir, impl);
1108 }
1109
1110 prog_data->use_alt_mode = ish->use_alt_mode;
1111
1112 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1113 &num_system_values, &num_cbufs);
1114
1115 struct iris_binding_table bt;
1116 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1117 num_system_values, num_cbufs);
1118
1119 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1120
1121 brw_compute_vue_map(devinfo,
1122 &vue_prog_data->vue_map, nir->info.outputs_written,
1123 nir->info.separate_shader, /* pos_slots */ 1);
1124
1125 struct brw_vs_prog_key brw_key = iris_to_brw_vs_key(devinfo, key);
1126
1127 char *error_str = NULL;
1128 const unsigned *program =
1129 brw_compile_vs(compiler, &ice->dbg, mem_ctx, &brw_key, vs_prog_data,
1130 nir, -1, NULL, &error_str);
1131 if (program == NULL) {
1132 dbg_printf("Failed to compile vertex shader: %s\n", error_str);
1133 ralloc_free(mem_ctx);
1134 return NULL;
1135 }
1136
1137 if (ish->compiled_once) {
1138 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1139 } else {
1140 ish->compiled_once = true;
1141 }
1142
1143 uint32_t *so_decls =
1144 screen->vtbl.create_so_decl_list(&ish->stream_output,
1145 &vue_prog_data->vue_map);
1146
1147 struct iris_compiled_shader *shader =
1148 iris_upload_shader(ice, IRIS_CACHE_VS, sizeof(*key), key, program,
1149 prog_data, so_decls, system_values, num_system_values,
1150 num_cbufs, &bt);
1151
1152 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1153
1154 ralloc_free(mem_ctx);
1155 return shader;
1156 }
1157
1158 /**
1159 * Update the current vertex shader variant.
1160 *
1161 * Fill out the key, look in the cache, compile and bind if needed.
1162 */
1163 static void
1164 iris_update_compiled_vs(struct iris_context *ice)
1165 {
1166 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1167 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
1168 struct iris_uncompiled_shader *ish =
1169 ice->shaders.uncompiled[MESA_SHADER_VERTEX];
1170
1171 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
1172 screen->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1173
1174 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
1175 struct iris_compiled_shader *shader =
1176 iris_find_cached_shader(ice, IRIS_CACHE_VS, sizeof(key), &key);
1177
1178 if (!shader)
1179 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1180
1181 if (!shader)
1182 shader = iris_compile_vs(ice, ish, &key);
1183
1184 if (old != shader) {
1185 ice->shaders.prog[IRIS_CACHE_VS] = shader;
1186 ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
1187 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_VS |
1188 IRIS_STAGE_DIRTY_BINDINGS_VS |
1189 IRIS_STAGE_DIRTY_CONSTANTS_VS;
1190 shs->sysvals_need_upload = true;
1191
1192 const struct brw_vs_prog_data *vs_prog_data =
1193 (void *) shader->prog_data;
1194 const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
1195 vs_prog_data->uses_baseinstance;
1196 const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
1197 vs_prog_data->uses_is_indexed_draw;
1198 const bool needs_sgvs_element = uses_draw_params ||
1199 vs_prog_data->uses_instanceid ||
1200 vs_prog_data->uses_vertexid;
1201
1202 if (ice->state.vs_uses_draw_params != uses_draw_params ||
1203 ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
1204 ice->state.vs_needs_edge_flag != ish->needs_edge_flag) {
1205 ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
1206 IRIS_DIRTY_VERTEX_ELEMENTS;
1207 }
1208 ice->state.vs_uses_draw_params = uses_draw_params;
1209 ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
1210 ice->state.vs_needs_sgvs_element = needs_sgvs_element;
1211 ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
1212 }
1213 }
1214
1215 /**
1216 * Get the shader_info for a given stage, or NULL if the stage is disabled.
1217 */
1218 const struct shader_info *
1219 iris_get_shader_info(const struct iris_context *ice, gl_shader_stage stage)
1220 {
1221 const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
1222
1223 if (!ish)
1224 return NULL;
1225
1226 const nir_shader *nir = ish->nir;
1227 return &nir->info;
1228 }
1229
1230 /**
1231 * Get the union of TCS output and TES input slots.
1232 *
1233 * TCS and TES need to agree on a common URB entry layout. In particular,
1234 * the data for all patch vertices is stored in a single URB entry (unlike
1235 * GS which has one entry per input vertex). This means that per-vertex
1236 * array indexing needs a stride.
1237 *
1238 * SSO requires locations to match, but doesn't require the number of
1239 * outputs/inputs to match (in fact, the TCS often has extra outputs).
1240 * So, we need to take the extra step of unifying these on the fly.
1241 */
1242 static void
1243 get_unified_tess_slots(const struct iris_context *ice,
1244 uint64_t *per_vertex_slots,
1245 uint32_t *per_patch_slots)
1246 {
1247 const struct shader_info *tcs =
1248 iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
1249 const struct shader_info *tes =
1250 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1251
1252 *per_vertex_slots = tes->inputs_read;
1253 *per_patch_slots = tes->patch_inputs_read;
1254
1255 if (tcs) {
1256 *per_vertex_slots |= tcs->outputs_written;
1257 *per_patch_slots |= tcs->patch_outputs_written;
1258 }
1259 }
1260
1261 /**
1262 * Compile a tessellation control shader, and upload the assembly.
1263 */
1264 static struct iris_compiled_shader *
1265 iris_compile_tcs(struct iris_context *ice,
1266 struct iris_uncompiled_shader *ish,
1267 const struct iris_tcs_prog_key *key)
1268 {
1269 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1270 const struct brw_compiler *compiler = screen->compiler;
1271 const struct nir_shader_compiler_options *options =
1272 compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
1273 void *mem_ctx = ralloc_context(NULL);
1274 struct brw_tcs_prog_data *tcs_prog_data =
1275 rzalloc(mem_ctx, struct brw_tcs_prog_data);
1276 struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
1277 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1278 const struct gen_device_info *devinfo = &screen->devinfo;
1279 enum brw_param_builtin *system_values = NULL;
1280 unsigned num_system_values = 0;
1281 unsigned num_cbufs = 0;
1282
1283 nir_shader *nir;
1284
1285 struct iris_binding_table bt;
1286
1287 struct brw_tcs_prog_key brw_key = iris_to_brw_tcs_key(devinfo, key);
1288
1289 if (ish) {
1290 nir = nir_shader_clone(mem_ctx, ish->nir);
1291
1292 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1293 &num_system_values, &num_cbufs);
1294 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1295 num_system_values, num_cbufs);
1296 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1297 } else {
1298 nir =
1299 brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, &brw_key);
1300
1301 /* Reserve space for passing the default tess levels as constants. */
1302 num_cbufs = 1;
1303 num_system_values = 8;
1304 system_values =
1305 rzalloc_array(mem_ctx, enum brw_param_builtin, num_system_values);
1306 prog_data->param = rzalloc_array(mem_ctx, uint32_t, num_system_values);
1307 prog_data->nr_params = num_system_values;
1308
1309 if (key->tes_primitive_mode == GL_QUADS) {
1310 for (int i = 0; i < 4; i++)
1311 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1312
1313 system_values[3] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1314 system_values[2] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y;
1315 } else if (key->tes_primitive_mode == GL_TRIANGLES) {
1316 for (int i = 0; i < 3; i++)
1317 system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
1318
1319 system_values[4] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
1320 } else {
1321 assert(key->tes_primitive_mode == GL_ISOLINES);
1322 system_values[7] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y;
1323 system_values[6] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
1324 }
1325
1326 /* Manually set up the TCS binding table. */
1327 memset(&bt, 0, sizeof(bt));
1328 bt.sizes[IRIS_SURFACE_GROUP_UBO] = 1;
1329 bt.used_mask[IRIS_SURFACE_GROUP_UBO] = 1;
1330 bt.size_bytes = 4;
1331
1332 prog_data->ubo_ranges[0].length = 1;
1333 }
1334
1335 char *error_str = NULL;
1336 const unsigned *program =
1337 brw_compile_tcs(compiler, &ice->dbg, mem_ctx, &brw_key, tcs_prog_data,
1338 nir, -1, NULL, &error_str);
1339 if (program == NULL) {
1340 dbg_printf("Failed to compile control shader: %s\n", error_str);
1341 ralloc_free(mem_ctx);
1342 return NULL;
1343 }
1344
1345 if (ish) {
1346 if (ish->compiled_once) {
1347 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1348 } else {
1349 ish->compiled_once = true;
1350 }
1351 }
1352
1353 struct iris_compiled_shader *shader =
1354 iris_upload_shader(ice, IRIS_CACHE_TCS, sizeof(*key), key, program,
1355 prog_data, NULL, system_values, num_system_values,
1356 num_cbufs, &bt);
1357
1358 if (ish)
1359 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1360
1361 ralloc_free(mem_ctx);
1362 return shader;
1363 }
1364
1365 /**
1366 * Update the current tessellation control shader variant.
1367 *
1368 * Fill out the key, look in the cache, compile and bind if needed.
1369 */
1370 static void
1371 iris_update_compiled_tcs(struct iris_context *ice)
1372 {
1373 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
1374 struct iris_uncompiled_shader *tcs =
1375 ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
1376 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1377 const struct brw_compiler *compiler = screen->compiler;
1378 const struct gen_device_info *devinfo = &screen->devinfo;
1379
1380 const struct shader_info *tes_info =
1381 iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
1382 struct iris_tcs_prog_key key = {
1383 .vue.base.program_string_id = tcs ? tcs->program_id : 0,
1384 .tes_primitive_mode = tes_info->tess.primitive_mode,
1385 .input_vertices =
1386 !tcs || compiler->use_tcs_8_patch ? ice->state.vertices_per_patch : 0,
1387 .quads_workaround = devinfo->gen < 9 &&
1388 tes_info->tess.primitive_mode == GL_QUADS &&
1389 tes_info->tess.spacing == TESS_SPACING_EQUAL,
1390 };
1391 get_unified_tess_slots(ice, &key.outputs_written,
1392 &key.patch_outputs_written);
1393 screen->vtbl.populate_tcs_key(ice, &key);
1394
1395 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
1396 struct iris_compiled_shader *shader =
1397 iris_find_cached_shader(ice, IRIS_CACHE_TCS, sizeof(key), &key);
1398
1399 if (tcs && !shader)
1400 shader = iris_disk_cache_retrieve(ice, tcs, &key, sizeof(key));
1401
1402 if (!shader)
1403 shader = iris_compile_tcs(ice, tcs, &key);
1404
1405 if (old != shader) {
1406 ice->shaders.prog[IRIS_CACHE_TCS] = shader;
1407 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TCS |
1408 IRIS_STAGE_DIRTY_BINDINGS_TCS |
1409 IRIS_STAGE_DIRTY_CONSTANTS_TCS;
1410 shs->sysvals_need_upload = true;
1411 }
1412 }
1413
1414 /**
1415 * Compile a tessellation evaluation shader, and upload the assembly.
1416 */
1417 static struct iris_compiled_shader *
1418 iris_compile_tes(struct iris_context *ice,
1419 struct iris_uncompiled_shader *ish,
1420 const struct iris_tes_prog_key *key)
1421 {
1422 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1423 const struct brw_compiler *compiler = screen->compiler;
1424 void *mem_ctx = ralloc_context(NULL);
1425 struct brw_tes_prog_data *tes_prog_data =
1426 rzalloc(mem_ctx, struct brw_tes_prog_data);
1427 struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
1428 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1429 enum brw_param_builtin *system_values;
1430 const struct gen_device_info *devinfo = &screen->devinfo;
1431 unsigned num_system_values;
1432 unsigned num_cbufs;
1433
1434 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1435
1436 if (key->vue.nr_userclip_plane_consts) {
1437 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1438 nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1439 true, false, NULL);
1440 nir_lower_io_to_temporaries(nir, impl, true, false);
1441 nir_lower_global_vars_to_local(nir);
1442 nir_lower_vars_to_ssa(nir);
1443 nir_shader_gather_info(nir, impl);
1444 }
1445
1446 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1447 &num_system_values, &num_cbufs);
1448
1449 struct iris_binding_table bt;
1450 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1451 num_system_values, num_cbufs);
1452
1453 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1454
1455 struct brw_vue_map input_vue_map;
1456 brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
1457 key->patch_inputs_read);
1458
1459 struct brw_tes_prog_key brw_key = iris_to_brw_tes_key(devinfo, key);
1460
1461 char *error_str = NULL;
1462 const unsigned *program =
1463 brw_compile_tes(compiler, &ice->dbg, mem_ctx, &brw_key, &input_vue_map,
1464 tes_prog_data, nir, -1, NULL, &error_str);
1465 if (program == NULL) {
1466 dbg_printf("Failed to compile evaluation shader: %s\n", error_str);
1467 ralloc_free(mem_ctx);
1468 return NULL;
1469 }
1470
1471 if (ish->compiled_once) {
1472 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1473 } else {
1474 ish->compiled_once = true;
1475 }
1476
1477 uint32_t *so_decls =
1478 screen->vtbl.create_so_decl_list(&ish->stream_output,
1479 &vue_prog_data->vue_map);
1480
1481
1482 struct iris_compiled_shader *shader =
1483 iris_upload_shader(ice, IRIS_CACHE_TES, sizeof(*key), key, program,
1484 prog_data, so_decls, system_values, num_system_values,
1485 num_cbufs, &bt);
1486
1487 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1488
1489 ralloc_free(mem_ctx);
1490 return shader;
1491 }
1492
1493 /**
1494 * Update the current tessellation evaluation shader variant.
1495 *
1496 * Fill out the key, look in the cache, compile and bind if needed.
1497 */
1498 static void
1499 iris_update_compiled_tes(struct iris_context *ice)
1500 {
1501 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1502 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
1503 struct iris_uncompiled_shader *ish =
1504 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1505
1506 struct iris_tes_prog_key key = { KEY_ID(vue.base) };
1507 get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
1508 screen->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1509
1510 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
1511 struct iris_compiled_shader *shader =
1512 iris_find_cached_shader(ice, IRIS_CACHE_TES, sizeof(key), &key);
1513
1514 if (!shader)
1515 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1516
1517 if (!shader)
1518 shader = iris_compile_tes(ice, ish, &key);
1519
1520 if (old != shader) {
1521 ice->shaders.prog[IRIS_CACHE_TES] = shader;
1522 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TES |
1523 IRIS_STAGE_DIRTY_BINDINGS_TES |
1524 IRIS_STAGE_DIRTY_CONSTANTS_TES;
1525 shs->sysvals_need_upload = true;
1526 }
1527
1528 /* TODO: Could compare and avoid flagging this. */
1529 const struct shader_info *tes_info = &ish->nir->info;
1530 if (tes_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
1531 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TES;
1532 ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
1533 }
1534 }
1535
1536 /**
1537 * Compile a geometry shader, and upload the assembly.
1538 */
1539 static struct iris_compiled_shader *
1540 iris_compile_gs(struct iris_context *ice,
1541 struct iris_uncompiled_shader *ish,
1542 const struct iris_gs_prog_key *key)
1543 {
1544 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1545 const struct brw_compiler *compiler = screen->compiler;
1546 const struct gen_device_info *devinfo = &screen->devinfo;
1547 void *mem_ctx = ralloc_context(NULL);
1548 struct brw_gs_prog_data *gs_prog_data =
1549 rzalloc(mem_ctx, struct brw_gs_prog_data);
1550 struct brw_vue_prog_data *vue_prog_data = &gs_prog_data->base;
1551 struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
1552 enum brw_param_builtin *system_values;
1553 unsigned num_system_values;
1554 unsigned num_cbufs;
1555
1556 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1557
1558 if (key->vue.nr_userclip_plane_consts) {
1559 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1560 nir_lower_clip_gs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
1561 false, NULL);
1562 nir_lower_io_to_temporaries(nir, impl, true, false);
1563 nir_lower_global_vars_to_local(nir);
1564 nir_lower_vars_to_ssa(nir);
1565 nir_shader_gather_info(nir, impl);
1566 }
1567
1568 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1569 &num_system_values, &num_cbufs);
1570
1571 struct iris_binding_table bt;
1572 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1573 num_system_values, num_cbufs);
1574
1575 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1576
1577 brw_compute_vue_map(devinfo,
1578 &vue_prog_data->vue_map, nir->info.outputs_written,
1579 nir->info.separate_shader, /* pos_slots */ 1);
1580
1581 struct brw_gs_prog_key brw_key = iris_to_brw_gs_key(devinfo, key);
1582
1583 char *error_str = NULL;
1584 const unsigned *program =
1585 brw_compile_gs(compiler, &ice->dbg, mem_ctx, &brw_key, gs_prog_data,
1586 nir, NULL, -1, NULL, &error_str);
1587 if (program == NULL) {
1588 dbg_printf("Failed to compile geometry shader: %s\n", error_str);
1589 ralloc_free(mem_ctx);
1590 return NULL;
1591 }
1592
1593 if (ish->compiled_once) {
1594 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1595 } else {
1596 ish->compiled_once = true;
1597 }
1598
1599 uint32_t *so_decls =
1600 screen->vtbl.create_so_decl_list(&ish->stream_output,
1601 &vue_prog_data->vue_map);
1602
1603 struct iris_compiled_shader *shader =
1604 iris_upload_shader(ice, IRIS_CACHE_GS, sizeof(*key), key, program,
1605 prog_data, so_decls, system_values, num_system_values,
1606 num_cbufs, &bt);
1607
1608 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1609
1610 ralloc_free(mem_ctx);
1611 return shader;
1612 }
1613
1614 /**
1615 * Update the current geometry shader variant.
1616 *
1617 * Fill out the key, look in the cache, compile and bind if needed.
1618 */
1619 static void
1620 iris_update_compiled_gs(struct iris_context *ice)
1621 {
1622 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
1623 struct iris_uncompiled_shader *ish =
1624 ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
1625 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
1626 struct iris_compiled_shader *shader = NULL;
1627 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1628
1629 if (ish) {
1630 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
1631 screen->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
1632
1633 shader =
1634 iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);
1635
1636 if (!shader)
1637 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1638
1639 if (!shader)
1640 shader = iris_compile_gs(ice, ish, &key);
1641 }
1642
1643 if (old != shader) {
1644 ice->shaders.prog[IRIS_CACHE_GS] = shader;
1645 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_GS |
1646 IRIS_STAGE_DIRTY_BINDINGS_GS |
1647 IRIS_STAGE_DIRTY_CONSTANTS_GS;
1648 shs->sysvals_need_upload = true;
1649 }
1650 }
1651
1652 /**
1653 * Compile a fragment (pixel) shader, and upload the assembly.
1654 */
1655 static struct iris_compiled_shader *
1656 iris_compile_fs(struct iris_context *ice,
1657 struct iris_uncompiled_shader *ish,
1658 const struct iris_fs_prog_key *key,
1659 struct brw_vue_map *vue_map)
1660 {
1661 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1662 const struct brw_compiler *compiler = screen->compiler;
1663 void *mem_ctx = ralloc_context(NULL);
1664 struct brw_wm_prog_data *fs_prog_data =
1665 rzalloc(mem_ctx, struct brw_wm_prog_data);
1666 struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
1667 enum brw_param_builtin *system_values;
1668 const struct gen_device_info *devinfo = &screen->devinfo;
1669 unsigned num_system_values;
1670 unsigned num_cbufs;
1671
1672 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1673
1674 prog_data->use_alt_mode = ish->use_alt_mode;
1675
1676 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1677 &num_system_values, &num_cbufs);
1678
1679 /* Lower output variables to load_output intrinsics before setting up
1680 * binding tables, so iris_setup_binding_table can map any load_output
1681 * intrinsics to IRIS_SURFACE_GROUP_RENDER_TARGET_READ on Gen8 for
1682 * non-coherent framebuffer fetches.
1683 */
1684 brw_nir_lower_fs_outputs(nir);
1685
1686 /* On Gen11+, shader RT write messages have a "Null Render Target" bit
1687 * and do not need a binding table entry with a null surface. Earlier
1688 * generations need an entry for a null surface.
1689 */
1690 int null_rts = devinfo->gen < 11 ? 1 : 0;
1691
1692 struct iris_binding_table bt;
1693 iris_setup_binding_table(devinfo, nir, &bt,
1694 MAX2(key->nr_color_regions, null_rts),
1695 num_system_values, num_cbufs);
1696
1697 brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
1698
1699 struct brw_wm_prog_key brw_key = iris_to_brw_fs_key(devinfo, key);
1700
1701 char *error_str = NULL;
1702 const unsigned *program =
1703 brw_compile_fs(compiler, &ice->dbg, mem_ctx, &brw_key, fs_prog_data,
1704 nir, -1, -1, -1, true, false, vue_map,
1705 NULL, &error_str);
1706 if (program == NULL) {
1707 dbg_printf("Failed to compile fragment shader: %s\n", error_str);
1708 ralloc_free(mem_ctx);
1709                      return NULL;
1710 }
1711
1712 if (ish->compiled_once) {
1713 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1714 } else {
1715 ish->compiled_once = true;
1716 }
1717
1718 struct iris_compiled_shader *shader =
1719 iris_upload_shader(ice, IRIS_CACHE_FS, sizeof(*key), key, program,
1720 prog_data, NULL, system_values, num_system_values,
1721 num_cbufs, &bt);
1722
1723 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
1724
1725 ralloc_free(mem_ctx);
1726 return shader;
1727 }
1728
1729 /**
1730 * Update the current fragment shader variant.
1731 *
1732 * Fill out the key, look in the cache, compile and bind if needed.
1733 */
1734 static void
1735 iris_update_compiled_fs(struct iris_context *ice)
1736 {
1737 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
1738 struct iris_uncompiled_shader *ish =
1739 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
1740 struct iris_fs_prog_key key = { KEY_ID(base) };
1741 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1742 screen->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
1743
1744 if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
1745 key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
1746
1747 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_FS];
1748 struct iris_compiled_shader *shader =
1749 iris_find_cached_shader(ice, IRIS_CACHE_FS, sizeof(key), &key);
1750
1751 if (!shader)
1752 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
1753
1754 if (!shader)
1755 shader = iris_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);
1756
1757 if (old != shader) {
1758      // XXX: we only need to flag CLIP if the use of NONPERSPECTIVE barycentrics
1759      // toggles; we might be able to avoid flagging SBE too.
1760 ice->shaders.prog[IRIS_CACHE_FS] = shader;
1761 ice->state.dirty |= IRIS_DIRTY_WM |
1762 IRIS_DIRTY_CLIP |
1763 IRIS_DIRTY_SBE;
1764 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS |
1765 IRIS_STAGE_DIRTY_BINDINGS_FS |
1766 IRIS_STAGE_DIRTY_CONSTANTS_FS;
1767 shs->sysvals_need_upload = true;
1768 }
1769 }
1770
1771 /**
1772 * Update the last enabled stage's VUE map.
1773 *
1774 * When the shader feeding the rasterizer's output interface changes, we
1775 * need to re-emit various packets.
1776 */
1777 static void
1778 update_last_vue_map(struct iris_context *ice,
1779 struct brw_stage_prog_data *prog_data)
1780 {
1781 struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
1782 struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
1783 struct brw_vue_map *old_map = ice->shaders.last_vue_map;
1784 const uint64_t changed_slots =
1785 (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;
1786
1787 if (changed_slots & VARYING_BIT_VIEWPORT) {
1788 ice->state.num_viewports =
1789 (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? IRIS_MAX_VIEWPORTS : 1;
1790 ice->state.dirty |= IRIS_DIRTY_CLIP |
1791 IRIS_DIRTY_SF_CL_VIEWPORT |
1792 IRIS_DIRTY_CC_VIEWPORT |
1793 IRIS_DIRTY_SCISSOR_RECT;
1794 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_FS |
1795 ice->state.stage_dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
1796 }
1797
1798 if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
1799 ice->state.dirty |= IRIS_DIRTY_SBE;
1800 }
1801
1802 ice->shaders.last_vue_map = &vue_prog_data->vue_map;
1803 }
1804
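/**
 * Upload any missing constant buffer surface states needed for pull
 * constant (UBO) loads, and flag the stage's bindings dirty if any new
 * descriptors had to be created.
 */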
1805 static void
1806 iris_update_pull_constant_descriptors(struct iris_context *ice,
1807 gl_shader_stage stage)
1808 {
1809 struct iris_compiled_shader *shader = ice->shaders.prog[stage];
1810
1811 if (!shader || !shader->prog_data->has_ubo_pull)
1812 return;
1813
1814 struct iris_shader_state *shs = &ice->state.shaders[stage];
1815 bool any_new_descriptors =
1816 shader->num_system_values > 0 && shs->sysvals_need_upload;
1817
1818 unsigned bound_cbufs = shs->bound_cbufs;
1819
1820 while (bound_cbufs) {
1821 const int i = u_bit_scan(&bound_cbufs);
1822 struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
1823 struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
1824 if (!surf_state->res && cbuf->buffer) {
1825 iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
1826 any_new_descriptors = true;
1827 }
1828 }
1829
1830 if (any_new_descriptors)
1831 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
1832 }
1833
1834 /**
1835 * Get the prog_data for a given stage, or NULL if the stage is disabled.
1836 */
1837 static struct brw_vue_prog_data *
1838 get_vue_prog_data(struct iris_context *ice, gl_shader_stage stage)
1839 {
1840 if (!ice->shaders.prog[stage])
1841 return NULL;
1842
1843 return (void *) ice->shaders.prog[stage]->prog_data;
1844 }
1845
1846 // XXX: iris_compiled_shaders are space-leaking :(
1847 // XXX: remember to unbind them when deleting them.
1848
1849 /**
1850 * Update the current shader variants for the given state.
1851 *
1852 * This should be called on every draw call to ensure that the correct
1853 * shaders are bound. It will also flag any dirty state triggered by
1854 * swapping out those shaders.
1855 */
1856 void
1857 iris_update_compiled_shaders(struct iris_context *ice)
1858 {
1859 const uint64_t dirty = ice->state.dirty;
1860 const uint64_t stage_dirty = ice->state.stage_dirty;
1861
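   /* Remember the current VUE prog_data for each geometry pipeline stage,
    * so we can tell below whether the URB configuration needs to change.
    */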
1862 struct brw_vue_prog_data *old_prog_datas[4];
1863 if (!(dirty & IRIS_DIRTY_URB)) {
1864 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
1865 old_prog_datas[i] = get_vue_prog_data(ice, i);
1866 }
1867
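   /* The tessellation stages are handled as a pair: if no evaluation
    * shader is bound, both TCS and TES are disabled.
    */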
1868 if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
1869 IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
1870 struct iris_uncompiled_shader *tes =
1871 ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
1872 if (tes) {
1873 iris_update_compiled_tcs(ice);
1874 iris_update_compiled_tes(ice);
1875 } else {
1876 ice->shaders.prog[IRIS_CACHE_TCS] = NULL;
1877 ice->shaders.prog[IRIS_CACHE_TES] = NULL;
1878 ice->state.stage_dirty |=
1879 IRIS_STAGE_DIRTY_TCS | IRIS_STAGE_DIRTY_TES |
1880 IRIS_STAGE_DIRTY_BINDINGS_TCS | IRIS_STAGE_DIRTY_BINDINGS_TES |
1881 IRIS_STAGE_DIRTY_CONSTANTS_TCS | IRIS_STAGE_DIRTY_CONSTANTS_TES;
1882 }
1883 }
1884
1885 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_VS)
1886 iris_update_compiled_vs(ice);
1887 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_GS)
1888 iris_update_compiled_gs(ice);
1889
1890 if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_GS |
1891 IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
1892 const struct iris_compiled_shader *gs =
1893 ice->shaders.prog[MESA_SHADER_GEOMETRY];
1894 const struct iris_compiled_shader *tes =
1895 ice->shaders.prog[MESA_SHADER_TESS_EVAL];
1896
1897 bool points_or_lines = false;
1898
1899 if (gs) {
1900 const struct brw_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
1901 points_or_lines =
1902 gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
1903 gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
1904 } else if (tes) {
1905 const struct brw_tes_prog_data *tes_data = (void *) tes->prog_data;
1906 points_or_lines =
1907 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_LINE ||
1908 tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
1909 }
1910
1911 if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
1912 /* Outbound to XY Clip enables */
1913 ice->shaders.output_topology_is_points_or_lines = points_or_lines;
1914 ice->state.dirty |= IRIS_DIRTY_CLIP;
1915 }
1916 }
1917
1918 gl_shader_stage last_stage = last_vue_stage(ice);
1919 struct iris_compiled_shader *shader = ice->shaders.prog[last_stage];
1920 struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
1921 update_last_vue_map(ice, shader->prog_data);
1922 if (ice->state.streamout != shader->streamout) {
1923 ice->state.streamout = shader->streamout;
1924 ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST | IRIS_DIRTY_STREAMOUT;
1925 }
1926
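   /* While streamout is active, propagate the shader's stream output
    * strides (stored in dwords) to the targets, converting them to bytes.
    */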
1927 if (ice->state.streamout_active) {
1928 for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
1929 struct iris_stream_output_target *so =
1930 (void *) ice->state.so_target[i];
1931 if (so)
1932 so->stride = ish->stream_output.stride[i] * sizeof(uint32_t);
1933 }
1934 }
1935
1936 if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_FS)
1937 iris_update_compiled_fs(ice);
1938
1939  /* Changing shader interfaces may require a URB reconfiguration. */
1940 if (!(dirty & IRIS_DIRTY_URB)) {
1941 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
1942 struct brw_vue_prog_data *old = old_prog_datas[i];
1943 struct brw_vue_prog_data *new = get_vue_prog_data(ice, i);
1944 if (!!old != !!new ||
1945 (new && new->urb_entry_size != old->urb_entry_size)) {
1946 ice->state.dirty |= IRIS_DIRTY_URB;
1947 break;
1948 }
1949 }
1950 }
1951
1952 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
1953 if (ice->state.stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << i))
1954 iris_update_pull_constant_descriptors(ice, i);
1955 }
1956 }
1957
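/**
 * Compile a compute shader, and upload the assembly.
 */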
1958 static struct iris_compiled_shader *
1959 iris_compile_cs(struct iris_context *ice,
1960 struct iris_uncompiled_shader *ish,
1961 const struct iris_cs_prog_key *key)
1962 {
1963 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
1964 const struct brw_compiler *compiler = screen->compiler;
1965 void *mem_ctx = ralloc_context(NULL);
1966 struct brw_cs_prog_data *cs_prog_data =
1967 rzalloc(mem_ctx, struct brw_cs_prog_data);
1968 struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
1969 enum brw_param_builtin *system_values;
1970 const struct gen_device_info *devinfo = &screen->devinfo;
1971 unsigned num_system_values;
1972 unsigned num_cbufs;
1973
1974 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1975
1976 NIR_PASS_V(nir, brw_nir_lower_cs_intrinsics);
1977
1978 iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
1979 &num_system_values, &num_cbufs);
1980
1981 struct iris_binding_table bt;
1982 iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1983 num_system_values, num_cbufs);
1984
1985 struct brw_cs_prog_key brw_key = iris_to_brw_cs_key(devinfo, key);
1986
1987 char *error_str = NULL;
1988 const unsigned *program =
1989 brw_compile_cs(compiler, &ice->dbg, mem_ctx, &brw_key, cs_prog_data,
1990 nir, -1, NULL, &error_str);
1991 if (program == NULL) {
1992 dbg_printf("Failed to compile compute shader: %s\n", error_str);
1993 ralloc_free(mem_ctx);
1994                      return NULL;
1995 }
1996
1997 if (ish->compiled_once) {
1998 iris_debug_recompile(ice, &nir->info, &brw_key.base);
1999 } else {
2000 ish->compiled_once = true;
2001 }
2002
2003 struct iris_compiled_shader *shader =
2004 iris_upload_shader(ice, IRIS_CACHE_CS, sizeof(*key), key, program,
2005 prog_data, NULL, system_values, num_system_values,
2006 num_cbufs, &bt);
2007
2008 iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
2009
2010 ralloc_free(mem_ctx);
2011 return shader;
2012 }
2013
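/**
 * Update the current compute shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */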
2014 static void
2015 iris_update_compiled_cs(struct iris_context *ice)
2016 {
2017 struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
2018 struct iris_uncompiled_shader *ish =
2019 ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
2020
2021 struct iris_cs_prog_key key = { KEY_ID(base) };
2022 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2023 screen->vtbl.populate_cs_key(ice, &key);
2024
2025 struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
2026 struct iris_compiled_shader *shader =
2027 iris_find_cached_shader(ice, IRIS_CACHE_CS, sizeof(key), &key);
2028
2029 if (!shader)
2030 shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
2031
2032 if (!shader)
2033 shader = iris_compile_cs(ice, ish, &key);
2034
2035 if (old != shader) {
2036 ice->shaders.prog[IRIS_CACHE_CS] = shader;
2037 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CS |
2038 IRIS_STAGE_DIRTY_BINDINGS_CS |
2039 IRIS_STAGE_DIRTY_CONSTANTS_CS;
2040 shs->sysvals_need_upload = true;
2041 }
2042 }
2043
2044 void
2045 iris_update_compiled_compute_shader(struct iris_context *ice)
2046 {
2047 if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_CS)
2048 iris_update_compiled_cs(ice);
2049
2050 if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS)
2051 iris_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
2052 }
2053
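/**
 * Fill out the per-thread push constant data for a compute shader.
 *
 * The only per-thread push constant is the subgroup ID, so each thread
 * gets a single dword (at a one-register, 8-dword stride) containing its
 * own index.
 */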
2054 void
2055 iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
2056 unsigned threads,
2057 uint32_t *dst)
2058 {
2059 assert(brw_cs_push_const_total_size(cs_prog_data, threads) > 0);
2060 assert(cs_prog_data->push.cross_thread.size == 0);
2061 assert(cs_prog_data->push.per_thread.dwords == 1);
2062 assert(cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
2063 for (unsigned t = 0; t < threads; t++)
2064 dst[8 * t] = t;
2065 }
2066
2067 /**
2068 * Allocate scratch BOs as needed for the given per-thread size and stage.
2069 */
2070 struct iris_bo *
2071 iris_get_scratch_space(struct iris_context *ice,
2072 unsigned per_thread_scratch,
2073 gl_shader_stage stage)
2074 {
2075 struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
2076 struct iris_bufmgr *bufmgr = screen->bufmgr;
2077 const struct gen_device_info *devinfo = &screen->devinfo;
2078
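   /* Per-thread scratch sizes are powers of two, so map the size to a
    * small index into the scratch_bos array (1KB -> 0, 2KB -> 1, ...).
    */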
2079 unsigned encoded_size = ffs(per_thread_scratch) - 11;
2080 assert(encoded_size < (1 << 16));
2081
2082 struct iris_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];
2083
2084 /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
2085 *
2086 * "Scratch Space per slice is computed based on 4 sub-slices. SW
2087 * must allocate scratch space enough so that each slice has 4
2088 * slices allowed."
2089 *
2090 * According to the other driver team, this applies to compute shaders
2091 * as well. This is not currently documented at all.
2092 *
2093 * This hack is no longer necessary on Gen11+.
2094 *
2095  * For Gen11+, scratch space allocation is based on the number of threads
2096 * in the base configuration.
2097 */
2098 unsigned subslice_total = screen->subslice_total;
2099 if (devinfo->gen >= 12)
2100 subslice_total = devinfo->num_subslices[0];
2101 else if (devinfo->gen == 11)
2102 subslice_total = 8;
2103 else if (devinfo->gen < 11)
2104 subslice_total = 4 * devinfo->num_slices;
2105 assert(subslice_total >= screen->subslice_total);
2106
2107 if (!*bop) {
2108 unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;
2109
2110 if (devinfo->gen >= 12) {
2111 /* Same as ICL below, but with 16 EUs. */
2112 scratch_ids_per_subslice = 16 * 8;
2113 } else if (devinfo->gen == 11) {
2114 /* The MEDIA_VFE_STATE docs say:
2115 *
2116 * "Starting with this configuration, the Maximum Number of
2117 * Threads must be set to (#EU * 8) for GPGPU dispatches.
2118 *
2119 * Although there are only 7 threads per EU in the configuration,
2120 * the FFTID is calculated as if there are 8 threads per EU,
2121 * which in turn requires a larger amount of Scratch Space to be
2122 * allocated by the driver."
2123 */
2124 scratch_ids_per_subslice = 8 * 8;
2125 }
2126
2127 uint32_t max_threads[] = {
2128 [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
2129 [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
2130 [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
2131 [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
2132 [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
2133 [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslice_total,
2134 };
2135
2136 uint32_t size = per_thread_scratch * max_threads[stage];
2137
2138 *bop = iris_bo_alloc(bufmgr, "scratch", size, IRIS_MEMZONE_SHADER);
2139 }
2140
2141 return *bop;
2142 }
2143
2144 /* ------------------------------------------------------------------- */
2145
2146 /**
2147 * The pipe->create_[stage]_state() driver hooks.
2148 *
2149 * Performs basic NIR preprocessing, records any state dependencies, and
2150 * returns an iris_uncompiled_shader as the Gallium CSO.
2151 *
2152 * Actual shader compilation to assembly happens later, at first use.
2153 */
2154 static void *
2155 iris_create_uncompiled_shader(struct pipe_context *ctx,
2156 nir_shader *nir,
2157 const struct pipe_stream_output_info *so_info)
2158 {
2159 struct iris_context *ice = (void *)ctx;
2160 struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2161 const struct gen_device_info *devinfo = &screen->devinfo;
2162
2163 struct iris_uncompiled_shader *ish =
2164 calloc(1, sizeof(struct iris_uncompiled_shader));
2165 if (!ish)
2166 return NULL;
2167
2168 NIR_PASS(ish->needs_edge_flag, nir, iris_fix_edge_flags);
2169
2170 brw_preprocess_nir(screen->compiler, nir, NULL);
2171
2172 NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo,
2173 &ish->uses_atomic_load_store);
2174 NIR_PASS_V(nir, iris_lower_storage_image_derefs);
2175
2176 nir_sweep(nir);
2177
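   /* If the shader has embedded constant data, upload it to a buffer once
    * and create a surface state for it so it can be accessed like a
    * constant buffer.
    */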
2178 if (nir->constant_data_size > 0) {
2179 unsigned data_offset;
2180 u_upload_data(ice->shaders.uploader, 0, nir->constant_data_size,
2181 32, nir->constant_data, &data_offset, &ish->const_data);
2182
2183 struct pipe_shader_buffer psb = {
2184 .buffer = ish->const_data,
2185 .buffer_offset = data_offset,
2186 .buffer_size = nir->constant_data_size,
2187 };
2188 iris_upload_ubo_ssbo_surf_state(ice, &psb, &ish->const_data_state, false);
2189 }
2190
2191 ish->program_id = get_new_program_id(screen);
2192 ish->nir = nir;
2193 if (so_info) {
2194 memcpy(&ish->stream_output, so_info, sizeof(*so_info));
2195 update_so_info(&ish->stream_output, nir->info.outputs_written);
2196 }
2197
2198 /* Save this now before potentially dropping nir->info.name */
2199 if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
2200 ish->use_alt_mode = true;
2201
2202 if (screen->disk_cache) {
2203 /* Serialize the NIR to a binary blob that we can hash for the disk
2204 * cache. Drop unnecessary information (like variable names)
2205 * so the serialized NIR is smaller, and also to let us detect more
2206 * isomorphic shaders when hashing, increasing cache hits.
2207 */
2208 struct blob blob;
2209 blob_init(&blob);
2210 nir_serialize(&blob, nir, true);
2211 _mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
2212 blob_finish(&blob);
2213 }
2214
2215 return ish;
2216 }
2217
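/**
 * Helper for the create_*_state hooks: convert TGSI to NIR if necessary,
 * then wrap the result in an iris_uncompiled_shader.
 */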
2218 static struct iris_uncompiled_shader *
2219 iris_create_shader_state(struct pipe_context *ctx,
2220 const struct pipe_shader_state *state)
2221 {
2222 struct nir_shader *nir;
2223
2224 if (state->type == PIPE_SHADER_IR_TGSI)
2225 nir = tgsi_to_nir(state->tokens, ctx->screen, false);
2226 else
2227 nir = state->ir.nir;
2228
2229 return iris_create_uncompiled_shader(ctx, nir, &state->stream_output);
2230 }
2231
2232 static void *
2233 iris_create_vs_state(struct pipe_context *ctx,
2234 const struct pipe_shader_state *state)
2235 {
2236 struct iris_context *ice = (void *) ctx;
2237 struct iris_screen *screen = (void *) ctx->screen;
2238 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2239
2240 /* User clip planes */
2241 if (ish->nir->info.clip_distance_array_size == 0)
2242 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2243
2244 if (screen->precompile) {
2245 struct iris_vs_prog_key key = { KEY_ID(vue.base) };
2246
2247 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2248 iris_compile_vs(ice, ish, &key);
2249 }
2250
2251 return ish;
2252 }
2253
2254 static void *
2255 iris_create_tcs_state(struct pipe_context *ctx,
2256 const struct pipe_shader_state *state)
2257 {
2258 struct iris_context *ice = (void *) ctx;
2259 struct iris_screen *screen = (void *) ctx->screen;
2260 const struct brw_compiler *compiler = screen->compiler;
2261 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2262 struct shader_info *info = &ish->nir->info;
2263
2264 if (screen->precompile) {
2265 const unsigned _GL_TRIANGLES = 0x0004;
2266 struct iris_tcs_prog_key key = {
2267 KEY_ID(vue.base),
2268 // XXX: make sure the linker fills this out from the TES...
2269 .tes_primitive_mode =
2270 info->tess.primitive_mode ? info->tess.primitive_mode
2271 : _GL_TRIANGLES,
2272 .outputs_written = info->outputs_written,
2273 .patch_outputs_written = info->patch_outputs_written,
2274 };
2275
2276 /* 8_PATCH mode needs the key to contain the input patch dimensionality.
2277 * We don't have that information, so we randomly guess that the input
2278 * and output patches are the same size. This is a bad guess, but we
2279 * can't do much better.
2280 */
2281 if (compiler->use_tcs_8_patch)
2282 key.input_vertices = info->tess.tcs_vertices_out;
2283
2284 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2285 iris_compile_tcs(ice, ish, &key);
2286 }
2287
2288 return ish;
2289 }
2290
2291 static void *
2292 iris_create_tes_state(struct pipe_context *ctx,
2293 const struct pipe_shader_state *state)
2294 {
2295 struct iris_context *ice = (void *) ctx;
2296 struct iris_screen *screen = (void *) ctx->screen;
2297 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2298 struct shader_info *info = &ish->nir->info;
2299
2300 /* User clip planes */
2301 if (ish->nir->info.clip_distance_array_size == 0)
2302 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2303
2304 if (screen->precompile) {
2305 struct iris_tes_prog_key key = {
2306 KEY_ID(vue.base),
2307 // XXX: not ideal, need TCS output/TES input unification
2308 .inputs_read = info->inputs_read,
2309 .patch_inputs_read = info->patch_inputs_read,
2310 };
2311
2312 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2313 iris_compile_tes(ice, ish, &key);
2314 }
2315
2316 return ish;
2317 }
2318
2319 static void *
2320 iris_create_gs_state(struct pipe_context *ctx,
2321 const struct pipe_shader_state *state)
2322 {
2323 struct iris_context *ice = (void *) ctx;
2324 struct iris_screen *screen = (void *) ctx->screen;
2325 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2326
2327 /* User clip planes */
2328 if (ish->nir->info.clip_distance_array_size == 0)
2329 ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
2330
2331 if (screen->precompile) {
2332 struct iris_gs_prog_key key = { KEY_ID(vue.base) };
2333
2334 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2335 iris_compile_gs(ice, ish, &key);
2336 }
2337
2338 return ish;
2339 }
2340
2341 static void *
2342 iris_create_fs_state(struct pipe_context *ctx,
2343 const struct pipe_shader_state *state)
2344 {
2345 struct iris_context *ice = (void *) ctx;
2346 struct iris_screen *screen = (void *) ctx->screen;
2347 struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
2348 struct shader_info *info = &ish->nir->info;
2349
2350 ish->nos |= (1ull << IRIS_NOS_FRAMEBUFFER) |
2351 (1ull << IRIS_NOS_DEPTH_STENCIL_ALPHA) |
2352 (1ull << IRIS_NOS_RASTERIZER) |
2353 (1ull << IRIS_NOS_BLEND);
2354
2355 /* The program key needs the VUE map if there are > 16 inputs */
2356 if (util_bitcount64(ish->nir->info.inputs_read &
2357 BRW_FS_VARYING_INPUT_MASK) > 16) {
2358 ish->nos |= (1ull << IRIS_NOS_LAST_VUE_MAP);
2359 }
2360
2361 if (screen->precompile) {
2362 const uint64_t color_outputs = info->outputs_written &
2363 ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
2364 BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
2365 BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));
2366
2367 bool can_rearrange_varyings =
2368 util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;
2369
2370 const struct gen_device_info *devinfo = &screen->devinfo;
2371 struct iris_fs_prog_key key = {
2372 KEY_ID(base),
2373 .nr_color_regions = util_bitcount(color_outputs),
2374 .coherent_fb_fetch = devinfo->gen >= 9,
2375 .input_slots_valid =
2376 can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
2377 };
2378
2379 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2380 iris_compile_fs(ice, ish, &key, NULL);
2381 }
2382
2383 return ish;
2384 }
2385
2386 static void *
2387 iris_create_compute_state(struct pipe_context *ctx,
2388 const struct pipe_compute_state *state)
2389 {
2390 assert(state->ir_type == PIPE_SHADER_IR_NIR);
2391
2392 struct iris_context *ice = (void *) ctx;
2393 struct iris_screen *screen = (void *) ctx->screen;
2394 struct iris_uncompiled_shader *ish =
2395 iris_create_uncompiled_shader(ctx, (void *) state->prog, NULL);
2396
2397 // XXX: disallow more than 64KB of shared variables
2398
2399 if (screen->precompile) {
2400 struct iris_cs_prog_key key = { KEY_ID(base) };
2401
2402 if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
2403 iris_compile_cs(ice, ish, &key);
2404 }
2405
2406 return ish;
2407 }
2408
2409 /**
2410 * The pipe->delete_[stage]_state() driver hooks.
2411 *
2412 * Frees the iris_uncompiled_shader.
2413 */
2414 static void
2415 iris_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
2416 {
2417 struct iris_uncompiled_shader *ish = state;
2418 struct iris_context *ice = (void *) ctx;
2419
2420 if (ice->shaders.uncompiled[stage] == ish) {
2421 ice->shaders.uncompiled[stage] = NULL;
2422 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2423 }
2424
2425 if (ish->const_data) {
2426 pipe_resource_reference(&ish->const_data, NULL);
2427 pipe_resource_reference(&ish->const_data_state.res, NULL);
2428 }
2429
2430 ralloc_free(ish->nir);
2431 free(ish);
2432 }
2433
2434 static void
2435 iris_delete_vs_state(struct pipe_context *ctx, void *state)
2436 {
2437 iris_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
2438 }
2439
2440 static void
2441 iris_delete_tcs_state(struct pipe_context *ctx, void *state)
2442 {
2443 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
2444 }
2445
2446 static void
2447 iris_delete_tes_state(struct pipe_context *ctx, void *state)
2448 {
2449 iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
2450 }
2451
2452 static void
2453 iris_delete_gs_state(struct pipe_context *ctx, void *state)
2454 {
2455 iris_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
2456 }
2457
2458 static void
2459 iris_delete_fs_state(struct pipe_context *ctx, void *state)
2460 {
2461 iris_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
2462 }
2463
2464 static void
2465 iris_delete_cs_state(struct pipe_context *ctx, void *state)
2466 {
2467 iris_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
2468 }
2469
2470 /**
2471 * The pipe->bind_[stage]_state() driver hook.
2472 *
2473 * Binds an uncompiled shader as the current one for a particular stage.
2474 * Updates dirty tracking to account for the shader's NOS.
2475 */
2476 static void
2477 bind_shader_state(struct iris_context *ice,
2478 struct iris_uncompiled_shader *ish,
2479 gl_shader_stage stage)
2480 {
2481 uint64_t stage_dirty_bit = IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
2482 const uint64_t nos = ish ? ish->nos : 0;
2483
2484 const struct shader_info *old_info = iris_get_shader_info(ice, stage);
2485 const struct shader_info *new_info = ish ? &ish->nir->info : NULL;
2486
2487 if ((old_info ? util_last_bit(old_info->textures_used) : 0) !=
2488 (new_info ? util_last_bit(new_info->textures_used) : 0)) {
2489 ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
2490 }
2491
2492 ice->shaders.uncompiled[stage] = ish;
2493 ice->state.stage_dirty |= stage_dirty_bit;
2494
2495  /* Record that CSOs need to mark IRIS_STAGE_DIRTY_UNCOMPILED_XS when they change
2496 * (or that they no longer need to do so).
2497 */
2498 for (int i = 0; i < IRIS_NOS_COUNT; i++) {
2499 if (nos & (1 << i))
2500 ice->state.stage_dirty_for_nos[i] |= stage_dirty_bit;
2501 else
2502 ice->state.stage_dirty_for_nos[i] &= ~stage_dirty_bit;
2503 }
2504 }
2505
2506 static void
2507 iris_bind_vs_state(struct pipe_context *ctx, void *state)
2508 {
2509 struct iris_context *ice = (struct iris_context *)ctx;
2510 struct iris_uncompiled_shader *new_ish = state;
2511
2512 if (new_ish &&
2513 ice->state.window_space_position !=
2514 new_ish->nir->info.vs.window_space_position) {
2515 ice->state.window_space_position =
2516 new_ish->nir->info.vs.window_space_position;
2517
2518 ice->state.dirty |= IRIS_DIRTY_CLIP |
2519 IRIS_DIRTY_RASTER |
2520 IRIS_DIRTY_CC_VIEWPORT;
2521 }
2522
2523 bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
2524 }
2525
2526 static void
2527 iris_bind_tcs_state(struct pipe_context *ctx, void *state)
2528 {
2529 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
2530 }
2531
2532 static void
2533 iris_bind_tes_state(struct pipe_context *ctx, void *state)
2534 {
2535 struct iris_context *ice = (struct iris_context *)ctx;
2536
2537 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2538 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
2539 ice->state.dirty |= IRIS_DIRTY_URB;
2540
2541 bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
2542 }
2543
2544 static void
2545 iris_bind_gs_state(struct pipe_context *ctx, void *state)
2546 {
2547 struct iris_context *ice = (struct iris_context *)ctx;
2548
2549 /* Enabling/disabling optional stages requires a URB reconfiguration. */
2550 if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
2551 ice->state.dirty |= IRIS_DIRTY_URB;
2552
2553 bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
2554 }
2555
2556 static void
2557 iris_bind_fs_state(struct pipe_context *ctx, void *state)
2558 {
2559 struct iris_context *ice = (struct iris_context *) ctx;
2560 struct iris_screen *screen = (struct iris_screen *) ctx->screen;
2561 const struct gen_device_info *devinfo = &screen->devinfo;
2562 struct iris_uncompiled_shader *old_ish =
2563 ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
2564 struct iris_uncompiled_shader *new_ish = state;
2565
2566 const unsigned color_bits =
2567 BITFIELD64_BIT(FRAG_RESULT_COLOR) |
2568 BITFIELD64_RANGE(FRAG_RESULT_DATA0, BRW_MAX_DRAW_BUFFERS);
2569
2570 /* Fragment shader outputs influence HasWriteableRT */
2571 if (!old_ish || !new_ish ||
2572 (old_ish->nir->info.outputs_written & color_bits) !=
2573 (new_ish->nir->info.outputs_written & color_bits))
2574 ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
2575
2576 if (devinfo->gen == 8)
2577 ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
2578
2579 bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
2580 }
2581
2582 static void
2583 iris_bind_cs_state(struct pipe_context *ctx, void *state)
2584 {
2585 bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
2586 }
2587
2588 void
2589 iris_init_program_functions(struct pipe_context *ctx)
2590 {
2591 ctx->create_vs_state = iris_create_vs_state;
2592 ctx->create_tcs_state = iris_create_tcs_state;
2593 ctx->create_tes_state = iris_create_tes_state;
2594 ctx->create_gs_state = iris_create_gs_state;
2595 ctx->create_fs_state = iris_create_fs_state;
2596 ctx->create_compute_state = iris_create_compute_state;
2597
2598 ctx->delete_vs_state = iris_delete_vs_state;
2599 ctx->delete_tcs_state = iris_delete_tcs_state;
2600 ctx->delete_tes_state = iris_delete_tes_state;
2601 ctx->delete_gs_state = iris_delete_gs_state;
2602 ctx->delete_fs_state = iris_delete_fs_state;
2603 ctx->delete_compute_state = iris_delete_cs_state;
2604
2605 ctx->bind_vs_state = iris_bind_vs_state;
2606 ctx->bind_tcs_state = iris_bind_tcs_state;
2607 ctx->bind_tes_state = iris_bind_tes_state;
2608 ctx->bind_gs_state = iris_bind_gs_state;
2609 ctx->bind_fs_state = iris_bind_fs_state;
2610 ctx->bind_compute_state = iris_bind_cs_state;
2611 }